diff --git a/autoarray/config/visualize/plots.yaml b/autoarray/config/visualize/plots.yaml index aa2664506..7d10414f6 100644 --- a/autoarray/config/visualize/plots.yaml +++ b/autoarray/config/visualize/plots.yaml @@ -27,7 +27,7 @@ inversion: # Settings for plots of inversions (e sub_pixels_per_image_pixels: false # Plot the number of sub pixels per masked data pixels? mesh_pixels_per_image_pixels: false # Plot the number of image-plane mesh pixels per masked data pixels? image_pixels_per_mesh_pixels: false # Plot the number of image pixels in each pixel of the mesh? - reconstructed_image: false # Plot image of the reconstructed data (e.g. in the image-plane)? + reconstructed_operated_data: false # Plot image of the reconstructed data (e.g. in the image-plane)? reconstruction: false # Plot the reconstructed inversion (e.g. the pixelization's mesh in the source-plane)? regularization_weights: false # Plot the effective regularization weight of every inversion mesh pixel? fit_interferometer: # Settings for plots of fits to interferometer datasets (e.g. FitInterferometerPlotter). diff --git a/autoarray/inversion/inversion/abstract.py b/autoarray/inversion/inversion/abstract.py index 54604a366..ae991a494 100644 --- a/autoarray/inversion/inversion/abstract.py +++ b/autoarray/inversion/inversion/abstract.py @@ -514,25 +514,9 @@ def source_quantity_dict_from( return source_quantity_dict @property - def mapped_reconstructed_data_dict(self) -> Dict[LinearObj, Array2D]: + def mapped_reconstructed_operated_data_dict(self) -> Dict[LinearObj, Array2D]: raise NotImplementedError - @property - def mapped_reconstructed_image_dict(self) -> Dict[LinearObj, Array2D]: - """ - Using the reconstructed source pixel fluxes we map each source pixel flux back to the image plane and - reconstruct the image data. - - This uses the unique mappings of every source pixel to image pixels, which is a quantity that is already - computed when using the w-tilde formalism. 
- - Returns - ------- - Array2D - The reconstructed image data which the inversion fits. - """ - return self.mapped_reconstructed_data_dict - @property def mapped_reconstructed_data(self) -> Union[Array2D, Visibilities]: """ @@ -550,7 +534,7 @@ def mapped_reconstructed_data(self) -> Union[Array2D, Visibilities]: return sum(self.mapped_reconstructed_data_dict.values()) @property - def mapped_reconstructed_image(self) -> Array2D: + def mapped_reconstructed_operated_data(self) -> Union[Array2D, Visibilities]: """ Using the reconstructed source pixel fluxes we map each source pixel flux back to the image plane and reconstruct the image data. @@ -563,7 +547,7 @@ def mapped_reconstructed_image(self) -> Array2D: Array2D The reconstructed image data which the inversion fits. """ - return sum(self.mapped_reconstructed_image_dict.values()) + return sum(self.mapped_reconstructed_operated_data_dict.values()) @property def data_subtracted_dict(self) -> Dict[LinearObj, Array2D]: @@ -589,7 +573,7 @@ def data_subtracted_dict(self) -> Dict[LinearObj, Array2D]: if linear_obj != linear_obj_other: data_subtracted_dict[ linear_obj - ] -= self.mapped_reconstructed_image_dict[linear_obj_other] + ] -= self.mapped_reconstructed_operated_data_dict[linear_obj_other] return data_subtracted_dict diff --git a/autoarray/inversion/inversion/imaging/abstract.py b/autoarray/inversion/inversion/imaging/abstract.py index ab0c79186..4269306b1 100644 --- a/autoarray/inversion/inversion/imaging/abstract.py +++ b/autoarray/inversion/inversion/imaging/abstract.py @@ -75,6 +75,19 @@ def __init__( def psf(self): return self.dataset.psf + @property + def mapping_matrix_list(self) -> List[np.ndarray]: + """ + The `mapping_matrix` of a linear object describes the mappings between the observed data's values and + the linear object's model, before an operation like a convolution is applied. + + This is used to construct the simultaneous linear equations which reconstruct the data. 
+ + This property returns a list containing each linear object's unoperated (unconvolved) `mapping_matrix` + as defined on the corresponding `LinearObj` instance. + """ + return [linear_obj.mapping_matrix for linear_obj in self.linear_obj_list] + @property def operated_mapping_matrix_list(self) -> List[np.ndarray]: """ @@ -115,6 +128,30 @@ def _updated_cls_key_dict_from(self, cls: Type, preload_dict: Dict) -> Dict: return cls_dict + @property + def linear_func_mapping_matrix_dict(self) -> Dict: + """ + The `mapping_matrix` of a linear object describes the mappings between the observed data's values and + the linear object's model, before operations like a 2D convolution are applied. It is described fully in + the method `mapping_matrix`. + + This property returns a dictionary mapping every linear func object to its corresponding unoperated mapping + matrix, which is used for constructing the matrices that perform the linear inversion in an efficient way + for the PSF precision operator calculation. + + Returns + ------- + A dictionary mapping every linear function object to its unoperated mapping matrix. 
+ """ + + linear_func_mapping_matrix_dict = {} + + for linear_func in self.cls_list_from(cls=AbstractLinearObjFuncList): + + linear_func_mapping_matrix_dict[linear_func] = linear_func.mapping_matrix + + return linear_func_mapping_matrix_dict + @property def linear_func_operated_mapping_matrix_dict(self) -> Dict: """ diff --git a/autoarray/inversion/inversion/imaging/inversion_imaging_util.py b/autoarray/inversion/inversion/imaging/inversion_imaging_util.py index 6001b5a89..776938805 100644 --- a/autoarray/inversion/inversion/imaging/inversion_imaging_util.py +++ b/autoarray/inversion/inversion/imaging/inversion_imaging_util.py @@ -277,7 +277,7 @@ def curvature_matrix_with_added_to_diag_from( return curvature_matrix.at[inds, inds].add(value) -def mapped_reconstructed_image_via_sparse_operator_from( +def mapped_reconstructed_operated_data_via_sparse_operator_from( reconstruction, # (S,) rows, cols, diff --git a/autoarray/inversion/inversion/imaging/mapping.py b/autoarray/inversion/inversion/imaging/mapping.py index 00c618eb5..318a55800 100644 --- a/autoarray/inversion/inversion/imaging/mapping.py +++ b/autoarray/inversion/inversion/imaging/mapping.py @@ -101,8 +101,44 @@ def curvature_matrix(self): xp=self._xp, ) + def _mapped_reconstructed_data_dict_from( + self, + mapping_matrix_list, + ) -> Dict["LinearObj", "Array2D"]: + """ + Shared implementation for mapping a reconstruction to image-plane arrays + using a provided list of mapping matrices (operated or unoperated). 
+ """ + mapped_reconstructed_data_dict = {} + + reconstruction_dict = self.source_quantity_dict_from( + source_quantity=self.reconstruction + ) + + for index, linear_obj in enumerate(self.linear_obj_list): + reconstruction = reconstruction_dict[linear_obj] + + mapped_reconstructed_operated_data = ( + inversion_util.mapped_reconstructed_data_via_mapping_matrix_from( + mapping_matrix=mapping_matrix_list[index], + reconstruction=reconstruction, + xp=self._xp, + ) + ) + + mapped_reconstructed_operated_data = Array2D( + values=mapped_reconstructed_operated_data, + mask=self.mask, + ) + + mapped_reconstructed_data_dict[linear_obj] = ( + mapped_reconstructed_operated_data + ) + + return mapped_reconstructed_data_dict + @property - def mapped_reconstructed_data_dict(self) -> Dict[LinearObj, Array2D]: + def mapped_reconstructed_data_dict(self) -> Dict["LinearObj", "Array2D"]: """ When constructing the simultaneous linear equations (via vectors and matrices) the quantities of each individual linear object (e.g. their `mapping_matrix`) are combined into single ndarrays via stacking. This does not track @@ -116,6 +152,9 @@ def mapped_reconstructed_data_dict(self) -> Dict[LinearObj, Array2D]: This function converts an ndarray of a `reconstruction` to a dictionary of ndarrays containing each linear object's reconstructed data values, where the keys are the instances of each mapper in the inversion. + The images are the unconvolved reconstructed data values, meaning they are the solved for reconstruction + with PSF operations removed. + To perform this mapping the `mapping_matrix` is used, which straightforwardly describes how every value of the `reconstruction` maps to pixels in the data-frame after the 2D convolution operation has been performed. @@ -125,30 +164,37 @@ def mapped_reconstructed_data_dict(self) -> Dict[LinearObj, Array2D]: The reconstruction (in the source frame) whose values are mapped to a dictionary of values for each individual mapper (in the image-plane). 
""" - - mapped_reconstructed_data_dict = {} - - reconstruction_dict = self.source_quantity_dict_from( - source_quantity=self.reconstruction + return self._mapped_reconstructed_data_dict_from( + mapping_matrix_list=self.mapping_matrix_list ) - operated_mapping_matrix_list = self.operated_mapping_matrix_list + @property + def mapped_reconstructed_operated_data_dict(self) -> Dict["LinearObj", "Array2D"]: + """ + When constructing the simultaneous linear equations (via vectors and matrices) the quantities of each individual + linear object (e.g. their `mapping_matrix`) are combined into single ndarrays via stacking. This does not track + which quantities belong to which linear objects, therefore the linear equation's solutions (which are returned + as ndarrays) do not contain information on which linear object(s) they correspond to. - for index, linear_obj in enumerate(self.linear_obj_list): - reconstruction = reconstruction_dict[linear_obj] + For example, consider if two `Mapper` objects with 50 and 100 source pixels are used in an `Inversion`. + The `reconstruction` (which contains the solved for source pixels values) is an ndarray of shape [150], but + the ndarray itself does not track which values belong to which `Mapper`. - mapped_reconstructed_image = ( - inversion_util.mapped_reconstructed_data_via_mapping_matrix_from( - mapping_matrix=operated_mapping_matrix_list[index], - reconstruction=reconstruction, - xp=self._xp, - ) - ) + This function converts an ndarray of a `reconstruction` to a dictionary of ndarrays containing each linear + object's reconstructed data values, where the keys are the instances of each mapper in the inversion. - mapped_reconstructed_image = Array2D( - values=mapped_reconstructed_image, mask=self.mask - ) + The images are the convolved reconstructed data values, meaning they are the solved for reconstruction with PSF + operations included. 
- mapped_reconstructed_data_dict[linear_obj] = mapped_reconstructed_image + To perform this mapping the `mapping_matrix` is used, which straightforwardly describes how every value of + the `reconstruction` maps to pixels in the data-frame after the 2D convolution operation has been performed. - return mapped_reconstructed_data_dict + Parameters + ---------- + reconstruction + The reconstruction (in the source frame) whose values are mapped to a dictionary of values for each + individual mapper (in the image-plane). + """ + return self._mapped_reconstructed_data_dict_from( + mapping_matrix_list=self.operated_mapping_matrix_list + ) diff --git a/autoarray/inversion/inversion/imaging/sparse.py b/autoarray/inversion/inversion/imaging/sparse.py index 828ead97d..8f6ab1ba5 100644 --- a/autoarray/inversion/inversion/imaging/sparse.py +++ b/autoarray/inversion/inversion/imaging/sparse.py @@ -483,46 +483,32 @@ def _curvature_matrix_func_list_and_mapper(self) -> np.ndarray: return curvature_matrix - @property - def mapped_reconstructed_data_dict(self) -> Dict[LinearObj, Array2D]: + def _mapped_reconstructed_data_dict_from( + self, + *, + use_operated_for_linear_func: bool, + ) -> Dict["LinearObj", "Array2D"]: """ - When constructing the simultaneous linear equations (via vectors and matrices) the quantities of each individual - linear object (e.g. their `mapping_matrix`) are combined into single ndarrays via stacking. This does not track - which quantities belong to which linear objects, therefore the linear equation's solutions (which are returned - as ndarrays) do not contain information on which linear object(s) they correspond to. - - For example, consider if two `Mapper` objects with 50 and 100 source pixels are used in an `Inversion`. - The `reconstruction` (which contains the solved for source pixels values) is an ndarray of shape [150], but - the ndarray itself does not track which values belong to which `Mapper`. 
+ Shared implementation for mapping a reconstruction to image-plane arrays for each linear object. - This function converts an ndarray of a `reconstruction` to a dictionary of ndarrays containing each linear - object's reconstructed data values, where the keys are the instances of each mapper in the inversion. - - The w-tilde formalism bypasses the calculation of the `mapping_matrix` and it therefore cannot be used to map - the reconstruction's values to the image-plane. Instead, the unique data-to-pixelization mappings are used, - including the 2D convolution operation after mapping is complete. - - Parameters - ---------- - reconstruction - The reconstruction (in the source frame) whose values are mapped to a dictionary of values for each - individual mapper (in the image-plane). + For AbstractMapper objects this uses the sparse operator mapping, and optionally applies the PSF. + For linear-func objects this uses either the operated or unoperated mapping matrix dict. """ - - mapped_reconstructed_data_dict = {} + mapped_dict = {} reconstruction_dict = self.source_quantity_dict_from( source_quantity=self.reconstruction ) for linear_obj in self.linear_obj_list: + reconstruction = reconstruction_dict[linear_obj] if isinstance(linear_obj, AbstractMapper): rows, cols, vals = linear_obj.sparse_triplets_curvature - mapped_reconstructed_image = inversion_imaging_util.mapped_reconstructed_image_via_sparse_operator_from( + mapped = inversion_imaging_util.mapped_reconstructed_operated_data_via_sparse_operator_from( reconstruction=reconstruction, rows=rows, cols=cols, @@ -531,32 +517,94 @@ def mapped_reconstructed_data_dict(self) -> Dict[LinearObj, Array2D]: data_shape=self.mask.shape_native, ) - mapped_reconstructed_image = Array2D( - values=mapped_reconstructed_image, mask=self.mask - ) + mapped = Array2D(values=mapped, mask=self.mask) - mapped_reconstructed_image = self.psf.convolved_image_from( - image=mapped_reconstructed_image, blurring_image=None, xp=self._xp - ).array 
+ if use_operated_for_linear_func: - mapped_reconstructed_image = Array2D( - values=mapped_reconstructed_image, mask=self.mask - ) + mapped = self.psf.convolved_image_from( + image=mapped, + blurring_image=None, + xp=self._xp, + ).array + + mapped = Array2D(values=mapped, mask=self.mask) else: - operated_mapping_matrix = self.linear_func_operated_mapping_matrix_dict[ - linear_obj - ] + if use_operated_for_linear_func: + mapping_matrix = self.linear_func_operated_mapping_matrix_dict[ + linear_obj + ] + else: + mapping_matrix = self.linear_func_mapping_matrix_dict[linear_obj] - mapped_reconstructed_image = self._xp.sum( - reconstruction * operated_mapping_matrix, axis=1 - ) + mapped = self._xp.sum(reconstruction * mapping_matrix, axis=1) + mapped = Array2D(values=mapped, mask=self.mask) - mapped_reconstructed_image = Array2D( - values=mapped_reconstructed_image, mask=self.mask - ) + mapped_dict[linear_obj] = mapped - mapped_reconstructed_data_dict[linear_obj] = mapped_reconstructed_image + return mapped_dict + + @property + def mapped_reconstructed_data_dict(self) -> Dict[LinearObj, Array2D]: + """ + When constructing the simultaneous linear equations (via vectors and matrices) the quantities of each individual + linear object (e.g. their `mapping_matrix`) are combined into single ndarrays via stacking. This does not track + which quantities belong to which linear objects, therefore the linear equation's solutions (which are returned + as ndarrays) do not contain information on which linear object(s) they correspond to. + + For example, consider if two `Mapper` objects with 50 and 100 source pixels are used in an `Inversion`. + The `reconstruction` (which contains the solved for source pixels values) is an ndarray of shape [150], but + the ndarray itself does not track which values belong to which `Mapper`. 
- return mapped_reconstructed_data_dict + This function converts an ndarray of a `reconstruction` to a dictionary of ndarrays containing each linear + object's reconstructed data values, where the keys are the instances of each mapper in the inversion. + + The images are the unconvolved reconstructed data values, meaning they are the solved for reconstruction + with PSF operations removed. + + The w-tilde formalism bypasses the calculation of the `mapping_matrix` and it therefore cannot be used to map + the reconstruction's values to the image-plane. Instead, the unique data-to-pixelization mappings are used, + including the 2D convolution operation after mapping is complete. + + Parameters + ---------- + reconstruction + The reconstruction (in the source frame) whose values are mapped to a dictionary of values for each + individual mapper (in the image-plane). + """ + return self._mapped_reconstructed_data_dict_from( + use_operated_for_linear_func=False + ) + + @property + def mapped_reconstructed_operated_data_dict(self) -> Dict[LinearObj, Array2D]: + """ + When constructing the simultaneous linear equations (via vectors and matrices) the quantities of each individual + linear object (e.g. their `mapping_matrix`) are combined into single ndarrays via stacking. This does not track + which quantities belong to which linear objects, therefore the linear equation's solutions (which are returned + as ndarrays) do not contain information on which linear object(s) they correspond to. + + For example, consider if two `Mapper` objects with 50 and 100 source pixels are used in an `Inversion`. + The `reconstruction` (which contains the solved for source pixels values) is an ndarray of shape [150], but + the ndarray itself does not track which values belong to which `Mapper`. 
+ + This function converts an ndarray of a `reconstruction` to a dictionary of ndarrays containing each linear + object's reconstructed data values, where the keys are the instances of each mapper in the inversion. + + The images are the convolved reconstructed data values, meaning they are the solved for reconstruction with PSF + operations included. + + The w-tilde formalism bypasses the calculation of the `mapping_matrix` and it therefore cannot be used to map + the reconstruction's values to the image-plane. Instead, the unique data-to-pixelization mappings are used, + including the 2D convolution operation after mapping is complete. + + Parameters + ---------- + reconstruction + The reconstruction (in the source frame) whose values are mapped to a dictionary of values for each + individual mapper (in the image-plane). + """ + return self._mapped_reconstructed_data_dict_from( + use_operated_for_linear_func=True + ) diff --git a/autoarray/inversion/inversion/imaging_numba/inversion_imaging_numba_util.py b/autoarray/inversion/inversion/imaging_numba/inversion_imaging_numba_util.py index 7baa76454..27b680ce5 100644 --- a/autoarray/inversion/inversion/imaging_numba/inversion_imaging_numba_util.py +++ b/autoarray/inversion/inversion/imaging_numba/inversion_imaging_numba_util.py @@ -790,17 +790,17 @@ def mapped_reconstructed_data_via_image_to_pix_unique_from( data_pixels = data_to_pix_unique.shape[0] - mapped_reconstructed_data = np.zeros(data_pixels) + mapped_reconstructed_operated_data = np.zeros(data_pixels) for data_0 in range(data_pixels): for pix_0 in range(pix_lengths[data_0]): pix_for_data = data_to_pix_unique[data_0, pix_0] - mapped_reconstructed_data[data_0] += ( + mapped_reconstructed_operated_data[data_0] += ( data_weights[data_0, pix_0] * reconstruction[pix_for_data] ) - return mapped_reconstructed_data + return mapped_reconstructed_operated_data class SparseLinAlgImagingNumba: diff --git a/autoarray/inversion/inversion/imaging_numba/sparse.py 
b/autoarray/inversion/inversion/imaging_numba/sparse.py index 8a3f6dab1..91bb74577 100644 --- a/autoarray/inversion/inversion/imaging_numba/sparse.py +++ b/autoarray/inversion/inversion/imaging_numba/sparse.py @@ -467,6 +467,61 @@ def _curvature_matrix_func_list_and_mapper(self) -> np.ndarray: return curvature_matrix + def _mapped_reconstructed_data_dict_from( + self, + *, + use_operated_for_linear_func: bool, + ) -> Dict["LinearObj", "Array2D"]: + """ + Shared implementation for mapping a reconstruction to image-plane arrays for each linear object. + + - AbstractMapper: uses unique mappings (w-tilde compatible) + PSF convolution. + - Linear-func: uses either operated or unoperated mapping matrix dict. + """ + mapped_dict: Dict["LinearObj", "Array2D"] = {} + + reconstruction_dict = self.source_quantity_dict_from( + source_quantity=self.reconstruction + ) + + for linear_obj in self.linear_obj_list: + reconstruction = reconstruction_dict[linear_obj] + + if isinstance(linear_obj, AbstractMapper): + + mapped = inversion_imaging_numba_util.mapped_reconstructed_data_via_image_to_pix_unique_from( + data_to_pix_unique=linear_obj.unique_mappings.data_to_pix_unique, + data_weights=linear_obj.unique_mappings.data_weights, + pix_lengths=linear_obj.unique_mappings.pix_lengths, + reconstruction=np.array(reconstruction), + ) + + mapped = Array2D(values=mapped, mask=self.mask) + + if use_operated_for_linear_func: + mapped = self.psf.convolved_image_from( + image=mapped, + blurring_image=None, + ).array + + mapped = Array2D(values=mapped, mask=self.mask) + + else: + + if use_operated_for_linear_func: + mapping_matrix = self.linear_func_operated_mapping_matrix_dict[ + linear_obj + ] + else: + mapping_matrix = self.linear_func_mapping_matrix_dict[linear_obj] + + mapped = np.sum(reconstruction * mapping_matrix, axis=1) + mapped = Array2D(values=mapped, mask=self.mask) + + mapped_dict[linear_obj] = mapped + + return mapped_dict + @property def mapped_reconstructed_data_dict(self) -> 
Dict[LinearObj, Array2D]: """ @@ -482,6 +537,9 @@ def mapped_reconstructed_data_dict(self) -> Dict[LinearObj, Array2D]: This function converts an ndarray of a `reconstruction` to a dictionary of ndarrays containing each linear object's reconstructed data values, where the keys are the instances of each mapper in the inversion. + The images are the unconvolved reconstructed data values, meaning they are the solved for reconstruction + with PSF operations removed. + The w-tilde formalism bypasses the calculation of the `mapping_matrix` and it therefore cannot be used to map the reconstruction's values to the image-plane. Instead, the unique data-to-pixelization mappings are used, including the 2D convolution operation after mapping is complete. @@ -492,52 +550,38 @@ def mapped_reconstructed_data_dict(self) -> Dict[LinearObj, Array2D]: The reconstruction (in the source frame) whose values are mapped to a dictionary of values for each individual mapper (in the image-plane). """ - - mapped_reconstructed_data_dict = {} - - reconstruction_dict = self.source_quantity_dict_from( - source_quantity=self.reconstruction + return self._mapped_reconstructed_data_dict_from( + use_operated_for_linear_func=False ) - for linear_obj in self.linear_obj_list: - reconstruction = reconstruction_dict[linear_obj] - - if isinstance(linear_obj, AbstractMapper): - - mapped_reconstructed_image = inversion_imaging_numba_util.mapped_reconstructed_data_via_image_to_pix_unique_from( - data_to_pix_unique=linear_obj.unique_mappings.data_to_pix_unique, - data_weights=linear_obj.unique_mappings.data_weights, - pix_lengths=linear_obj.unique_mappings.pix_lengths, - reconstruction=np.array(reconstruction), - ) - - mapped_reconstructed_image = Array2D( - values=mapped_reconstructed_image, mask=self.mask - ) - - mapped_reconstructed_image = self.psf.convolved_image_from( - image=mapped_reconstructed_image, - blurring_image=None, - ).array - - mapped_reconstructed_image = Array2D( - 
values=mapped_reconstructed_image, mask=self.mask - ) - - else: + @property + def mapped_reconstructed_operated_data_dict(self) -> Dict[LinearObj, Array2D]: + """ + When constructing the simultaneous linear equations (via vectors and matrices) the quantities of each individual + linear object (e.g. their `mapping_matrix`) are combined into single ndarrays via stacking. This does not track + which quantities belong to which linear objects, therefore the linear equation's solutions (which are returned + as ndarrays) do not contain information on which linear object(s) they correspond to. - operated_mapping_matrix = self.linear_func_operated_mapping_matrix_dict[ - linear_obj - ] + For example, consider if two `Mapper` objects with 50 and 100 source pixels are used in an `Inversion`. + The `reconstruction` (which contains the solved for source pixels values) is an ndarray of shape [150], but + the ndarray itself does not track which values belong to which `Mapper`. - mapped_reconstructed_image = np.sum( - reconstruction * operated_mapping_matrix, axis=1 - ) + This function converts an ndarray of a `reconstruction` to a dictionary of ndarrays containing each linear + object's reconstructed data values, where the keys are the instances of each mapper in the inversion. - mapped_reconstructed_image = Array2D( - values=mapped_reconstructed_image, mask=self.mask - ) + The images are the convolved reconstructed data values, meaning they are the solved for reconstruction with PSF + operations included. - mapped_reconstructed_data_dict[linear_obj] = mapped_reconstructed_image + The w-tilde formalism bypasses the calculation of the `mapping_matrix` and it therefore cannot be used to map + the reconstruction's values to the image-plane. Instead, the unique data-to-pixelization mappings are used, + including the 2D convolution operation after mapping is complete. 
- return mapped_reconstructed_data_dict + Parameters + ---------- + reconstruction + The reconstruction (in the source frame) whose values are mapped to a dictionary of values for each + individual mapper (in the image-plane). + """ + return self._mapped_reconstructed_data_dict_from( + use_operated_for_linear_func=True + ) diff --git a/autoarray/inversion/inversion/interferometer/abstract.py b/autoarray/inversion/inversion/interferometer/abstract.py index 3d8fb2deb..958fa71c5 100644 --- a/autoarray/inversion/inversion/interferometer/abstract.py +++ b/autoarray/inversion/inversion/interferometer/abstract.py @@ -71,7 +71,7 @@ def operated_mapping_matrix_list(self) -> List[np.ndarray]: ] @property - def mapped_reconstructed_image_dict( + def mapped_reconstructed_data_dict( self, ) -> Dict[LinearObj, Array2D]: """ @@ -97,7 +97,7 @@ def mapped_reconstructed_image_dict( The reconstruction (in the source frame) whose values are mapped to a dictionary of values for each individual mapper (in the image-plane). 
""" - mapped_reconstructed_image_dict = {} + mapped_reconstructed_data_dict = {} reconstruction_dict = self.source_quantity_dict_from( source_quantity=self.reconstruction @@ -106,7 +106,7 @@ def mapped_reconstructed_image_dict( for linear_obj in self.linear_obj_list: reconstruction = reconstruction_dict[linear_obj] - mapped_reconstructed_image = ( + mapped_reconstructed_data = ( inversion_util.mapped_reconstructed_data_via_mapping_matrix_from( mapping_matrix=linear_obj.mapping_matrix, reconstruction=reconstruction, @@ -114,13 +114,13 @@ def mapped_reconstructed_image_dict( ) ) - mapped_reconstructed_image = Array2D( - values=mapped_reconstructed_image, mask=self.mask + mapped_reconstructed_data = Array2D( + values=mapped_reconstructed_data, mask=self.mask ) - mapped_reconstructed_image_dict[linear_obj] = mapped_reconstructed_image + mapped_reconstructed_data_dict[linear_obj] = mapped_reconstructed_data - return mapped_reconstructed_image_dict + return mapped_reconstructed_data_dict @property def fast_chi_squared(self): diff --git a/autoarray/inversion/inversion/interferometer/mapping.py b/autoarray/inversion/inversion/interferometer/mapping.py index f1455b93b..223bc3387 100644 --- a/autoarray/inversion/inversion/interferometer/mapping.py +++ b/autoarray/inversion/inversion/interferometer/mapping.py @@ -112,7 +112,7 @@ def curvature_matrix(self) -> np.ndarray: return curvature_matrix @property - def mapped_reconstructed_data_dict( + def mapped_reconstructed_operated_data_dict( self, ) -> Dict[LinearObj, Visibilities]: """ @@ -132,7 +132,7 @@ def mapped_reconstructed_data_dict( the `reconstruction` maps to pixels in the data-frame after the 2D non-uniform fast Fourier transformer operation has been performed. 
""" - mapped_reconstructed_data_dict = {} + mapped_reconstructed_operated_data_dict = {} reconstruction_dict = self.source_quantity_dict_from( source_quantity=self.reconstruction @@ -152,6 +152,6 @@ def mapped_reconstructed_data_dict( visibilities = Visibilities(visibilities=visibilities) - mapped_reconstructed_data_dict[linear_obj] = visibilities + mapped_reconstructed_operated_data_dict[linear_obj] = visibilities - return mapped_reconstructed_data_dict + return mapped_reconstructed_operated_data_dict diff --git a/autoarray/inversion/inversion/interferometer/sparse.py b/autoarray/inversion/inversion/interferometer/sparse.py index 20b3746ec..28b9076fa 100644 --- a/autoarray/inversion/inversion/interferometer/sparse.py +++ b/autoarray/inversion/inversion/interferometer/sparse.py @@ -109,7 +109,7 @@ def curvature_matrix_diag(self) -> np.ndarray: ) @property - def mapped_reconstructed_data_dict( + def mapped_reconstructed_operated_data_dict( self, ) -> Dict[LinearObj, Visibilities]: """ @@ -135,9 +135,9 @@ def mapped_reconstructed_data_dict( The reconstruction (in the source frame) whose values are mapped to a dictionary of values for each individual mapper (in the image-plane). 
""" - mapped_reconstructed_data_dict = {} + mapped_reconstructed_operated_data_dict = {} - image_dict = self.mapped_reconstructed_image_dict + image_dict = self.mapped_reconstructed_data_dict for linear_obj in self.linear_obj_list: visibilities = self.transformer.visibilities_from( @@ -146,6 +146,6 @@ def mapped_reconstructed_data_dict( visibilities = Visibilities(visibilities=visibilities) - mapped_reconstructed_data_dict[linear_obj] = visibilities + mapped_reconstructed_operated_data_dict[linear_obj] = visibilities - return mapped_reconstructed_data_dict + return mapped_reconstructed_operated_data_dict diff --git a/autoarray/inversion/mock/mock_inversion.py b/autoarray/inversion/mock/mock_inversion.py index 4d5392398..a73b3df1a 100644 --- a/autoarray/inversion/mock/mock_inversion.py +++ b/autoarray/inversion/mock/mock_inversion.py @@ -23,7 +23,7 @@ def __init__( reconstruction: np.ndarray = None, reconstruction_dict: List[np.ndarray] = None, mapped_reconstructed_data_dict=None, - mapped_reconstructed_image_dict=None, + mapped_reconstructed_operated_data_dict=None, reconstruction_noise_map: np.ndarray = None, reconstruction_noise_map_dict: List[np.ndarray] = None, regularization_term=None, @@ -57,7 +57,9 @@ def __init__( self._reconstruction_dict = reconstruction_dict self._mapped_reconstructed_data_dict = mapped_reconstructed_data_dict - self._mapped_reconstructed_image_dict = mapped_reconstructed_image_dict + self._mapped_reconstructed_operated_data_dict = ( + mapped_reconstructed_operated_data_dict + ) self._reconstruction_noise_map = reconstruction_noise_map self._reconstruction_noise_map_dict = reconstruction_noise_map_dict @@ -152,10 +154,10 @@ def mapped_reconstructed_data_dict(self): return self._mapped_reconstructed_data_dict @property - def mapped_reconstructed_image_dict(self): + def mapped_reconstructed_operated_data_dict(self): """ Using the reconstructed source pixel fluxes we map each source pixel flux back to the image plane and - reconstruct the 
image image. + reconstruct the image data. This uses the unique mappings of every source pixel to image pixels, which is a quantity that is already computed when using the w-tilde formalism. @@ -163,13 +165,13 @@ def mapped_reconstructed_image_dict(self): Returns ------- Array2D - The reconstructed image image which the inversion fits. + The reconstructed image data which the inversion fits. """ - if self._mapped_reconstructed_image_dict is None: - return super().mapped_reconstructed_image_dict + if self._mapped_reconstructed_operated_data_dict is None: + return super().mapped_reconstructed_operated_data_dict - return self._mapped_reconstructed_image_dict + return self._mapped_reconstructed_operated_data_dict @property def reconstruction_noise_map(self): diff --git a/autoarray/inversion/plot/inversion_plotters.py b/autoarray/inversion/plot/inversion_plotters.py index 5b747f2f0..0db63f7b2 100644 --- a/autoarray/inversion/plot/inversion_plotters.py +++ b/autoarray/inversion/plot/inversion_plotters.py @@ -66,7 +66,7 @@ def mapper_plotter_from(self, mapper_index: int) -> MapperPlotter: visuals_2d=self.visuals_2d, ) - def figures_2d(self, reconstructed_image: bool = False): + def figures_2d(self, reconstructed_operated_data: bool = False): """ Plots the individual attributes of the plotter's `Inversion` object in 2D. @@ -75,15 +75,15 @@ def figures_2d(self, reconstructed_image: bool = False): Parameters ---------- - reconstructed_image + reconstructed_operated_data Whether to make a 2D plot (via `imshow`) of the reconstructed image data. 
""" - if reconstructed_image: + if reconstructed_operated_data: self.mat_plot_2d.plot_array( - array=self.inversion.mapped_reconstructed_image, + array=self.inversion.mapped_reconstructed_operated_data, visuals_2d=self.visuals_2d, auto_labels=AutoLabels( - title="Reconstructed Image", filename="reconstructed_image" + title="Reconstructed Image", filename="reconstructed_operated_data" ), ) @@ -91,7 +91,7 @@ def figures_2d_of_pixelization( self, pixelization_index: int = 0, data_subtracted: bool = False, - reconstructed_image: bool = False, + reconstructed_operated_data: bool = False, reconstruction: bool = False, reconstruction_noise_map: bool = False, signal_to_noise_map: bool = False, @@ -112,7 +112,7 @@ def figures_2d_of_pixelization( ---------- pixelization_index The index of the `Mapper` in the `Inversion`'s `linear_obj_list` that is plotted. - reconstructed_image + reconstructed_operated_data Whether to make a 2D plot (via `imshow`) of the mapper's reconstructed image data. reconstruction Whether to make a 2D plot (via `imshow` or `fill`) of the mapper's source-plane reconstruction. 
@@ -160,8 +160,8 @@ def figures_2d_of_pixelization( except AttributeError: pass - if reconstructed_image: - array = self.inversion.mapped_reconstructed_image_dict[ + if reconstructed_operated_data: + array = self.inversion.mapped_reconstructed_operated_data_dict[ mapper_plotter.mapper ] @@ -170,7 +170,7 @@ def figures_2d_of_pixelization( visuals_2d=self.visuals_2d, grid_indexes=mapper_plotter.mapper.over_sampler.uniform_over_sampled, auto_labels=AutoLabels( - title="Reconstructed Image", filename="reconstructed_image" + title="Reconstructed Image", filename="reconstructed_operated_data" ), ) @@ -320,14 +320,14 @@ def subplot_of_mapper( ) self.figures_2d_of_pixelization( - pixelization_index=mapper_index, reconstructed_image=True + pixelization_index=mapper_index, reconstructed_operated_data=True ) self.mat_plot_2d.use_log10 = True self.mat_plot_2d.contour = False self.figures_2d_of_pixelization( - pixelization_index=mapper_index, reconstructed_image=True + pixelization_index=mapper_index, reconstructed_operated_data=True ) self.mat_plot_2d.use_log10 = False @@ -340,7 +340,7 @@ def subplot_of_mapper( self.set_title(label="Mesh Pixel Grid Overlaid") self.figures_2d_of_pixelization( - pixelization_index=mapper_index, reconstructed_image=True + pixelization_index=mapper_index, reconstructed_operated_data=True ) self.set_title(label=None) @@ -424,7 +424,7 @@ def subplot_mappings( self.visuals_2d.indexes = indexes self.figures_2d_of_pixelization( - pixelization_index=pixelization_index, reconstructed_image=True + pixelization_index=pixelization_index, reconstructed_operated_data=True ) self.figures_2d_of_pixelization( diff --git a/test_autoarray/inversion/inversion/interferometer/test_interferometer.py b/test_autoarray/inversion/inversion/interferometer/test_interferometer.py index f09393346..774ea5c0d 100644 --- a/test_autoarray/inversion/inversion/interferometer/test_interferometer.py +++ b/test_autoarray/inversion/inversion/interferometer/test_interferometer.py @@ 
-55,7 +55,7 @@ def test__fast_chi_squared( residual_map = aa.util.fit.residual_map_from( data=interferometer_7_no_fft.data, - model_data=inversion.mapped_reconstructed_data, + model_data=inversion.mapped_reconstructed_operated_data, ) chi_squared_map = aa.util.fit.chi_squared_map_complex_from( diff --git a/test_autoarray/inversion/inversion/test_abstract.py b/test_autoarray/inversion/inversion/test_abstract.py index 75e123015..685fa9239 100644 --- a/test_autoarray/inversion/inversion/test_abstract.py +++ b/test_autoarray/inversion/inversion/test_abstract.py @@ -305,7 +305,7 @@ def test__reconstruction_dict(): assert (inversion.reconstruction_dict[mapper_1] == 2.0 * np.ones(3)).all() -def test__mapped_reconstructed_data(): +def test__mapped_reconstructed_data_dict(): linear_obj_0 = aa.m.MockLinearObj() mapped_reconstructed_data_dict = {linear_obj_0: np.ones(3)} @@ -341,59 +341,105 @@ def test__mapped_reconstructed_data(): assert (inversion.mapped_reconstructed_data == 3.0 * np.ones(2)).all() -def test__mapped_reconstructed_image(): +def test__mapped_reconstructed_operated_data_dict(): linear_obj_0 = aa.m.MockLinearObj() - mapped_reconstructed_image_dict = {linear_obj_0: np.ones(3)} + mapped_reconstructed_operated_data_dict = {linear_obj_0: np.ones(3)} # noinspection PyTypeChecker inversion = aa.m.MockInversion( - mapped_reconstructed_image_dict=mapped_reconstructed_image_dict, + mapped_reconstructed_operated_data_dict=mapped_reconstructed_operated_data_dict, reconstruction=np.ones(3), reconstruction_dict=[None], ) - assert (inversion.mapped_reconstructed_image_dict[linear_obj_0] == np.ones(3)).all() - assert (inversion.mapped_reconstructed_image == np.ones(3)).all() + assert ( + inversion.mapped_reconstructed_operated_data_dict[linear_obj_0] == np.ones(3) + ).all() + assert (inversion.mapped_reconstructed_operated_data == np.ones(3)).all() + + linear_obj_1 = aa.m.MockLinearObj() + + mapped_reconstructed_operated_data_dict = { + linear_obj_0: np.ones(2), + 
linear_obj_1: 2.0 * np.ones(2), + } + + # noinspection PyTypeChecker + inversion = aa.m.MockInversion( + mapped_reconstructed_operated_data_dict=mapped_reconstructed_operated_data_dict, + reconstruction=np.array([1.0, 1.0, 2.0, 2.0]), + reconstruction_dict=[None, None], + ) + + assert ( + inversion.mapped_reconstructed_operated_data_dict[linear_obj_0] == np.ones(2) + ).all() + assert ( + inversion.mapped_reconstructed_operated_data_dict[linear_obj_1] + == 2.0 * np.ones(2) + ).all() + assert (inversion.mapped_reconstructed_operated_data == 3.0 * np.ones(2)).all() + + +def test__mapped_reconstructed_operated_data(): + linear_obj_0 = aa.m.MockLinearObj() + + mapped_reconstructed_operated_data_dict = {linear_obj_0: np.ones(3)} + + # noinspection PyTypeChecker + inversion = aa.m.MockInversion( + mapped_reconstructed_operated_data_dict=mapped_reconstructed_operated_data_dict, + reconstruction=np.ones(3), + reconstruction_dict=[None], + ) + + assert ( + inversion.mapped_reconstructed_operated_data_dict[linear_obj_0] == np.ones(3) + ).all() + assert (inversion.mapped_reconstructed_operated_data == np.ones(3)).all() linear_obj_1 = aa.m.MockLinearObj() - mapped_reconstructed_image_dict = { + mapped_reconstructed_operated_data_dict = { linear_obj_0: np.ones(2), linear_obj_1: 2.0 * np.ones(2), } # noinspection PyTypeChecker inversion = aa.m.MockInversion( - mapped_reconstructed_image_dict=mapped_reconstructed_image_dict, + mapped_reconstructed_operated_data_dict=mapped_reconstructed_operated_data_dict, reconstruction=np.array([1.0, 1.0, 2.0, 2.0]), reconstruction_dict=[None, None], ) - assert (inversion.mapped_reconstructed_image_dict[linear_obj_0] == np.ones(2)).all() assert ( - inversion.mapped_reconstructed_image_dict[linear_obj_1] == 2.0 * np.ones(2) + inversion.mapped_reconstructed_operated_data_dict[linear_obj_0] == np.ones(2) + ).all() + assert ( + inversion.mapped_reconstructed_operated_data_dict[linear_obj_1] + == 2.0 * np.ones(2) ).all() - assert 
(inversion.mapped_reconstructed_image == 3.0 * np.ones(2)).all() + assert (inversion.mapped_reconstructed_operated_data == 3.0 * np.ones(2)).all() def test__data_subtracted_dict(): linear_obj_0 = aa.m.MockLinearObj() - mapped_reconstructed_data_dict = {linear_obj_0: np.ones(3)} + mapped_reconstructed_operated_data_dict = {linear_obj_0: np.ones(3)} # noinspection PyTypeChecker inversion = aa.m.MockInversion( data=3.0 * np.ones(3), linear_obj_list=[linear_obj_0], - mapped_reconstructed_data_dict=mapped_reconstructed_data_dict, + mapped_reconstructed_operated_data_dict=mapped_reconstructed_operated_data_dict, ) assert (inversion.data_subtracted_dict[linear_obj_0] == 3.0 * np.ones(3)).all() linear_obj_1 = aa.m.MockLinearObj() - mapped_reconstructed_data_dict = { + mapped_reconstructed_operated_data_dict = { linear_obj_0: np.ones(3), linear_obj_1: 2.0 * np.ones(3), } @@ -402,7 +448,7 @@ def test__data_subtracted_dict(): inversion = aa.m.MockInversion( data=3.0 * np.ones(3), linear_obj_list=[linear_obj_0, linear_obj_1], - mapped_reconstructed_data_dict=mapped_reconstructed_data_dict, + mapped_reconstructed_operated_data_dict=mapped_reconstructed_operated_data_dict, ) assert (inversion.data_subtracted_dict[linear_obj_0] == np.ones(3)).all() diff --git a/test_autoarray/inversion/inversion/test_factory.py b/test_autoarray/inversion/inversion/test_factory.py index e45e6e415..a679682a4 100644 --- a/test_autoarray/inversion/inversion/test_factory.py +++ b/test_autoarray/inversion/inversion/test_factory.py @@ -22,7 +22,9 @@ def test__inversion_imaging__via_linear_obj_func_list(masked_imaging_7x7_no_blur assert isinstance(inversion.linear_obj_list[0], aa.m.MockLinearObjFuncList) assert isinstance(inversion, aa.InversionImagingMapping) - assert inversion.mapped_reconstructed_image == pytest.approx(np.ones(9), 1.0e-4) + assert inversion.mapped_reconstructed_operated_data == pytest.approx( + np.ones(9), 1.0e-4 + ) assert inversion.reconstruction == pytest.approx(np.array([2.0]), 
1.0e-4) # Overwrites use_sparse_operator to false. @@ -38,7 +40,9 @@ def test__inversion_imaging__via_linear_obj_func_list(masked_imaging_7x7_no_blur assert isinstance(inversion.linear_obj_list[0], aa.m.MockLinearObjFuncList) assert isinstance(inversion, aa.InversionImagingMapping) - assert inversion.mapped_reconstructed_image == pytest.approx(np.ones(9), 1.0e-4) + assert inversion.mapped_reconstructed_operated_data == pytest.approx( + np.ones(9), 1.0e-4 + ) assert inversion.reconstruction == pytest.approx(np.array([2.0]), 1.0e-4) # Works with multiple parameters @@ -56,7 +60,9 @@ def test__inversion_imaging__via_linear_obj_func_list(masked_imaging_7x7_no_blur assert isinstance(inversion.linear_obj_list[0], aa.m.MockLinearObjFuncList) assert isinstance(inversion, aa.InversionImagingMapping) assert inversion.reconstruction == pytest.approx(np.array([0.0, 2.0]), abs=1.0e-4) - assert inversion.mapped_reconstructed_image == pytest.approx(np.ones(9), 1.0e-4) + assert inversion.mapped_reconstructed_operated_data == pytest.approx( + np.ones(9), 1.0e-4 + ) def test__inversion_imaging__via_mapper( @@ -78,7 +84,9 @@ def test__inversion_imaging__via_mapper( # assert inversion.log_det_curvature_reg_matrix_term == pytest.approx( # 4.609440907938719, 1.0e-4 # ) - assert inversion.mapped_reconstructed_image == pytest.approx(np.ones(9), 1.0e-4) + assert inversion.mapped_reconstructed_operated_data == pytest.approx( + np.ones(9), 1.0e-4 + ) masked_imaging_7x7_no_blur_sparse_operator = ( masked_imaging_7x7_no_blur.apply_sparse_operator_cpu() @@ -93,7 +101,9 @@ def test__inversion_imaging__via_mapper( assert inversion.log_det_curvature_reg_matrix_term == pytest.approx( 7.257175708246, 1.0e-4 ) - assert inversion.mapped_reconstructed_image == pytest.approx(np.ones(9), 1.0e-4) + assert inversion.mapped_reconstructed_operated_data == pytest.approx( + np.ones(9), 1.0e-4 + ) inversion = aa.Inversion( dataset=masked_imaging_7x7_no_blur, @@ -103,7 +113,9 @@ def 
test__inversion_imaging__via_mapper( assert isinstance(inversion.linear_obj_list[0], aa.MapperDelaunay) assert isinstance(inversion, aa.InversionImagingMapping) assert inversion.log_det_curvature_reg_matrix_term == pytest.approx(10.6674, 1.0e-4) - assert inversion.mapped_reconstructed_image == pytest.approx(np.ones(9), 1.0e-4) + assert inversion.mapped_reconstructed_operated_data == pytest.approx( + np.ones(9), 1.0e-4 + ) inversion = aa.Inversion( dataset=masked_imaging_7x7_no_blur_sparse_operator, @@ -112,7 +124,9 @@ def test__inversion_imaging__via_mapper( assert isinstance(inversion.linear_obj_list[0], aa.MapperDelaunay) assert inversion.log_det_curvature_reg_matrix_term == pytest.approx(10.6674, 1.0e-4) - assert inversion.mapped_reconstructed_image == pytest.approx(np.ones(9), 1.0e-4) + assert inversion.mapped_reconstructed_operated_data == pytest.approx( + np.ones(9), 1.0e-4 + ) def test__inversion_imaging__via_regularizations( @@ -141,7 +155,9 @@ def test__inversion_imaging__via_regularizations( assert inversion.log_det_curvature_reg_matrix_term == pytest.approx( 10.66747, 1.0e-4 ) - assert inversion.mapped_reconstructed_image == pytest.approx(np.ones(9), 1.0e-4) + assert inversion.mapped_reconstructed_operated_data == pytest.approx( + np.ones(9), 1.0e-4 + ) mapper = copy.copy(delaunay_mapper_9_3x3) mapper.regularization = regularization_adaptive_brightness @@ -155,7 +171,9 @@ def test__inversion_imaging__via_regularizations( assert inversion.log_det_curvature_reg_matrix_term == pytest.approx( 47.410169, 1.0e-4 ) - assert inversion.mapped_reconstructed_image == pytest.approx(np.ones(9), 1.0e-4) + assert inversion.mapped_reconstructed_operated_data == pytest.approx( + np.ones(9), 1.0e-4 + ) def test__inversion_imaging__source_pixel_zeroed_indices( @@ -210,7 +228,9 @@ def test__inversion_imaging__via_linear_obj_func_and_mapper( np.array([2.0]), 1.0e-4 ) assert inversion.reconstruction_dict[rectangular_mapper_7x7_3x3][0] < 1.0e-4 - assert 
inversion.mapped_reconstructed_image == pytest.approx(np.ones(9), 1.0e-4) + assert inversion.mapped_reconstructed_operated_data == pytest.approx( + np.ones(9), 1.0e-4 + ) masked_imaging_7x7_no_blur_sparse_operator = ( masked_imaging_7x7_no_blur.apply_sparse_operator_cpu() @@ -293,8 +313,11 @@ def test__inversion_imaging__compare_mapping_and_sparse_operator_values( assert inversion_sparse_operator.reconstruction == pytest.approx( inversion_mapping.reconstruction, 1.0e-4 ) - assert inversion_sparse_operator.mapped_reconstructed_image.array == pytest.approx( - inversion_mapping.mapped_reconstructed_image.array, 1.0e-4 + assert ( + inversion_sparse_operator.mapped_reconstructed_operated_data.array + == pytest.approx( + inversion_mapping.mapped_reconstructed_operated_data.array, 1.0e-4 + ) ) assert inversion_sparse_operator.log_det_curvature_reg_matrix_term == pytest.approx( inversion_mapping.log_det_curvature_reg_matrix_term @@ -328,9 +351,9 @@ def test__inversion_imaging__linear_obj_func_and_non_func_give_same_terms( masked_imaging_7x7_no_blur = copy.copy(masked_imaging_7x7_no_blur) - masked_imaging_7x7_no_blur.data -= inversion.mapped_reconstructed_data_dict[ - linear_obj - ] + masked_imaging_7x7_no_blur.data -= ( + inversion.mapped_reconstructed_operated_data_dict[linear_obj] + ) inversion_no_linear_func = aa.Inversion( dataset=masked_imaging_7x7_no_blur, @@ -395,8 +418,8 @@ def test__inversion_imaging__linear_obj_func_with_sparse_operator( assert inversion_mapping.reconstruction == pytest.approx( inversion_sparse_operator.reconstruction, 1.0e-4 ) - assert inversion_mapping.mapped_reconstructed_image.array == pytest.approx( - inversion_sparse_operator.mapped_reconstructed_image.array, 1.0e-4 + assert inversion_mapping.mapped_reconstructed_operated_data.array == pytest.approx( + inversion_sparse_operator.mapped_reconstructed_operated_data.array, 1.0e-4 ) linear_obj_1 = aa.m.MockLinearObjFuncList( @@ -453,11 +476,11 @@ def test__inversion_interferometer__via_mapper( 
assert isinstance(inversion.linear_obj_list[0], aa.MapperRectangularUniform) assert isinstance(inversion, aa.InversionInterferometerMapping) - assert inversion.mapped_reconstructed_data == pytest.approx( + assert inversion.mapped_reconstructed_operated_data == pytest.approx( 1.0 + 0.0j * np.ones(shape=(7,)), 1.0e-4 ) - assert (np.imag(inversion.mapped_reconstructed_data) < 0.0001).all() - assert (np.imag(inversion.mapped_reconstructed_data) > 0.0).all() + assert (np.imag(inversion.mapped_reconstructed_operated_data) < 0.0001).all() + assert (np.imag(inversion.mapped_reconstructed_operated_data) > 0.0).all() assert inversion.log_det_curvature_reg_matrix_term == pytest.approx(10.2116, 1.0e-4) inversion = aa.Inversion( @@ -468,11 +491,11 @@ def test__inversion_interferometer__via_mapper( assert isinstance(inversion.linear_obj_list[0], aa.MapperDelaunay) assert isinstance(inversion, aa.InversionInterferometerMapping) - assert inversion.mapped_reconstructed_data == pytest.approx( + assert inversion.mapped_reconstructed_operated_data == pytest.approx( 1.0 + 0.0j * np.ones(shape=(7,)), 1.0e-4 ) - assert (np.imag(inversion.mapped_reconstructed_data) < 0.0001).all() - assert (np.imag(inversion.mapped_reconstructed_data) > 0.0).all() + assert (np.imag(inversion.mapped_reconstructed_operated_data) < 0.0001).all() + assert (np.imag(inversion.mapped_reconstructed_operated_data) > 0.0).all() assert inversion.log_det_curvature_reg_matrix_term == pytest.approx( 14.49772, 1.0e-4 ) @@ -540,13 +563,15 @@ def test__inversion_matrices__x2_mappers( ) assert inversion.reconstruction[13] == pytest.approx(0.49999703908867, 1.0e-4) - assert inversion.mapped_reconstructed_data_dict[rectangular_mapper_7x7_3x3][ - 4 - ] == pytest.approx(0.5000029, 1.0e-4) - assert inversion.mapped_reconstructed_data_dict[delaunay_mapper_9_3x3][ + assert inversion.mapped_reconstructed_operated_data_dict[ + rectangular_mapper_7x7_3x3 + ][4] == pytest.approx(0.5000029, 1.0e-4) + assert 
inversion.mapped_reconstructed_operated_data_dict[delaunay_mapper_9_3x3][ 3 ] == pytest.approx(0.49999704, 1.0e-4) - assert inversion.mapped_reconstructed_image[4] == pytest.approx(0.99999998, 1.0e-4) + assert inversion.mapped_reconstructed_operated_data[4] == pytest.approx( + 0.99999998, 1.0e-4 + ) def test__inversion_imaging__positive_only_solver(masked_imaging_7x7_no_blur): @@ -566,7 +591,9 @@ def test__inversion_imaging__positive_only_solver(masked_imaging_7x7_no_blur): assert isinstance(inversion.linear_obj_list[0], aa.m.MockLinearObjFuncList) assert isinstance(inversion, aa.InversionImagingMapping) - assert inversion.mapped_reconstructed_image == pytest.approx(np.ones(9), 1.0e-4) + assert inversion.mapped_reconstructed_operated_data == pytest.approx( + np.ones(9), 1.0e-4 + ) assert inversion.reconstruction == pytest.approx(np.array([2.0]), 1.0e-4) diff --git a/test_autoarray/inversion/inversion/test_inversion_util.py b/test_autoarray/inversion/inversion/test_inversion_util.py index cd8873e0b..db1bba80b 100644 --- a/test_autoarray/inversion/inversion/test_inversion_util.py +++ b/test_autoarray/inversion/inversion/test_inversion_util.py @@ -93,25 +93,25 @@ def test__mapped_reconstructed_data_via_mapping_matrix_from(): reconstruction = np.array([1.0, 1.0, 2.0]) - mapped_reconstructed_data = ( + mapped_reconstructed_operated_data = ( aa.util.inversion.mapped_reconstructed_data_via_mapping_matrix_from( mapping_matrix=mapping_matrix, reconstruction=reconstruction ) ) - assert (mapped_reconstructed_data == np.array([1.0, 1.0, 2.0])).all() + assert (mapped_reconstructed_operated_data == np.array([1.0, 1.0, 2.0])).all() mapping_matrix = np.array([[0.25, 0.50, 0.25], [0.0, 1.0, 0.0], [0.0, 0.25, 0.75]]) reconstruction = np.array([1.0, 1.0, 2.0]) - mapped_reconstructed_data = ( + mapped_reconstructed_operated_data = ( aa.util.inversion.mapped_reconstructed_data_via_mapping_matrix_from( mapping_matrix=mapping_matrix, reconstruction=reconstruction ) ) - assert 
(mapped_reconstructed_data == np.array([1.25, 1.0, 1.75])).all() + assert (mapped_reconstructed_operated_data == np.array([1.25, 1.0, 1.75])).all() def test__mapped_reconstructed_data_via_image_to_pix_unique_from(): @@ -134,14 +134,14 @@ def test__mapped_reconstructed_data_via_image_to_pix_unique_from(): reconstruction = np.array([1.0, 1.0, 2.0]) - mapped_reconstructed_data = aa.util.inversion_imaging_numba.mapped_reconstructed_data_via_image_to_pix_unique_from( + mapped_reconstructed_operated_data = aa.util.inversion_imaging_numba.mapped_reconstructed_data_via_image_to_pix_unique_from( data_to_pix_unique=data_to_pix_unique.astype("int"), data_weights=data_weights, pix_lengths=pix_lengths.astype("int"), reconstruction=reconstruction, ) - assert (mapped_reconstructed_data == np.array([1.0, 1.0, 2.0])).all() + assert (mapped_reconstructed_operated_data == np.array([1.0, 1.0, 2.0])).all() pix_indexes_for_sub_slim_index = np.array( [[0], [1], [1], [2], [1], [1], [1], [1], [1], [2], [2], [2]] @@ -164,14 +164,14 @@ def test__mapped_reconstructed_data_via_image_to_pix_unique_from(): reconstruction = np.array([1.0, 1.0, 2.0]) - mapped_reconstructed_data = aa.util.inversion_imaging_numba.mapped_reconstructed_data_via_image_to_pix_unique_from( + mapped_reconstructed_operated_data = aa.util.inversion_imaging_numba.mapped_reconstructed_data_via_image_to_pix_unique_from( data_to_pix_unique=data_to_pix_unique.astype("int"), data_weights=data_weights, pix_lengths=pix_lengths.astype("int"), reconstruction=reconstruction, ) - assert (mapped_reconstructed_data == np.array([1.25, 1.0, 1.75])).all() + assert (mapped_reconstructed_operated_data == np.array([1.25, 1.0, 1.75])).all() def test__preconditioner_matrix_via_mapping_matrix_from(): diff --git a/test_autoarray/inversion/plot/test_inversion_plotters.py b/test_autoarray/inversion/plot/test_inversion_plotters.py index 0ed897c7f..225470cb8 100644 --- a/test_autoarray/inversion/plot/test_inversion_plotters.py +++ 
b/test_autoarray/inversion/plot/test_inversion_plotters.py @@ -28,19 +28,19 @@ def test__individual_attributes_are_output_for_all_mappers( mat_plot_2d=aplt.MatPlot2D(output=aplt.Output(path=plot_path, format="png")), ) - inversion_plotter.figures_2d(reconstructed_image=True) + inversion_plotter.figures_2d(reconstructed_operated_data=True) - assert path.join(plot_path, "reconstructed_image.png") in plot_patch.paths + assert path.join(plot_path, "reconstructed_operated_data.png") in plot_patch.paths inversion_plotter.figures_2d_of_pixelization( pixelization_index=0, - reconstructed_image=True, + reconstructed_operated_data=True, reconstruction=True, reconstruction_noise_map=True, regularization_weights=True, ) - assert path.join(plot_path, "reconstructed_image.png") in plot_patch.paths + assert path.join(plot_path, "reconstructed_operated_data.png") in plot_patch.paths assert path.join(plot_path, "reconstruction.png") in plot_patch.paths assert path.join(plot_path, "reconstruction_noise_map.png") in plot_patch.paths assert path.join(plot_path, "regularization_weights.png") in plot_patch.paths