Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion autoarray/config/visualize/plots.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,7 @@ inversion: # Settings for plots of inversions (e
sub_pixels_per_image_pixels: false # Plot the number of sub pixels per masked data pixels?
mesh_pixels_per_image_pixels: false # Plot the number of image-plane mesh pixels per masked data pixels?
image_pixels_per_mesh_pixels: false # Plot the number of image pixels in each pixel of the mesh?
reconstructed_image: false # Plot image of the reconstructed data (e.g. in the image-plane)?
reconstructed_operated_data: false # Plot image of the reconstructed data (e.g. in the image-plane)?
reconstruction: false # Plot the reconstructed inversion (e.g. the pixelization's mesh in the source-plane)?
regularization_weights: false # Plot the effective regularization weight of every inversion mesh pixel?
fit_interferometer: # Settings for plots of fits to interferometer datasets (e.g. FitInterferometerPlotter).
Expand Down
24 changes: 4 additions & 20 deletions autoarray/inversion/inversion/abstract.py
Original file line number Diff line number Diff line change
Expand Up @@ -514,25 +514,9 @@ def source_quantity_dict_from(
return source_quantity_dict

@property
def mapped_reconstructed_data_dict(self) -> Dict[LinearObj, Array2D]:
def mapped_reconstructed_operated_data_dict(self) -> Dict[LinearObj, Array2D]:
raise NotImplementedError

@property
def mapped_reconstructed_image_dict(self) -> Dict[LinearObj, Array2D]:
"""
Using the reconstructed source pixel fluxes we map each source pixel flux back to the image plane and
reconstruct the image data.

This uses the unique mappings of every source pixel to image pixels, which is a quantity that is already
computed when using the w-tilde formalism.

Returns
-------
Array2D
The reconstructed image data which the inversion fits.
"""
return self.mapped_reconstructed_data_dict

@property
def mapped_reconstructed_data(self) -> Union[Array2D, Visibilities]:
"""
Expand All @@ -550,7 +534,7 @@ def mapped_reconstructed_data(self) -> Union[Array2D, Visibilities]:
return sum(self.mapped_reconstructed_data_dict.values())

@property
def mapped_reconstructed_image(self) -> Array2D:
def mapped_reconstructed_operated_data(self) -> Union[Array2D, Visibilities]:
"""
Using the reconstructed source pixel fluxes we map each source pixel flux back to the image plane and
reconstruct the image data.
Expand All @@ -563,7 +547,7 @@ def mapped_reconstructed_image(self) -> Array2D:
Array2D
The reconstructed image data which the inversion fits.
"""
Comment on lines 537 to 549
Copy link

Copilot AI Feb 9, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The mapped_reconstructed_operated_data docstring is currently identical to mapped_reconstructed_data and does not explain what operation(s) are included (e.g. PSF convolution for imaging, NUFFT for interferometer). Please update it to clearly define “operated” so users know this is the data-frame model used for fitting.

Copilot uses AI. Check for mistakes.
return sum(self.mapped_reconstructed_image_dict.values())
return sum(self.mapped_reconstructed_operated_data_dict.values())

@property
def data_subtracted_dict(self) -> Dict[LinearObj, Array2D]:
Expand All @@ -589,7 +573,7 @@ def data_subtracted_dict(self) -> Dict[LinearObj, Array2D]:
if linear_obj != linear_obj_other:
data_subtracted_dict[
linear_obj
] -= self.mapped_reconstructed_image_dict[linear_obj_other]
] -= self.mapped_reconstructed_operated_data_dict[linear_obj_other]

return data_subtracted_dict

Expand Down
37 changes: 37 additions & 0 deletions autoarray/inversion/inversion/imaging/abstract.py
Original file line number Diff line number Diff line change
Expand Up @@ -75,6 +75,19 @@ def __init__(
def psf(self):
return self.dataset.psf

@property
def mapping_matrix_list(self) -> List[np.ndarray]:
    """
    Return every linear object's unoperated `mapping_matrix`.

    A linear object's `mapping_matrix` describes the mappings between the
    observed data's values and that object's model, before an operation
    like a convolution is applied. These matrices are the building blocks
    of the simultaneous linear equations which reconstruct the data.

    Returns
    -------
    A list containing one unoperated (unconvolved) `mapping_matrix` per
    entry of `self.linear_obj_list`, in the same order.
    """
    matrix_list = []

    for obj in self.linear_obj_list:
        matrix_list.append(obj.mapping_matrix)

    return matrix_list

@property
def operated_mapping_matrix_list(self) -> List[np.ndarray]:
"""
Expand Down Expand Up @@ -115,6 +128,30 @@ def _updated_cls_key_dict_from(self, cls: Type, preload_dict: Dict) -> Dict:

return cls_dict

@property
def linear_func_mapping_matrix_dict(self) -> Dict:
    """
    Maps every linear func object in the inversion to its unoperated
    `mapping_matrix`.

    The `mapping_matrix` of a linear object describes the mappings between
    the observed data's values and the linear object's model *before* any
    operation (e.g. the 2D PSF convolution) is applied. The operated
    (convolved) equivalents are provided by the separate property
    `linear_func_operated_mapping_matrix_dict`.

    This dictionary is used for constructing the matrices that perform the
    linear inversion in an efficient way for the psf precision operator
    calculation.

    Returns
    -------
    A dictionary mapping every linear function object to its unoperated
    mapping matrix.
    """
    # Dict comprehension replaces the manual build-up loop; iteration
    # order (and therefore dict key order) is unchanged.
    return {
        linear_func: linear_func.mapping_matrix
        for linear_func in self.cls_list_from(cls=AbstractLinearObjFuncList)
    }

@property
def linear_func_operated_mapping_matrix_dict(self) -> Dict:
"""
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -277,7 +277,7 @@ def curvature_matrix_with_added_to_diag_from(
return curvature_matrix.at[inds, inds].add(value)


def mapped_reconstructed_image_via_sparse_operator_from(
def mapped_reconstructed_operated_data_via_sparse_operator_from(
reconstruction, # (S,)
rows,
cols,
Expand Down
88 changes: 67 additions & 21 deletions autoarray/inversion/inversion/imaging/mapping.py
Original file line number Diff line number Diff line change
Expand Up @@ -101,8 +101,44 @@ def curvature_matrix(self):
xp=self._xp,
)

def _mapped_reconstructed_data_dict_from(
    self,
    mapping_matrix_list,
) -> Dict["LinearObj", "Array2D"]:
    """
    Map the solved-for `reconstruction` back to image-plane arrays, one per
    linear object, using the supplied mapping matrices (which may be
    operated or unoperated).

    Parameters
    ----------
    mapping_matrix_list
        One mapping matrix per entry of `self.linear_obj_list`, in the
        same order.

    Returns
    -------
    A dictionary mapping each linear object to its reconstructed
    image-plane data as an `Array2D`.
    """
    solution_dict = self.source_quantity_dict_from(
        source_quantity=self.reconstruction
    )

    result_dict = {}

    for index, obj in enumerate(self.linear_obj_list):
        mapped_values = (
            inversion_util.mapped_reconstructed_data_via_mapping_matrix_from(
                mapping_matrix=mapping_matrix_list[index],
                reconstruction=solution_dict[obj],
                xp=self._xp,
            )
        )

        result_dict[obj] = Array2D(values=mapped_values, mask=self.mask)

    return result_dict

@property
def mapped_reconstructed_data_dict(self) -> Dict[LinearObj, Array2D]:
def mapped_reconstructed_data_dict(self) -> Dict["LinearObj", "Array2D"]:
"""
When constructing the simultaneous linear equations (via vectors and matrices) the quantities of each individual
linear object (e.g. their `mapping_matrix`) are combined into single ndarrays via stacking. This does not track
Expand All @@ -116,6 +152,9 @@ def mapped_reconstructed_data_dict(self) -> Dict[LinearObj, Array2D]:
This function converts an ndarray of a `reconstruction` to a dictionary of ndarrays containing each linear
object's reconstructed data values, where the keys are the instances of each mapper in the inversion.

The images are the unconvolved reconstructed data values, meaning they are the solved for reconstruction
with PSF operations removed.

To perform this mapping the (unoperated) `mapping_matrix` is used, which straightforwardly describes how every
value of the `reconstruction` maps to pixels in the data-frame prior to the 2D convolution operation.
Comment on lines +156 to 159
Copy link

Copilot AI Feb 9, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

In mapped_reconstructed_data_dict the docstring says the mapping_matrix maps to the data-frame “after the 2D convolution operation has been performed”, but this property now uses mapping_matrix_list (unoperated matrices). The description should be updated to reflect that these mapped values are before PSF convolution (and reserve the “after convolution” wording for mapped_reconstructed_operated_data_dict).

Suggested change
with PSF operations removed.
To perform this mapping the `mapping_matrix` is used, which straightforwardly describes how every value of
the `reconstruction` maps to pixels in the data-frame after the 2D convolution operation has been performed.
with PSF operations removed (that is, before any PSF / 2D convolution is applied).
To perform this mapping the (unoperated) `mapping_matrix` is used, which straightforwardly describes how every
value of the `reconstruction` maps to pixels in the data-frame prior to the 2D convolution operation.

Copilot uses AI. Check for mistakes.

Expand All @@ -125,30 +164,37 @@ def mapped_reconstructed_data_dict(self) -> Dict[LinearObj, Array2D]:
The reconstruction (in the source frame) whose values are mapped to a dictionary of values for each
individual mapper (in the image-plane).
"""

mapped_reconstructed_data_dict = {}

reconstruction_dict = self.source_quantity_dict_from(
source_quantity=self.reconstruction
return self._mapped_reconstructed_data_dict_from(
mapping_matrix_list=self.mapping_matrix_list
)

operated_mapping_matrix_list = self.operated_mapping_matrix_list
@property
def mapped_reconstructed_operated_data_dict(self) -> Dict["LinearObj", "Array2D"]:
"""
When constructing the simultaneous linear equations (via vectors and matrices) the quantities of each individual
linear object (e.g. their `mapping_matrix`) are combined into single ndarrays via stacking. This does not track
which quantities belong to which linear objects, therefore the linear equation's solutions (which are returned
as ndarrays) do not contain information on which linear object(s) they correspond to.

for index, linear_obj in enumerate(self.linear_obj_list):
reconstruction = reconstruction_dict[linear_obj]
For example, consider if two `Mapper` objects with 50 and 100 source pixels are used in an `Inversion`.
The `reconstruction` (which contains the solved for source pixels values) is an ndarray of shape [150], but
the ndarray itself does not track which values belong to which `Mapper`.

mapped_reconstructed_image = (
inversion_util.mapped_reconstructed_data_via_mapping_matrix_from(
mapping_matrix=operated_mapping_matrix_list[index],
reconstruction=reconstruction,
xp=self._xp,
)
)
This function converts an ndarray of a `reconstruction` to a dictionary of ndarrays containing each linear
object's reconstructed data values, where the keys are the instances of each mapper in the inversion.

mapped_reconstructed_image = Array2D(
values=mapped_reconstructed_image, mask=self.mask
)
The images are the convolved reconstructed data values, meaning they are the solved for reconstruction with PSF
operations included.

mapped_reconstructed_data_dict[linear_obj] = mapped_reconstructed_image
To perform this mapping the `mapping_matrix` is used, which straightforwardly describes how every value of
the `reconstruction` maps to pixels in the data-frame after the 2D convolution operation has been performed.

return mapped_reconstructed_data_dict
Parameters
----------
reconstruction
The reconstruction (in the source frame) whose values are mapped to a dictionary of values for each
individual mapper (in the image-plane).
"""
return self._mapped_reconstructed_data_dict_from(
mapping_matrix_list=self.operated_mapping_matrix_list
)
Loading
Loading