diff --git a/autoarray/inversion/inversion/settings.py b/autoarray/inversion/inversion/settings.py index f81b839c7..b987099be 100644 --- a/autoarray/inversion/inversion/settings.py +++ b/autoarray/inversion/inversion/settings.py @@ -17,10 +17,6 @@ def __init__( no_regularization_add_to_curvature_diag_value: float = None, use_w_tilde_numpy: bool = False, use_source_loop: bool = False, - image_mesh_min_mesh_pixels_per_pixel=None, - image_mesh_min_mesh_number: int = 5, - image_mesh_adapt_background_percent_threshold: float = None, - image_mesh_adapt_background_percent_check: float = 0.8, tolerance: float = 1e-8, maxiter: int = 250, ): @@ -46,19 +42,6 @@ def __init__( which exploit sparsity to do the calculation normally in a more efficient way). use_source_loop Shhhh its a secret. - image_mesh_min_mesh_pixels_per_pixel - If not None, the image-mesh must place this many mesh pixels per image pixels in the N highest weighted - regions of the adapt data, or an `InversionException` is raised. This can be used to force the image-mesh - to cluster large numbers of source pixels to the adapt-datas brightest regions. - image_mesh_min_mesh_number - The value N given above in the docstring for `image_mesh_min_mesh_pixels_per_pixel`, indicating how many - image pixels are checked for having a threshold number of mesh pixels. - image_mesh_adapt_background_percent_threshold - If not None, the image-mesh must place this percentage of mesh-pixels in the background regions of the - `adapt_data`, where the background is the `image_mesh_adapt_background_percent_check` masked data pixels - with the lowest values. - image_mesh_adapt_background_percent_check - The percentage of masked data pixels which are checked for the background criteria. tolerance For an interferometer inversion using the linear operators method, sets the tolerance of the solver (this input does nothing for dataset data and other interferometer methods). 
@@ -73,14 +56,6 @@ def __init__( self._no_regularization_add_to_curvature_diag_value = ( no_regularization_add_to_curvature_diag_value ) - self.image_mesh_min_mesh_pixels_per_pixel = image_mesh_min_mesh_pixels_per_pixel - self.image_mesh_min_mesh_number = image_mesh_min_mesh_number - self.image_mesh_adapt_background_percent_threshold = ( - image_mesh_adapt_background_percent_threshold - ) - self.image_mesh_adapt_background_percent_check = ( - image_mesh_adapt_background_percent_check - ) self.tolerance = tolerance self.maxiter = maxiter diff --git a/autoarray/inversion/mock/mock_image_mesh.py b/autoarray/inversion/mock/mock_image_mesh.py index 1532af497..bc87c72df 100644 --- a/autoarray/inversion/mock/mock_image_mesh.py +++ b/autoarray/inversion/mock/mock_image_mesh.py @@ -12,15 +12,3 @@ def __init__(self, image_plane_mesh_grid=None): super().__init__() self.image_plane_mesh_grid = image_plane_mesh_grid - - def image_plane_mesh_grid_from( - self, mask: Mask2D, adapt_data: Optional[np.ndarray], settings=None - ) -> Grid2DIrregular: - if adapt_data is not None and self.image_plane_mesh_grid is not None: - return adapt_data * self.image_plane_mesh_grid - - return self.image_plane_mesh_grid - - @property - def uses_adapt_images(self) -> bool: - return False diff --git a/autoarray/inversion/mock/mock_mesh.py b/autoarray/inversion/mock/mock_mesh.py index 721c6a990..5ee3cea88 100644 --- a/autoarray/inversion/mock/mock_mesh.py +++ b/autoarray/inversion/mock/mock_mesh.py @@ -37,13 +37,8 @@ def image_plane_mesh_grid_from( self, mask: Mask2D, adapt_data, - settings=None, ): if adapt_data is not None and self.image_plane_mesh_grid is not None: return adapt_data * self.image_plane_mesh_grid return self.image_plane_mesh_grid - - @property - def requires_image_mesh(self): - return False diff --git a/autoarray/inversion/mock/mock_pixelization.py b/autoarray/inversion/mock/mock_pixelization.py index a71abebf7..c30bb3867 100644 --- a/autoarray/inversion/mock/mock_pixelization.py +++ 
b/autoarray/inversion/mock/mock_pixelization.py @@ -7,13 +7,10 @@ def __init__( self, mesh=None, regularization=None, - image_mesh=None, mapper=None, image_plane_mesh_grid=None, ): - super().__init__( - mesh=mesh, regularization=regularization, image_mesh=image_mesh - ) + super().__init__(mesh=mesh, regularization=regularization) self.mapper = mapper self.image_plane_mesh_grid = image_plane_mesh_grid diff --git a/autoarray/inversion/pixelization/image_mesh/abstract.py b/autoarray/inversion/pixelization/image_mesh/abstract.py index e071379fd..fc7128df8 100644 --- a/autoarray/inversion/pixelization/image_mesh/abstract.py +++ b/autoarray/inversion/pixelization/image_mesh/abstract.py @@ -18,6 +18,7 @@ def __init__(self): """ An abstract image mesh, which is used by pixelizations to determine the (y,x) mesh coordinates from image data. + """ pass @@ -29,7 +30,6 @@ def image_plane_mesh_grid_from( self, mask: Mask2D, adapt_data: Optional[np.ndarray] = None, - settings: SettingsInversion = None, ) -> Grid2DIrregular: raise NotImplementedError @@ -65,131 +65,3 @@ def mesh_pixels_per_image_pixels_from( ) return Array2D(values=mesh_pixels_per_image_pixels, mask=mask) - - def check_mesh_pixels_per_image_pixels( - self, mask: Mask2D, mesh_grid: Grid2DIrregular, settings: SettingsInversion - ): - """ - Checks the number of mesh pixels in every image pixel and raises an `InversionException` if there are fewer - mesh pixels inside a certain number of image-pixels than the input settings. - - This allows a user to force a model-fit to use image-mesh's which cluster a large number of mesh pixels to - the brightest regions of the image data (E.g. the highst weighted regions). - - The check works as follows: - - 1) Compute the 2D array of the number of mesh pixels in every masked data image pixel. - 2) Find the number of mesh pixels in the N data pixels with the larger number of mesh pixels, where N is - given by `settings.image_mesh_min_mesh_number`. 
For example, if `settings.image_mesh_min_mesh_number=5` then - the number of mesh pixels in the 5 data pixels with the most data pixels is computed. - 3) Compare the lowest value above to the value `settings.image_mesh_min_mesh_pixels_per_pixel`. If the value is - below this value, raise an `InversionException`. - - Therefore, by settings `settings.image_mesh_min_mesh_pixels_per_pixel` to a value above 1 the code is forced - to adapt the image mesh enough to put many mesh pixels in the brightest image pixels. - - Parameters - ---------- - mask - The mask of the dataset being analysed, which the pixelization grid maps too. The number of - mesh pixels mapped inside each of this mask's image-pixels is returned. - mesh_grid - The image mesh-grid computed by the class which adapts to the data's mask. The number of image mesh pixels - that fall within each of the data's mask pixels is returned. - settings - The inversion settings, which have the criteria dictating if the image-mesh has clustered enough or if - an exception is raised. 
- """ - - if os.environ.get("PYAUTOFIT_TEST_MODE") == "1": - return - - if settings is not None: - if settings.image_mesh_min_mesh_pixels_per_pixel is not None: - mesh_pixels_per_image_pixels = self.mesh_pixels_per_image_pixels_from( - mask=mask, mesh_grid=mesh_grid - ) - - indices_of_highest_values = np.argsort(mesh_pixels_per_image_pixels)[ - -settings.image_mesh_min_mesh_number : - ] - lowest_mesh_pixels = np.min( - mesh_pixels_per_image_pixels[indices_of_highest_values] - ) - - if lowest_mesh_pixels < settings.image_mesh_min_mesh_pixels_per_pixel: - raise exc.InversionException() - - return mesh_grid - - def check_adapt_background_pixels( - self, - mask: Mask2D, - mesh_grid: Grid2DIrregular, - adapt_data: Optional[np.ndarray], - settings: SettingsInversion, - ): - """ - Checks the number of mesh pixels in the background of the image-mesh and raises an `InversionException` if - there are fewer mesh pixels in the background than the input settings. - - This allows a user to force a model-fit to use image-mesh's which cluster a minimum number of mesh pixels to - the faintest regions of the image data (E.g. the lowest weighted regions). This prevents too few image-mesh - pixels being allocated to the background of the data. - - The check works as follows: - - 1) Find all pixels in the background of the `adapt_data`, which are N pixels with the lowest values, where N is - a percentage given by `settings.image_mesh_adapt_background_percent_check`. If N is 50%, then the half of - pixels in `adapt_data` with the lowest values will be checked. - 2) Sum the total number of mesh pixels in these background pixels, thereby estimating the number of mesh pixels - assigned to background pixels. - 3) Compare this value to the total number of mesh pixels multiplied - by `settings.image_mesh_adapt_background_percent_threshold` and raise an `InversionException` if the number - of mesh pixels is below this value, meaning the background did not have sufficient mesh pixels in it. 
- - Therefore, by setting `settings.image_mesh_adapt_background_percent_threshold` the code is forced - to adapt the image mesh in a way that places many mesh pixels in the background regions. - - Parameters - ---------- - mask - The mask of the dataset being analysed, which the pixelization grid maps too. The number of - mesh pixels mapped inside each of this mask's image-pixels is returned. - mesh_grid - The image mesh-grid computed by the class which adapts to the data's mask. The number of image mesh pixels - that fall within each of the data's mask pixels is returned. - adapt_data - A image which represents one or more components in the masked 2D data in the image-plane. - settings - The inversion settings, which have the criteria dictating if the image-mesh has clustered enough or if - an exception is raised. - """ - - if os.environ.get("PYAUTOFIT_TEST_MODE") == "1": - return - - if settings is not None: - if settings.image_mesh_adapt_background_percent_threshold is not None: - pixels = mesh_grid.shape[0] - - pixels_in_background = int( - mask.shape_slim * settings.image_mesh_adapt_background_percent_check - ) - - indices_of_lowest_values = np.argsort(adapt_data)[:pixels_in_background] - mask_background = np.zeros_like(adapt_data, dtype=bool) - mask_background[indices_of_lowest_values] = True - - mesh_pixels_per_image_pixels = self.mesh_pixels_per_image_pixels_from( - mask=mask, mesh_grid=mesh_grid - ) - - mesh_pixels_in_background = sum( - mesh_pixels_per_image_pixels[mask_background] - ) - - if mesh_pixels_in_background < ( - pixels * settings.image_mesh_adapt_background_percent_threshold - ): - raise exc.InversionException() diff --git a/autoarray/inversion/pixelization/image_mesh/hilbert.py b/autoarray/inversion/pixelization/image_mesh/hilbert.py index 119db098d..95e13a824 100644 --- a/autoarray/inversion/pixelization/image_mesh/hilbert.py +++ b/autoarray/inversion/pixelization/image_mesh/hilbert.py @@ -1,5 +1,6 @@ from __future__ import annotations 
import numpy as np +import os from typing import Optional @@ -212,6 +213,20 @@ def __init__( weight_power The power the weight values are raised too, which allows more pixels to be drawn from the higher weight regions of the adapt image. + + image_mesh_min_mesh_pixels_per_pixel + If not None, the image-mesh must place this many mesh pixels per image pixels in the N highest weighted + regions of the adapt data, or an `InversionException` is raised. This can be used to force the image-mesh + to cluster large numbers of source pixels to the adapt-datas brightest regions. + image_mesh_min_mesh_number + The value N given above in the docstring for `image_mesh_min_mesh_pixels_per_pixel`, indicating how many + image pixels are checked for having a threshold number of mesh pixels. + image_mesh_adapt_background_percent_threshold + If not None, the image-mesh must place this percentage of mesh-pixels in the background regions of the + `adapt_data`, where the background is the `image_mesh_adapt_background_percent_check` masked data pixels + with the lowest values. + image_mesh_adapt_background_percent_check + The percentage of masked data pixels which are checked for the background criteria. """ super().__init__( @@ -224,7 +239,6 @@ def image_plane_mesh_grid_from( self, mask: Mask2D, adapt_data: Optional[np.ndarray], - settings: SettingsInversion = None, ) -> Grid2DIrregular: """ Returns an image mesh by running the Hilbert curve on the weight map. 
@@ -274,14 +288,139 @@ def image_plane_mesh_grid_from( gridy=grid_hb[:, 0], ) - mesh_grid = Grid2DIrregular(values=np.stack((drawn_y, drawn_x), axis=-1)) + return Grid2DIrregular(values=np.stack((drawn_y, drawn_x), axis=-1)) - self.check_mesh_pixels_per_image_pixels( - mask=mask, mesh_grid=mesh_grid, settings=settings - ) + def check_mesh_pixels_per_image_pixels( + self, + mask: Mask2D, + mesh_grid: Grid2DIrregular, + image_mesh_min_mesh_pixels_per_pixel=None, + image_mesh_min_mesh_number: int = 5, + image_mesh_adapt_background_percent_threshold: float = None, + image_mesh_adapt_background_percent_check: float = 0.8, + ): + """ + Checks the number of mesh pixels in every image pixel and raises an `InversionException` if there are fewer + mesh pixels inside a certain number of image-pixels than the input settings. - self.check_adapt_background_pixels( - mask=mask, mesh_grid=mesh_grid, adapt_data=adapt_data, settings=settings - ) + This allows a user to force a model-fit to use image-meshes which cluster a large number of mesh pixels to + the brightest regions of the image data (E.g. the highest weighted regions). + + The check works as follows: + + 1) Compute the 2D array of the number of mesh pixels in every masked data image pixel. + 2) Find the number of mesh pixels in the N data pixels with the largest number of mesh pixels, where N is + given by `image_mesh_min_mesh_number`. For example, if `image_mesh_min_mesh_number=5` then + the number of mesh pixels in the 5 data pixels with the most mesh pixels is computed. + 3) Compare the lowest value above to the value `image_mesh_min_mesh_pixels_per_pixel`. If the value is + below this value, raise an `InversionException`. + + Therefore, by setting `image_mesh_min_mesh_pixels_per_pixel` to a value above 1 the code is forced + to adapt the image mesh enough to put many mesh pixels in the brightest image pixels.
+ + Parameters + ---------- + mask + The mask of the dataset being analysed, which the pixelization grid maps to. The number of + mesh pixels mapped inside each of this mask's image-pixels is returned. + mesh_grid + The image mesh-grid computed by the class which adapts to the data's mask. The number of image mesh pixels + that fall within each of the data's mask pixels is returned. + image_mesh_min_mesh_pixels_per_pixel + If not None, the minimum number of mesh pixels required in the N image pixels with the most mesh pixels, + below which an `InversionException` is raised. + """ + + if os.environ.get("PYAUTOFIT_TEST_MODE") == "1": + return + + if image_mesh_min_mesh_pixels_per_pixel is not None: + mesh_pixels_per_image_pixels = self.mesh_pixels_per_image_pixels_from( + mask=mask, mesh_grid=mesh_grid + ) + + indices_of_highest_values = np.argsort(mesh_pixels_per_image_pixels)[ + -image_mesh_min_mesh_number: + ] + lowest_mesh_pixels = np.min( + mesh_pixels_per_image_pixels[indices_of_highest_values] + ) + + if lowest_mesh_pixels < image_mesh_min_mesh_pixels_per_pixel: + raise exc.InversionException() return mesh_grid + + def check_adapt_background_pixels( + self, + mask: Mask2D, + mesh_grid: Grid2DIrregular, + adapt_data: Optional[np.ndarray], + image_mesh_min_mesh_pixels_per_pixel=None, + image_mesh_min_mesh_number: int = 5, + image_mesh_adapt_background_percent_threshold: float = None, + image_mesh_adapt_background_percent_check: float = 0.8, + ): + """ + Checks the number of mesh pixels in the background of the image-mesh and raises an `InversionException` if + there are fewer mesh pixels in the background than the input settings. + + This allows a user to force a model-fit to use image-meshes which cluster a minimum number of mesh pixels to + the faintest regions of the image data (E.g. the lowest weighted regions). This prevents too few image-mesh + pixels being allocated to the background of the data.
+ + The check works as follows: + + 1) Find all pixels in the background of the `adapt_data`, which are N pixels with the lowest values, where N is + a percentage given by `image_mesh_adapt_background_percent_check`. If N is 50%, then the half of + pixels in `adapt_data` with the lowest values will be checked. + 2) Sum the total number of mesh pixels in these background pixels, thereby estimating the number of mesh pixels + assigned to background pixels. + 3) Compare this value to the total number of mesh pixels multiplied + by `image_mesh_adapt_background_percent_threshold` and raise an `InversionException` if the number + of mesh pixels is below this value, meaning the background did not have sufficient mesh pixels in it. + + Therefore, by setting `image_mesh_adapt_background_percent_threshold` the code is forced + to adapt the image mesh in a way that places many mesh pixels in the background regions. + + Parameters + ---------- + mask + The mask of the dataset being analysed, which the pixelization grid maps to. The number of + mesh pixels mapped inside each of this mask's image-pixels is returned. + mesh_grid + The image mesh-grid computed by the class which adapts to the data's mask. The number of image mesh pixels + that fall within each of the data's mask pixels is returned. + adapt_data + An image which represents one or more components in the masked 2D data in the image-plane. + image_mesh_adapt_background_percent_threshold + If not None, the minimum percentage of mesh pixels which must fall in the background of the `adapt_data`, + below which an `InversionException` is raised.
+ """ + + if os.environ.get("PYAUTOFIT_TEST_MODE") == "1": + return + + if image_mesh_adapt_background_percent_threshold is not None: + pixels = mesh_grid.shape[0] + + pixels_in_background = int( + mask.shape_slim * image_mesh_adapt_background_percent_check + ) + + indices_of_lowest_values = np.argsort(adapt_data)[:pixels_in_background] + mask_background = np.zeros_like(adapt_data, dtype=bool) + mask_background[indices_of_lowest_values] = True + + mesh_pixels_per_image_pixels = self.mesh_pixels_per_image_pixels_from( + mask=mask, mesh_grid=mesh_grid + ) + + mesh_pixels_in_background = sum( + mesh_pixels_per_image_pixels[mask_background] + ) + + if mesh_pixels_in_background < ( + pixels * image_mesh_adapt_background_percent_threshold + ): + raise exc.InversionException() diff --git a/autoarray/inversion/pixelization/image_mesh/kmeans.py b/autoarray/inversion/pixelization/image_mesh/kmeans.py index 55ecb99fc..0ac630920 100644 --- a/autoarray/inversion/pixelization/image_mesh/kmeans.py +++ b/autoarray/inversion/pixelization/image_mesh/kmeans.py @@ -52,7 +52,6 @@ def image_plane_mesh_grid_from( self, mask: Mask2D, adapt_data: Optional[np.ndarray], - settings: SettingsInversion = None, ) -> Grid2DIrregular: """ Returns an image mesh by running a KMeans clustering algorithm on the weight map. 
diff --git a/autoarray/inversion/pixelization/image_mesh/overlay.py b/autoarray/inversion/pixelization/image_mesh/overlay.py index 654cf8ec7..755d60f22 100644 --- a/autoarray/inversion/pixelization/image_mesh/overlay.py +++ b/autoarray/inversion/pixelization/image_mesh/overlay.py @@ -186,7 +186,6 @@ def image_plane_mesh_grid_from( self, mask: Mask2D, adapt_data: Optional[np.ndarray] = None, - settings: SettingsInversion = None, ) -> Grid2DIrregular: """ Returns an image-mesh by overlaying a uniform grid of (y,x) coordinates over the masked image that the diff --git a/autoarray/inversion/pixelization/mesh/abstract.py b/autoarray/inversion/pixelization/mesh/abstract.py index f277e49a1..7d57e9043 100644 --- a/autoarray/inversion/pixelization/mesh/abstract.py +++ b/autoarray/inversion/pixelization/mesh/abstract.py @@ -112,10 +112,6 @@ def mesh_grid_from( ): raise NotImplementedError - @property - def requires_image_mesh(self): - return True - def __str__(self): return "\n".join(["{}: {}".format(k, v) for k, v in self.__dict__.items()]) diff --git a/autoarray/inversion/pixelization/mesh/rectangular.py b/autoarray/inversion/pixelization/mesh/rectangular.py index 4e03268c4..f753ccc78 100644 --- a/autoarray/inversion/pixelization/mesh/rectangular.py +++ b/autoarray/inversion/pixelization/mesh/rectangular.py @@ -151,10 +151,6 @@ def mesh_grid_from( xp=xp, ) - @property - def requires_image_mesh(self): - return False - class RectangularSource(RectangularMagnification): diff --git a/autoarray/inversion/pixelization/pixelization.py b/autoarray/inversion/pixelization/pixelization.py index 0ecdcece6..2898c7b8c 100644 --- a/autoarray/inversion/pixelization/pixelization.py +++ b/autoarray/inversion/pixelization/pixelization.py @@ -12,7 +12,6 @@ def __init__( self, mesh: AbstractMesh, regularization: Optional[AbstractRegularization] = None, - image_mesh: Optional[AbstractImageMesh] = None, ): """ Pairs a 2D grid of (y,x) coordinates with a 2D mesh, which can be combined with a 
``Regularization`` @@ -118,9 +117,6 @@ def __init__( regularization The regularization object that can smooth ``Pixelization`` pixels with one another when it is used to reconstruct data via an `Inversion`. - image_mesh - The grid of mesh coordinates may be derived from the image, for example if the pixelization is adaptive. - This object controls how this mesh is computed. Examples -------- @@ -156,19 +152,8 @@ def __init__( model = af.Collection(galaxies=af.Collection(galaxy=galaxy)) """ - if mesh is not None: - if mesh.requires_image_mesh and image_mesh is None: - raise exc.PixelizationException( - """ - A pixelization has been created which requires an image-mesh to be supplied (e.g. Delaunay, Voronoi). - - However, not image-mesh has been input. - """ - ) - self.mesh = mesh self.regularization = regularization - self.image_mesh = image_mesh @property def mapper_grids_from(self) -> Callable: diff --git a/autoarray/preloads.py b/autoarray/preloads.py index 607aa6557..2e60dff68 100644 --- a/autoarray/preloads.py +++ b/autoarray/preloads.py @@ -22,6 +22,7 @@ def __init__( self, mapper_indices: np.ndarray = None, source_pixel_zeroed_indices: np.ndarray = None, + image_plane_mesh_grid_list: np.ndarray = None, linear_light_profile_blurred_mapping_matrix=None, ): """ @@ -36,6 +37,29 @@ def __init__( the source model is fixed (e.g. when fitting only the lens light), sections of the curvature matrix do not change and can be reused, avoiding redundant computation. + Returns a list of image-plane mesh-grids, which are image-plane grids defining the centres of the pixels of + the pixelization's mesh (e.g. the centres of Voronoi pixels). + + The `image_mesh` attribute of the pixelization object defines whether the centre of each mesh pixel are + determined in the image-plane. When this is the case, the pixelization therefore has an image-plane mesh-grid, + which needs to be computed before the inversion is performed. 
+ + This function iterates over all galaxies with pixelizations, determines which pixelizations have an + `image_mesh` and for these pixelizations computes the image-plane mesh-grid. + + It returns a list of all image-plane mesh-grids, which in the functions `mapper_from` and `mapper_galaxy_dict` + are grouped into a `Mapper` object with other information required to perform the inversion using the + pixelization. + + The order of this list is not important, because the `linear_obj_galaxy_dict` function associates each + mapper object (and therefore image-plane mesh-grid) with the galaxy it belongs to and is therefore used + elsewhere in the code (e.g. the fit module) to match inversion results to galaxies. + + Certain image meshes adapt their pixels to the dataset, for example congregating the pixels to the brightest + regions of the image. This requires that `adapt_images` are used when setting up the image-plane mesh-grid. + This function uses the `adapt_images` attribute of the `GalaxiesToInversion` object to pass these images and + raises an error if they are not present. + Parameters ---------- mapper_indices @@ -46,6 +70,10 @@ Indices of source pixels that should be set to zero in the reconstruction. These typically correspond to outer-edge source-plane regions with no image-plane mapping (e.g. outside a circular mask), helping separate the lens light from the pixelized source model. + image_plane_mesh_grid_list + The (y,x) coordinates of the image-plane mesh grid used by pixelizations that start from pixels + being defined in the image-plane (e.g. overlaying a uniform grid of pixels on the image-plane, which + make up Delaunay triangles in the source-plane). linear_light_profile_blurred_mapping_matrix The evaluated images of the linear light profiles that make up the blurred mapping matrix component of the inversion, with the other component being the pixelization's pixels.
These are fixed when the lens light @@ -72,6 +100,19 @@ def __init__( self.source_pixel_zeroed_indices_to_keep = np.where(values_to_solve)[0] + if image_plane_mesh_grid_list is not None: + + self.image_plane_mesh_grid_list = [] + + for image_plane_mesh_grid in image_plane_mesh_grid_list: + + if image_plane_mesh_grid is not None: + self.image_plane_mesh_grid_list.append( + np.array(image_plane_mesh_grid) + ) + else: + self.image_plane_mesh_grid_list.append(None) + if linear_light_profile_blurred_mapping_matrix is not None: self.linear_light_profile_blurred_mapping_matrix = np.array( diff --git a/autoarray/structures/grids/uniform_2d.py b/autoarray/structures/grids/uniform_2d.py index 3a2e7c795..508bedc03 100644 --- a/autoarray/structures/grids/uniform_2d.py +++ b/autoarray/structures/grids/uniform_2d.py @@ -197,7 +197,9 @@ def over_sampler(self): from autoarray.operators.over_sampling.over_sampler import OverSampler - self._over_sampler = OverSampler(sub_size=self.over_sample_size.array.astype("int"), mask=self.mask) + self._over_sampler = OverSampler( + sub_size=self.over_sample_size.array.astype("int"), mask=self.mask + ) return self._over_sampler diff --git a/test_autoarray/inversion/pixelization/image_mesh/test_abstract.py b/test_autoarray/inversion/pixelization/image_mesh/test_abstract.py index 4df0ec9ac..c7d19b7a5 100644 --- a/test_autoarray/inversion/pixelization/image_mesh/test_abstract.py +++ b/test_autoarray/inversion/pixelization/image_mesh/test_abstract.py @@ -44,33 +44,31 @@ def test__mesh_pixels_per_image_pixels_from(mask, mesh_grid, image_mesh): def test__check_mesh_pixels_per_image_pixels(mask, mesh_grid, image_mesh): image_mesh.check_mesh_pixels_per_image_pixels( - mask=mask, mesh_grid=mesh_grid, settings=None + mask=mask, + mesh_grid=mesh_grid, ) image_mesh.check_mesh_pixels_per_image_pixels( mask=mask, mesh_grid=mesh_grid, - settings=aa.SettingsInversion( - image_mesh_min_mesh_pixels_per_pixel=3, image_mesh_min_mesh_number=1 - ), + 
image_mesh_min_mesh_pixels_per_pixel=3, + image_mesh_min_mesh_number=1, ) with pytest.raises(aa.exc.InversionException): image_mesh.check_mesh_pixels_per_image_pixels( mask=mask, mesh_grid=mesh_grid, - settings=aa.SettingsInversion( - image_mesh_min_mesh_pixels_per_pixel=5, image_mesh_min_mesh_number=1 - ), + image_mesh_min_mesh_pixels_per_pixel=5, + image_mesh_min_mesh_number=1, ) with pytest.raises(aa.exc.InversionException): image_mesh.check_mesh_pixels_per_image_pixels( mask=mask, mesh_grid=mesh_grid, - settings=aa.SettingsInversion( - image_mesh_min_mesh_pixels_per_pixel=3, image_mesh_min_mesh_number=2 - ), + image_mesh_min_mesh_pixels_per_pixel=3, + image_mesh_min_mesh_number=2, ) @@ -81,17 +79,17 @@ def test__check_adapt_background_pixels(mask, mesh_grid, image_mesh): ) image_mesh.check_adapt_background_pixels( - mask=mask, mesh_grid=mesh_grid, adapt_data=adapt_data, settings=None + mask=mask, + mesh_grid=mesh_grid, + adapt_data=adapt_data, ) image_mesh.check_adapt_background_pixels( mask=mask, mesh_grid=mesh_grid, adapt_data=adapt_data, - settings=aa.SettingsInversion( - image_mesh_adapt_background_percent_threshold=0.05, - image_mesh_adapt_background_percent_check=0.9, - ), + image_mesh_adapt_background_percent_threshold=0.05, + image_mesh_adapt_background_percent_check=0.9, ) with pytest.raises(aa.exc.InversionException): @@ -99,8 +97,6 @@ def test__check_adapt_background_pixels(mask, mesh_grid, image_mesh): mask=mask, mesh_grid=mesh_grid, adapt_data=adapt_data, - settings=aa.SettingsInversion( - image_mesh_adapt_background_percent_threshold=0.8, - image_mesh_adapt_background_percent_check=0.5, - ), + image_mesh_adapt_background_percent_threshold=0.8, + image_mesh_adapt_background_percent_check=0.5, )