diff --git a/igrins_instruments/igrins/__init__.py b/igrins_instruments/igrins/__init__.py deleted file mode 100644 index 43c145c..0000000 --- a/igrins_instruments/igrins/__init__.py +++ /dev/null @@ -1,12 +0,0 @@ -__all__ = ['AstroDataIGRINS', 'AstroDataIGRINS'] - -from astrodata import factory -from gemini_instruments.gemini import addInstrumentFilterWavelengths -from .adclass import AstroDataIGRINS -from .lookup import filter_wavelengths - -factory.addClass(AstroDataIGRINS) - -addInstrumentFilterWavelengths('fox', filter_wavelengths) - - diff --git a/igrins_instruments/igrins/adclass.py b/igrins_instruments/igrins/adclass.py deleted file mode 100644 index 5095dd9..0000000 --- a/igrins_instruments/igrins/adclass.py +++ /dev/null @@ -1,44 +0,0 @@ -from astrodata import astro_data_tag, astro_data_descriptor, returns_list, TagSet -from gemini_instruments import gmu -from gemini_instruments.common import Section -from . import lookup -from gemini_instruments.gemini import AstroDataGemini - -class AstroDataIGRINS(AstroDataGemini): - # single keyword mapping. add only the ones that are different - # from what's already defined in AstroDataGemini. 
- - __keyword_dict = dict() - - @staticmethod - def _matches_data(source): - return source[0].header.get('INSTRUME', '').upper() == 'IGRINS-2' - - @astro_data_tag - def _tag_instrument(self): - return TagSet(['IGRINS', 'VERSION2']) - - - @astro_data_tag - def _tag_flat(self): - #if self.phu.get('SOMEKEYWORD') == 'Flat_or_something': - # return TagSet(['FLAT', 'CAL']]) - pass - - # ------------------ - # Common descriptors - # ------------------ - - @astro_data_descriptor - def gain(self): - """ - Returns the gain (electrons/ADU) from lookup table - - Returns - ------- - float/list - gain - """ - return lookup.array_properties.get('gain') - - diff --git a/igrinsdr/igrins/parameters_igrins.py b/igrinsdr/igrins/parameters_igrins.py deleted file mode 100644 index 7fc2981..0000000 --- a/igrinsdr/igrins/parameters_igrins.py +++ /dev/null @@ -1,12 +0,0 @@ -# This parameter file contains the parameters related to the primitives -# define in the primitives_igrins.py file - -from gempy.library import config - -class somePrimitiveConfig(config.Config): - suffix = config.Field("Filename suffix", str, "_suffix") - param1 = config.Field("Param1", str, "default") - param2 = config.Field("do param2?", bool, False) - -class someStuffConfig(config.Config): - suffix = config.Field("Output suffix", str, "_somestuff") diff --git a/igrinsdr/igrins/primitives_igrins.py b/igrinsdr/igrins/primitives_igrins.py deleted file mode 100644 index 4be7e87..0000000 --- a/igrinsdr/igrins/primitives_igrins.py +++ /dev/null @@ -1,64 +0,0 @@ -# -# DRAGONS -# -# primitives_igrins.py -# ------------------------------------------------------------------------------ - -from gempy.gemini import gemini_tools as gt - -from geminidr.gemini.primitives_gemini import Gemini - -from . 
import parameters_igrins - -from .lookups import timestamp_keywords as igrins_stamps - -from recipe_system.utils.decorators import parameter_override -# ------------------------------------------------------------------------------ - -@parameter_override -class Igrins(Gemini): - """ - This class inherits from the level above. Any primitives specific - to IGRINS can go here. - """ - - tagset = {"GEMINI", "IGRINS"} - - def __init__(self, adinputs, **kwargs): - super(Igrins, self).__init__(adinputs, **kwargs) - self._param_update(parameters_igrins) - # Add IGRINS specific timestamp keywords - self.timestamp_keys.update(igrins_stamps.timestamp_keys) - - def someStuff(self, adinputs=None, **params): - """ - Write message to screen. Test primitive. - - Parameters - ---------- - adinputs - params - - Returns - ------- - - """ - log = self.log - log.debug(gt.log_message("primitive", self.myself(), "starting")) - - for ad in adinputs: - log.status('I see '+ad.filename) - - gt.mark_history(ad, primname=self.myself(), keyword="TEST") - ad.update_filename(suffix=params['suffix'], strip=True) - - return adinputs - - - @staticmethod - def _has_valid_extensions(ad): - """ Check that the AD has a valid number of extensions. """ - - # this needs to be updated at appropriate. 
- return len(ad) in [1] - diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000..8116f62 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,22 @@ +[tool.poetry] +name = "igrinsdr" +version = "0.1.0" +description = "" +authors = ["Jae-Joon Lee "] + +packages = [ + { include = "igrinsdr", from = 'src'}, + { include = "igrins_instruments", from = 'src'}, +] + +[tool.poetry.dependencies] +python = ">3.7.1" +"stsci.image" = "^2.3.5" +pandas = "^1.3" + +[tool.poetry.dev-dependencies] +pytest = "^7.1.2" + +[build-system] +requires = ["poetry-core>=1.0.0"] +build-backend = "poetry.core.masonry.api" diff --git a/igrins_instruments/__init__.py b/src/igrins_instruments/__init__.py similarity index 100% rename from igrins_instruments/__init__.py rename to src/igrins_instruments/__init__.py diff --git a/src/igrins_instruments/igrins/__init__.py b/src/igrins_instruments/igrins/__init__.py new file mode 100644 index 0000000..1be3b83 --- /dev/null +++ b/src/igrins_instruments/igrins/__init__.py @@ -0,0 +1,10 @@ +__all__ = ['AstroDataIGRINS', 'AstroDataIGRINS2'] + +from astrodata import factory +from .adclass import _AstroDataIGRINS, AstroDataIGRINS, AstroDataIGRINS2 + +factory.addClass(AstroDataIGRINS) +factory.addClass(AstroDataIGRINS2) + + + diff --git a/src/igrins_instruments/igrins/adclass.py b/src/igrins_instruments/igrins/adclass.py new file mode 100644 index 0000000..c55374a --- /dev/null +++ b/src/igrins_instruments/igrins/adclass.py @@ -0,0 +1,163 @@ +from astrodata import astro_data_tag, astro_data_descriptor, returns_list, TagSet +from gemini_instruments import gmu +from gemini_instruments.common import Section +from . import lookup +from gemini_instruments import igrins + +# Since gemini_instruments already defines AstroDataIgrins which is picked up +# by the mapper, we create a new AstroDataIGRINS inheriting from the +# gemini_instruments. + +class _AstroDataIGRINS(igrins.AstroDataIgrins): + # This is a temporary placeholder class. 
The original gemini_instruments + # version of AstroDataIgrins could be updated with it contents eventually. + + __keyword_dict = dict( + airmass = 'AMSTART', + wavelength_band = 'BAND', + detector_name = 'DETECTOR', + target_name = 'OBJECT', + observation_class = 'OBSCLASS', + observation_type = 'OBJTYPE', + ra = 'OBJRA', + dec = 'OBJDEC', + slit_x_center = 'SLIT_CX', + slit_y_center = 'SLIT_CY', + slit_width = 'SLIT_WID', + slit_length = 'SLIT_LEN', + slit_angle = 'SLIT_ANG' + ) + + @astro_data_tag + def _tag_sky(self): + if self.phu.get('OBJTYPE') == 'SKY' or self[0].hdr.get('OBJTYPE') == 'SKY': + return TagSet(['SKY', 'CAL']) + + @astro_data_descriptor + def observation_class(self): + """ + Returns 'class' the observation; one of, + + 'science', 'acq', 'projCal', 'dayCal', 'partnerCal', 'acqCal' + + An 'acq' is defined by BAND == 'S', where 'S' indicates a slit image. + + Returns + ------- + oclass: + One of the above enumerated names for observation class. + + """ + oclass = None + + otype = self.phu.get(self._keyword_for('observation_class')) + if not otype: + otype = self[0].hdr.get(self._keyword_for('observation_class')) + + if 'S' in self.wavelength_band(): + oclass = 'acq' + + if 'STD' in otype: + oclass = 'partnerCal' + elif 'TAR' in otype: + oclass = 'science' + elif otype in ["partnerCal"]: + oclass = 'partnerCal' + + return oclass + + @astro_data_descriptor + def observation_type(self): + """ + Returns 'type' the observation. For IGRINS, this will be one of, + + 'OBJECT', 'DARK', 'FLAT_OFF', 'FLAT_ON', 'ARC' + + Returns + ------- + otype: + Observation type. 
+ + """ + otype = self.phu.get(self._keyword_for('observation_type')) + if not otype: + otype = self[0].hdr.get(self._keyword_for('observation_type')) + ftype = self.phu.get("FRMTYPE") + if not otype: + ftype = self[0].hdr.get("FRMTYPE") + + if otype in ['STD', 'TAR']: + otype = 'OBJECT' + elif otype in ['FLAT']: + otype = f"FLAT_{ftype}" + + return otype + + @astro_data_descriptor + def data_label(self): + + # The IGRINS data does not have a DATALAB keyword inthe header. Thus, + # the `data_label` descriptor return None, which raises an error during + # storeProcessedDark. We try to define a ad-hoc data label out of its + # file name. But it should be revisited. FIXME + + # The header has 'GEMPRID' etc, but no OBSID and such is defined. + _, obsdate, obsid = self.filename.split('.')[0].split('_')[:3] + datalab = f"igrins-{obsdate}-{obsid}" + return datalab + +class AstroDataIGRINS(_AstroDataIGRINS): + # single keyword mapping. add only the ones that are different + # from what's already defined in AstroDataGemini. + + @astro_data_tag + def _tag_instrument(self): + return TagSet(['IGRINS', 'VERSION1']) + + # ------------------ + # Common descriptors + # ------------------ + + # @returns_list + # @astro_data_descriptor + # def gain(self): + # """ + # Returns the gain (electrons/ADU) from lookup table + # + # Returns + # ------- + # float/list + # gain + # """ + # return lookup.array_properties.get('gain') + # + # @returns_list + # @astro_data_descriptor + # def read_noise(self): + # """ + # Returns the read_noise electron from lookup table + # + # Returns + # ------- + # float/list + # gain + # """ + # return lookup.array_properties.get('read_noise') + + +class AstroDataIGRINS2(AstroDataIGRINS): + # single keyword mapping. add only the ones that are different + # from what's already defined in AstroDataGemini. 
+ + @staticmethod + def _matches_data(source): + grins = source[0].header.get('INSTRUME', '').upper() == 'IGRINS-2' + if not grins: + grins = source[1].header.get('INSTRUME', '').upper() == 'IGRINS-2' + + return grins + + @astro_data_tag + def _tag_instrument(self): + return TagSet(['IGRINS', 'VERSION2']) + diff --git a/igrins_instruments/igrins/lookup.py b/src/igrins_instruments/igrins/lookup.py similarity index 83% rename from igrins_instruments/igrins/lookup.py rename to src/igrins_instruments/igrins/lookup.py index 20ccc56..43a04ec 100644 --- a/igrins_instruments/igrins/lookup.py +++ b/src/igrins_instruments/igrins/lookup.py @@ -8,6 +8,8 @@ array_properties = { # EDIT AS NEEDED + # somehow the gain needs to be a list "gain" : 3, # electrons/ADU (MADE UP VALUE for example) + "read_noise": 9, -} \ No newline at end of file +} diff --git a/igrinsdr/__init__.py b/src/igrinsdr/__init__.py similarity index 100% rename from igrinsdr/__init__.py rename to src/igrinsdr/__init__.py diff --git a/igrinsdr/doc/usermanuals/IGRINSDR_UserManual/README b/src/igrinsdr/doc/usermanuals/IGRINSDR_UserManual/README similarity index 100% rename from igrinsdr/doc/usermanuals/IGRINSDR_UserManual/README rename to src/igrinsdr/doc/usermanuals/IGRINSDR_UserManual/README diff --git a/igrinsdr/igrins/__init__.py b/src/igrinsdr/igrins/__init__.py similarity index 100% rename from igrinsdr/igrins/__init__.py rename to src/igrinsdr/igrins/__init__.py diff --git a/src/igrinsdr/igrins/igrins_pipeline/procedures/destripe_helper.py b/src/igrinsdr/igrins/igrins_pipeline/procedures/destripe_helper.py new file mode 100644 index 0000000..a7b55e3 --- /dev/null +++ b/src/igrinsdr/igrins/igrins_pipeline/procedures/destripe_helper.py @@ -0,0 +1,168 @@ +import numpy as np +# import matplotlib.pyplot as plt + +from .destriper import stack64, concat + + +def _get_ny_slice(ny, dy): + n_dy = ny//dy + dy_slices = [slice(iy*dy, (iy+1)*dy) for iy in range(n_dy)] + return dy_slices + + +def 
get_pattern64_from_each_column(d): + ds = stack64(d) + p = concat(ds, [1, -1], 16) + return ds, p + + +def get_median_guard_row(d): + return np.median(d[[0, 1, 2, 3, -4, -3, -2, -1], :], axis=0) + + +def get_median_guard_column(d): + return np.median(d[:, [0, 1, 2, 3, -4, -3, -2, -1]], axis=1) + + +def get_pattern64_from_guard_column(d): + dd = np.median(d[:, [0, 1, 2, 3, -4, -3, -2, -1]], axis=1) + ds, p = get_pattern64_from_each_column(dd) + return ds, p + + +def subtract_column_median(d): + return d - np.median(d, axis=0) + + +def subtract_row_median(d): + return d - np.median(d, axis=1)[:, np.newaxis] + + +def get_row64_median(d): + ny_slices = _get_ny_slice(2048, 64) + return np.concatenate([np.broadcast_to(np.median(d[sl]), (len(d[sl]), )) + for sl in ny_slices]) + + +def subtract_row64_median(d): + ny_slices = _get_ny_slice(2048, 64) + return np.concatenate([d[sl] - np.median(d[sl]) for sl in ny_slices]) + + +def sub_row(d, r): + return d - r + + +def sub_column(d, c): + return d - c[:, np.newaxis] + + +# sky? +def sub_guard_column(d): + k = get_median_guard_column(d) + k0 = subtract_row64_median(k) + ds, p = get_pattern64_from_each_column(k0) + vv = d - p[:, np.newaxis] + xx = np.median(vv[-512:], axis=0) + return vv - xx + + +def sub_p64(d): + + k0 = get_row64_median(d) + dk = d - k0[:, np.newaxis] + ds, p = get_pattern64_from_each_column(dk) + d0k = subtract_row_median(dk - p) + + return d0k + + +# try subtract row64 first. + +def sub_p64_from_guard(d): + """ This removes p64 esimated from the guard column. You may need to further +remove median row/column values, but proper background needs to be removed first. 
+ """ + + k = get_median_guard_column(d) + k0 = subtract_row64_median(k) + ds, p = get_pattern64_from_each_column(k0) + dd = d - p[:, np.newaxis] + # d2 = subtract_row_median(subtract_column_median(dd)) + + return dd + + +def get_p64_mask(d, destrip_mask): + # k = get_median_guard_column(d) + # k0 = get_row64_median(k) + # dk = d - k0[:, np.newaxis] + + # dk_mskd = np.ma.array(d, mask=destrip_mask) # .filled(np.nan) + kd = stack64(d, destrip_mask) + + p = concat(kd, [1, -1], 16) + + return kd, p + + +def sub_p64_mask(d, destrip_mask): + k = get_median_guard_column(d) + k0 = get_row64_median(k) + dk = d - k0[:, np.newaxis] + + dk_mskd = np.ma.array(dk, mask=destrip_mask) # .filled(np.nan) + kd = stack64(dk, destrip_mask) + + p = concat(kd, [1, -1], 16) + + # vertical pattern using the masked data + d5p = dk_mskd.filled(np.nan) - p + k0 = np.nanmedian(d5p, axis=1) + d5 = (dk - p) - k0[:, np.newaxis] + return d5 + + +def sub_p64_guard(v): + k = get_median_guard_column(v) + k0 = get_row64_median(k) + ds, p = get_pattern64_from_each_column(k - k0) + vv = v - k0[:, np.newaxis] - p[:, np.newaxis] + return vv + # d2 = subtract_row_median(subtract_column_median(dd)) + + +def sub_bg64_from_guard(v): + k = get_median_guard_column(v) + k0 = get_row64_median(k) + v1 = v - k0[:, np.newaxis] + return v1 + + +def sub_p64_upper_quater(v, subtract_bg64=False): + if subtract_bg64: + # It seems that this usually worsen the background + v1 = sub_bg64_from_guard(v) + else: + v1 = v + msk = np.ones(v.shape, dtype=bool) + msk[2048 - 384:] = False + td, p = get_p64_mask(v1, msk) + + return v1 - p + + +def sub_p64_with_bg(d, bg): + ds, p = get_pattern64_from_each_column(d - bg) + return d - p + + +def sub_p64_with_mask(d0, destrip_mask=None): + if destrip_mask is not None: + mask = ~np.isfinite(d0) | destrip_mask + else: + mask = ~np.isfinite(d0) + + ds, p = get_p64_mask(d0, mask) + return d0 - p + diff --git a/src/igrinsdr/igrins/igrins_pipeline/procedures/destriper.py 
b/src/igrinsdr/igrins/igrins_pipeline/procedures/destriper.py new file mode 100644 index 0000000..da7f6d9 --- /dev/null +++ b/src/igrinsdr/igrins/igrins_pipeline/procedures/destriper.py @@ -0,0 +1,439 @@ +import numpy as np + +from ..utils.image_combine import image_median + +# basic routines + +def stack_subrows(d, dy, mask=None, alt_sign=False, op="median"): + if mask is None: + mask = ~np.isfinite(d) + else: + mask = ~np.isfinite(d) | mask + + if len(d.shape) == 1: + ny, = d.shape + elif len(d.shape) == 2: + ny, nx = d.shape + else: + raise ValueError("unsupported shape: {}", d.shape) + + n_dy = ny//dy + dy_slices = [slice(iy*dy, (iy+1)*dy) for iy in range(n_dy)] + + from itertools import cycle + if mask is not None: + if alt_sign: + _sign = cycle([1, -1]) + dd = [d[sl][::next(_sign)] for sl in dy_slices] + _sign = cycle([1, -1]) + msk = [mask[sl][::next(_sign)] for sl in dy_slices] + else: + dd = [d[sl] for sl in dy_slices] + msk = [mask[sl] for sl in dy_slices] + + if op == "median": + ddm = image_median(dd, badmasks=msk) + elif op == "sum": + ddm = np.ma.array(dd, mask=msk).sum(axis=0) + elif op == "average": + ddm = np.ma.array(dd, mask=msk).average(axis=0) + else: + ValueError("unknown ope: {}".format(op)) + + else: + if alt_sign: + _sign = cycle([1, -1]) + dd = [d[sl][::next(_sign)] for sl in dy_slices] + else: + dd = [d[sl] for sl in dy_slices] + + if op == "median": + ddm = np.median(dd, axis=0) + elif op == "sum": + ddm = np.sum(dd, axis=0) + elif op == "average": + ddm = np.average(dd, axis=0) + else: + ValueError("unknown ope: {}".format(op)) + + return ddm + + +def stack128(d, mask=None, op="median"): + return stack_subrows(d, 128, mask=mask, alt_sign=False, op=op) + + +def stack64(d, mask=None, op="median"): + return stack_subrows(d, 64, mask=mask, alt_sign=True, op=op) + + +def concat(stacked, iter_sign, n_repeat): + return np.concatenate([stacked[::s] for s in iter_sign] * n_repeat) + + + + + +# clf() +# for d in data_list: +# s128 = stack128(d) 
+# xx = np.median(s128, axis=0) +# s128m = s128 - xx +# s128_0 = np.median(s128m, axis=1) +# sm = s128m - s128_0[:, np.newaxis] + +# plot(xx) + +# clf() +# kk = plot(s128m, color="0.8", alpha=0.5) +# plot(s128_0, color="k") +# plot(s128_0 + s_std, color="r") +# plot(s128_0 - s_std, color="b") + + + +# dy = 128 +# if len(d.shape) == 1: +# ny, = d.shape +# elif len(d.shape) == 2: +# ny, nx = d.shape +# else: +# raise ValueError("unsupported shape: {}", d.shape) + +# n_dy = ny//dy +# dy_slices = [slice(iy*dy, (iy+1)*dy) for iy in range(n_dy)] +# from itertools import cycle +# if mask is not None: +# dd = [d[sl] for sl in dy_slices] +# alt_sign = cycle([1, -1]) +# msk = [mask[sl] for sl in dy_slices] +# ddm = image_median(dd, badmasks=msk) +# else: +# dd = [d[sl] for sl in dy_slices] +# ddm = np.median(dd, axis=0) + +# return ddm + +def get_stripe_pattern64(self, d, mask=None, + concatenate=True, + remove_vertical=True): + """ + if concatenate is True, return 2048x2048 array. + Otherwise, 128x2048 array. 
+ """ + dy = 64 + n_dy = 2048//dy + dy_slices = [slice(iy*dy, (iy+1)*dy) for iy in range(n_dy)] + from itertools import cycle + if mask is not None: + if remove_vertical: + d = self._remove_vertical_smooth_bg(d, mask=mask) + alt_sign = cycle([1, -1]) + dd = [d[sl][::next(alt_sign)] for sl in dy_slices] + alt_sign = cycle([1, -1]) + msk = [mask[sl][::next(alt_sign)] for sl in dy_slices] + ddm = image_median(dd, badmasks=msk) + # dd1 = np.ma.array(dd, mask=msk) + # ddm = np.ma.median(dd1, axis=0) + else: + alt_sign = cycle([1, -1]) + dd = [d[sl][::next(alt_sign)] for sl in dy_slices] + ddm = np.median(dd, axis=0) + + if concatenate: + return np.concatenate([ddm, ddm[::-1]] * (n_dy//2)) + else: + return ddm + + +class Destriper(object): + def __init__(self): + self.dy = dy = 128 + self.n_dy = 2048//dy + self.dy_slices = [slice(iy*dy, (iy+1)*dy) for iy in range(self.n_dy)] + + def _remove_vertical_smooth_bg(self, d, mask=None): + ny, nx = d.shape + iy = np.arange(ny) + import scipy.ndimage as ni + md = ni.median_filter(d, [1, 7]) + mask2 = mask & np.isfinite(md) + p_list = [np.polyfit(iy[~mask2[:, ix]], + md[:, ix][~mask2[:, ix]], 4) for ix in range(nx)] + dd = np.vstack([np.polyval(p, iy) for p in p_list]) + return d - dd.T + + def get_stripe_pattern64(self, d, mask=None, + concatenate=True, + remove_vertical=True): + """ + if concatenate is True, return 2048x2048 array. + Otherwise, 128x2048 array. 
+ """ + dy = 64 + n_dy = 2048//dy + dy_slices = [slice(iy*dy, (iy+1)*dy) for iy in range(n_dy)] + from itertools import cycle + if mask is not None: + if remove_vertical: + d = self._remove_vertical_smooth_bg(d, mask=mask) + alt_sign = cycle([1, -1]) + dd = [d[sl][::next(alt_sign)] for sl in dy_slices] + alt_sign = cycle([1, -1]) + msk = [mask[sl][::next(alt_sign)] for sl in dy_slices] + ddm = image_median(dd, badmasks=msk) + # dd1 = np.ma.array(dd, mask=msk) + # ddm = np.ma.median(dd1, axis=0) + else: + alt_sign = cycle([1, -1]) + dd = [d[sl][::next(alt_sign)] for sl in dy_slices] + ddm = np.median(dd, axis=0) + + if concatenate: + return np.concatenate([ddm, ddm[::-1]] * (n_dy//2)) + else: + return ddm + + def get_stripe_pattern128(self, d, mask=None, concatenate=True): + """ + if concatenate is True, return 2048x2048 array. + Otherwise, 128x2048 array. + """ + dy_slices = self.dy_slices + if mask is not None: + dd = [d[sl] for sl in dy_slices] + msk = [mask[sl] for sl in dy_slices] + dd1 = np.ma.array(dd, mask=msk) + ddm = np.ma.median(dd1, axis=0) + else: + dd = [d[sl] for sl in dy_slices] + ddm = np.median(dd, axis=0) + + if concatenate: + return np.concatenate([ddm] * self.n_dy) + else: + return ddm + + def get_stripe_pattern128_flat(self, d, mask=None, concatenate=True): + """ + if concatenate is True, return 2048x2048 array. + Otherwise, 128x2048 array. + """ + dy_slices = self.dy_slices + if mask is not None: + dd = [d[sl] for sl in dy_slices] + msk = [mask[sl] for sl in dy_slices] + dd1 = np.ma.array(dd, mask=msk) + dd2 = dd1.transpose([1, 0, 2]).reshape((self.dy, -1)) + ddm = np.ma.median(dd2, axis=1) + else: + dd = np.array([d[sl] for sl in dy_slices]) + dd2 = dd.transpose([1, 0, 2]).reshape((self.dy, -1)) + ddm = np.median(dd2, axis=1) + + if concatenate: + return np.concatenate([ddm] * self.n_dy) + else: + return ddm + + def get_stripe_pattern2048(self, d, mask=None): + """ + if concatenate is True, return 2048x2048 array. + Otherwise, 128x2048 array. 
+ """ + if mask is not None: + dd1 = np.ma.array(d, mask=mask) + ddm = np.ma.median(dd1, axis=1) + else: + ddm = np.median(d, axis=1) + + return ddm + + def get_destriped(self, d, mask=None, hori=None, pattern=128, + remove_vertical=True, return_pattern=False): + # if hori: + # s_hori = np.median(d, axis=0) + # d = d - s_hori + if hori is None: + if pattern == 2048: + hori = True + + if pattern == 64: + ddm = self.get_stripe_pattern64(d, mask=mask, concatenate=True, + remove_vertical=remove_vertical) + d_ddm = d - ddm + elif pattern == 128: + ddm = self.get_stripe_pattern128(d, mask=mask, concatenate=True) + d_ddm = d - ddm + elif pattern == 2048: + ddm = self.get_stripe_pattern2048(d, mask=mask) + # ddm = self.get_stripe_pattern128_flat(d, mask=mask) + d_ddm = d - ddm[:, np.newaxis] + else: + raise ValueError("incorrect pattern value: %s" % pattern) + + if hori: + d_ddm_masked = np.ma.array(d_ddm, mask=mask) + s_hori = np.ma.median(d_ddm_masked, axis=1) + d_ddm = d_ddm - s_hori[:, np.newaxis] + + if return_pattern: + return np.array(d_ddm), ddm + else: + return np.array(d_ddm) + + def get_destriped_naive(self, d): + """ + if concatenate is True, return 2048x2048 array. + Otherwise, 128x2048 array. 
+ """ + s_vert = np.median(d, axis=0) + d_vert = d - s_vert[np.newaxis, :] + + s_hori = np.median(d_vert, axis=1) + d_hori = d_vert - s_hori[:, np.newaxis] + + return d_hori + + +destriper = Destriper() + + +def check_readout_pattern(fig, hdu_list, axis=1): + + from mpl_toolkits.axes_grid1 import Grid + grid = Grid(fig, 111, (len(hdu_list), 1), + share_x=True, share_y=True, share_all=True, + label_mode="1") + + smin, smax = np.inf, -np.inf + for hdu, ax in zip(hdu_list, grid): + p_horizontal = np.median(hdu.data, axis=axis) + p_horizontal0 = np.median(p_horizontal) + p_horizontal -= p_horizontal0 + smin = min(smin, p_horizontal[100:-100].min()) + smax = max(smax, p_horizontal[100:-100].max()) + ax.plot(p_horizontal) + ax.axhline(0, color="0.5") + ax.locator_params(axis="y", nbins=5) + + ax = grid[-1] + ax.set_xlim(0, len(p_horizontal)) + sminmax = max(-smin, smax) + ax.set_ylim(-sminmax, sminmax) + if axis == 1: + ax.set_xlabel("y-pixel") + elif axis == 0: + ax.set_xlabel("x-pixel") + + ax.set_ylabel("ADUs") + + +def check_destriper(hdu, bias_mask, vmin, vmax): + import matplotlib.pyplot as plt + + destriper = Destriper() + + hh1 = destriper.get_destriped(hdu.data, mask=bias_mask, + pattern=2048) + hh2 = destriper.get_destriped(hdu.data, + pattern=64, mask=bias_mask) + + from matplotlib.colors import Normalize + + from mpl_toolkits.axes_grid1 import ImageGrid, Grid + fig1 = plt.figure(figsize=(13, 5)) + grid = ImageGrid(fig1, 111, (1, 3), + share_all=True, + label_mode="1", axes_pad=0.1, + cbar_mode="single") + + norm = Normalize(vmin=vmin, vmax=vmax) + for ax, dd in zip(grid, [hdu.data, hh1, hh2]): + im = ax.imshow(dd, norm=norm, + origin="lower", cmap="gray_r", + interpolation="none") + plt.colorbar(im, cax=grid[0].cax) + + grid[0].set_xlabel("x-pixel") + grid[0].set_ylabel("y-pixel") + fig1.tight_layout() + + fig2 = plt.figure() + grid = Grid(fig2, 111, (3, 1), + share_x=True, share_y=True, share_all=True, + label_mode="1", axes_pad=0.1) + for ax, dd in 
zip(grid, [hdu.data, hh1, hh2]): + s = np.median(dd[:, 1024-128:1024+128], axis=1) + ax.plot(s) + ax.axhline(0, color="0.5") + grid[2].set_xlim(0, 2048) + grid[2].set_xlabel("y-pixel") + fig2.tight_layout() + + return fig1, fig2 + + +# if __name__ == "__main__": +# from igrins_log import IGRINSLog + +# log_20140525 = dict(flat_off=range(64, 74), +# flat_on=range(74, 84), +# thar=range(3, 8)) + + +# igrins_log = IGRINSLog("20140525", log_20140525) + +# band = "K" + + +# if 1: # check ro pattern +# hdu_list = igrins_log.get_cal_hdus(band, "flat_off") + +# import matplotlib.pyplot as plt +# fig = plt.figure(figsize=(9, 8)) +# check_readout_pattern(fig, hdu_list, axis=1) +# fig.tight_layout() + +# fig2 = plt.figure(figsize=(9, 8)) +# check_readout_pattern(fig2, hdu_list, axis=0) +# fig2.tight_layout() + +# fig.savefig("readout_horizontal_pattern_%s.png" % band) +# fig2.savefig("readout_vertical_pattern_%s.png" % band) + +# if 1: # check destriper +# # with mask +# import pickle +# r = pickle.load(open("flat_info_20140316_%s.pickle" % band)) + +# from trace_flat import get_mask_bg_pattern +# bias_mask = get_mask_bg_pattern(r["flat_mask"], +# r["bottomup_solutions"]) + +# if 1: +# fig = plt.figure() +# ax=fig.add_subplot(111) +# ax.imshow(bias_mask, origin="lower", cmap="gray_r", +# vmin=0, vmax=1) +# fig.savefig("destriper_mask_%s.png"%band, bbox_inches="tight") + +# hdu_list = igrins_log.get_cal_hdus(band, "flat_off") +# hdu = hdu_list[0] + +# fig11, fig12 = check_destriper(hdu, None, vmin=-25, vmax=30) +# fig13, fig14 = check_destriper(hdu, bias_mask, vmin=-25, vmax=30) + +# fig11.savefig("destriper_test_image_flat_off_wo_mask_%s.png" % band) +# fig12.savefig("destriper_test_cut_flat_off_wo_mask_%s.png" % band) +# fig13.savefig("destriper_test_image_flat_off_w_mask_%s.png" % band) +# fig14.savefig("destriper_test_cut_flat_off_w_mask_%s.png" % band) + + +# hdu_list = igrins_log.get_cal_hdus(band, "thar") +# hdu = hdu_list[-1] + +# fig15, fig16 = check_destriper(hdu, 
bias_mask, vmin=-30, vmax=80) + +# fig15.savefig("destriper_test_image_thar_w_mask_%s.png" % band) +# fig16.savefig("destriper_test_cut_thar_w_mask_%s.png" % band) diff --git a/src/igrinsdr/igrins/igrins_pipeline/procedures/readout_pattern.py b/src/igrinsdr/igrins/igrins_pipeline/procedures/readout_pattern.py new file mode 100644 index 0000000..b5cd1af --- /dev/null +++ b/src/igrinsdr/igrins/igrins_pipeline/procedures/readout_pattern.py @@ -0,0 +1,450 @@ +from collections import OrderedDict + +import numpy as np +import scipy.ndimage as ni +from scipy.interpolate import LSQUnivariateSpline + +from . import destripe_helper as dh + + +def _get_slices(ny, dy): + n_dy = ny//dy + dy_slices = [slice(iy*dy, (iy+1)*dy) for iy in range(n_dy)] + return dy_slices + + +def get_ref(d): + """ + Ext + """ + dm = ni.median_filter(d, [1, 64]) + dd = d - dm + + ddm = ni.median_filter(dd, [64, 1]) + ddd = dd - ddm[:] + + return ddd + + +def get_individual_bg64(d, median_col=None): + ny_slices = dh._get_ny_slice(2048, 64) + rr = [] + for sl in ny_slices[:]: + # sl = ny_slices[-3] + r = np.median(d[sl][2:-2,:], axis=0) + if median_col is not None: + r = ni.median_filter(r, median_col) + # plot(r) + rr.append(r) + + return np.vstack(rr) + + +def sub_individual_bg64(d, median_col=None): + g = get_individual_bg64(d, median_col=median_col) + vv = np.vstack(np.swapaxes(np.broadcast_to(g, + (64,) + g.shape), 0, 1)) + + return d - vv + + +def get_guard_column(d): + k = dh.get_median_guard_column(d) + k0 = dh.subtract_row64_median(k) + ds, p = dh.get_pattern64_from_each_column(k0) + + return p + + +def sub_guard_column(d, mask=None): + p = get_guard_column(d) + vv = d - p[:, np.newaxis] + # xx = np.median(vv[-512:], axis=0) + return vv # - xx + + +# x = np.arange(2048) +# yy = [] +# for r in p64: +# p = np.polyfit(x[4:-4], r[4:-4], 1) +# y0 = np.polyval([p[0], 0], x) +# yy.append(y0) + +def get_row64_median(d, mask=None, q=None): + if q is None: + f = np.nanmedian + else: + def f(d2): + return 
np.nanpercentile(d2, q) + + d3 = np.ma.array(d, mask=mask).filled(np.nan) + + ny_slices = _get_slices(2048, 64) + return np.concatenate([np.broadcast_to(f(d3[sl]), (len(d3[sl]), )) + for sl in ny_slices]) + + +class PatternAmpWiseBiasClass(object): + def __init__(self, q=None): + self._q = q + + if q is None: + f = np.nanmedian + else: + def f(d2): + return np.nanpercentile(d2, q) + + self._f = f + self.slice_size = 64 + self.ny_slices = _get_slices(2048, self.slice_size) + + def get(self, d, mask=None): + d3 = np.ma.array(d, mask=mask).filled(np.nan) + + s = np.array([self._f(d3[sl]) for sl in self.ny_slices]) + + return s - np.median(s) + + def broadcast(self, d, s): + # return np.broadcast_to(s, (self.slice_size, )) + kk = [np.broadcast_to(s1, (self.slice_size,)) for s1 in s] + k0 = np.concatenate(kk) + return k0[:, np.newaxis] + + def sub(self, d, mask=None): + s = self.get(d, mask) + p = self.broadcast(d, s) + + return d - p + +# def factory_get_amp_bias(q=None): + +# def _broadcast(s): +# return np.broadcast_to(s, (slice_size, )) + +# def _get_amp_bias(d2, mask=None): +# d3 = np.ma.array(d2, mask=mask).filled(np.nan) + +# s = np.array([f(d3[sl]) for sl in ny_slices]) + +# return s - np.median(s) + +# return _get_amp_bias, _broadcast + + # return np.concatenate([np.broadcast_to(f(d3[sl]), (len(d3[sl]), )) + # for sl in ny_slices]) + + # def _get_amp_bias(d2, mask=None): + # if q is None: + # k = np.median(d2, axis=1) + # else: + # k = np.percentile(d2, q, axis=1) + # k0 = dh.get_row64_median(k) + + # return k0 + + # return _get_amp_bias + + +# def factory_sub_amp_bias(q=None): +# _get_amp_bias, _broadcast = factory_get_amp_bias(q=None) + +# def _sub_amp_bias(d2, mask=None): +# s = _get_amp_bias(d2, mask) +# k0 = np.concatenate([_broadcast(s1) for s1 in s]) +# return d2 - k0[:, np.newaxis] + +# return _sub_amp_bias + + +# def factory_get_amp_bias_old(q=None): +# def _get_amp_bias(d2, mask=None): +# if q is None: +# k = np.median(d2, axis=1) +# else: +# k = 
np.percentile(d2, q, axis=1) +# k0 = dh.get_row64_median(k) + +# return k0 + +# return _get_amp_bias + + +# def factory_sub_amp_bias_old(q=None): +# _get_amp_bias = factory_get_amp_bias(q) + +# def _sub_amp_bias(d2, mask=None): +# k0 = _get_amp_bias(d2, mask) +# return d2 - k0[:, np.newaxis] + +# return _sub_amp_bias + + +# def get_col_median_slow_old(d5, mask=None): +# k = get_col_median(d5, mask=mask) +# # k = np.ma.median(np.ma.array(d5, mask=mask), axis=0) +# k = ni.median_filter(np.ma.array(k, mask=~np.isfinite(k)).filled(0), 64) +# # k = ni.median_filter(k, 64) +# return k - np.nanmedian(k) + +# def sub_col_median_slow_old(d5, mask=None): +# k = get_col_median_slow(d5, mask=mask) + +# return d5 - k + + +class PatternBase(object): + @classmethod + def get(kls, d5, mask=None): + pass + + @classmethod + def broadcast(kls, d5, k): + pass + + @classmethod + def sub(kls, d5, mask=None): + a = kls.get(d5, mask=mask) + k = kls.broadcast(d5, a) + + return d5 - k + + +class PatternP64Zeroth(PatternBase): + @classmethod + def get(kls, d1, mask=None): + p64a = dh.stack64(d1, mask) + p64a0 = np.median(p64a, axis=1) + + return p64a0 - np.median(p64a0) + + @classmethod + def broadcast(kls, d1, p64a0): + k = dh.concat(p64a0, [1, -1], 16) + return k[:, np.newaxis] + + +class PatternP64First(PatternBase): + + @classmethod + def get(kls, d0, mask=None): + p64 = dh.stack64(d0, mask=mask) + # p64 = p64_ - np.median(p64_, axis=1)[:, np.newaxis] + + # extract the slope component + a0 = np.nanmedian(p64[:, :1024], axis=1) + a1 = np.nanmedian(p64[:, 1024:], axis=1) + v = (a1 + a0)/2. + x = np.arange(2048) + u = (a1 - a0)[:, np.newaxis]/1024.*(x-1024.) 
+ + assert np.all(np.isfinite(v)) + assert np.all(np.isfinite(u)) + + return v, u + + @classmethod + def broadcast(kls, d0, v_u): + v, u = v_u + p = dh.concat(v[:, np.newaxis] + u, [1, -1], 16) + + assert np.all(np.isfinite(p)) + + return p + + +class PatternP64ColWise(PatternBase): + + @classmethod + def get(kls, d1, mask=None): + p64a = dh.stack64(d1, mask) + + return p64a + + @classmethod + def broadcast(kls, d1, p64a): + # p64a0 = get_p64_pattern_each(d1, mask) + k = dh.concat(p64a, [1, -1], 16) + + return k + + +class PatternColWiseBias(PatternBase): + @classmethod + def get(kls, d5, mask=None): + if mask is not None: + d6 = np.ma.array(d5, mask=mask).filled(np.nan) + else: + d6 = d5 + + k = np.nanmedian(d6, axis=0) + # k = ni.median_filter(k, 64) + return k - np.nanmedian(k) + + @classmethod + def broadcast(kls, d5, k): + return k + + +class PatternColWiseBiasC64(PatternBase): + @classmethod + def get(kls, d5, mask=None): + k = PatternColWiseBias.get(d5, mask=mask) + # k = np.ma.median(np.ma.array(d5, mask=mask), axis=0) + k = np.ma.array(k, mask=~np.isfinite(k)).filled(0) + k = ni.median_filter(k, 64) + # k = np.nan_to_num(k) + ix = np.arange(len(k)) + tsize = 64 + t = np.arange(tsize, len(k), tsize) + spl = LSQUnivariateSpline(ix, k, t) + + knots, coeffs, k = spl._eval_args + return knots, coeffs, k + + @classmethod + def broadcast(kls, d5, tck): + spl = LSQUnivariateSpline._from_tck(tck) + s = spl(np.arange(len(d5))) + + return s + +# def sub_col_median_slow(d5, mask=None): +# tck = get_col_median_slow(d5, mask=mask) +# s = broadcast_col_median_slow(d5, tck) +# # spl = LSQUnivariateSpline._from_tck(tck) +# # s = spl(np.arange(len(d5))) + +# return d5 - s + + +class PatternRowWiseBias(PatternBase): + @classmethod + def get(kls, d6, mask=None): + if mask is not None: + d6 = np.ma.array(d6, mask=mask).filled(np.nan) + + c = np.nanmedian(d6, axis=1) + return c + + @classmethod + def broadcast(kls, d6, c): + return c[:, np.newaxis] + + +class 
PatternAmpP2(PatternBase): + @classmethod + def get(kls, d, mask=None): + """ + returns a tuple of two 32 element array. First is per-amp bias values. + The second is the [1,-1] amplitude for each amp. + """ + d = np.ma.array(d, mask=mask).filled(np.nan) + + do = d.reshape(32, 32, 2, -1) + av = np.nanmedian(do, axis=[1, 3]) + + amp_bias_mean = np.mean(av, axis=1) + amp_bias_amp = av[:, 0] - amp_bias_mean + + return amp_bias_mean, amp_bias_amp + + @classmethod + def broadcast(kls, d, av_p): + av, p = av_p + k = p[:, np.newaxis] * np.array([1, -1]) + v = np.zeros((32, 32, 2, 1)) + k[:, np.newaxis, :, np.newaxis] + avv = av.reshape(32, 1, 1, 1) + v + return avv.reshape(2048, 1) + + +class PatternAmpP2v1(PatternBase): + @classmethod + def get(kls, d, mask=None): + """ + returns a tuple of two 32 element array. First is per-amp bias values. + The second is the [1,-1] amplitude for each amp. + """ + d = np.ma.array(d, mask=mask).filled(np.nan) + + # TODO: This seems inefficient. See if there is a better way. + # we first estimate (32x) bias + do = d.reshape(32, 32, 2, -1) + av = np.nanmedian(do, axis=[1, 2, 3]) + dd = do - av.reshape(32, 1, 1, 1) + + # and then [1, -1] bias. 
+ pp = np.array([1, -1])[np.newaxis, np.newaxis, :, np.newaxis] + p = np.nanmedian(dd * pp, axis=[1, 2, 3]) + return av, p + + @classmethod + def broadcast(kls, d, av_p): + av, p = av_p + k = p[:, np.newaxis] * np.array([1, -1]) + v = np.zeros((32, 32, 2, 1)) + k[:, np.newaxis, :, np.newaxis] + avv = av.reshape(32, 1, 1, 1) + v + return avv.reshape(2048, 1) + + # @classmethod + # def sub(kls, d, mask=None): + # av_p = kls.get(d, mask) + # return d - kls.broadcast(d, av_p) + + +class PatternAmpWiseBiasC64(PatternBase): + @classmethod + def get(kls, d6, mask=None): + g = get_individual_bg64(d6, median_col=64) + return g + + @classmethod + def broadcast(kls, d6, g): + vv = np.vstack(np.swapaxes(np.broadcast_to(g, + (64,) + g.shape), 0, 1)) + return vv + +# def get_amp_bias_variation(d7, mask=None): +# g = get_individual_bg64(d7, median_col=64) + +# return g + +# def sub_amp_bias_variation(d7, mask=None): +# return sub_individual_bg64(d7, 64) + + +def _get_std(ds, f_drop): + # s1, s2 = np.percentile(ds, 5), np.percentile(ds, 95) + s1 = np.percentile(ds, f_drop*100) + s2 = np.percentile(ds, 100 - f_drop*100) + msk = (s1 < ds) & (ds < s2) + # return np.ma.array(ds, mask=~msk).filled(np.nan) + return ds[msk].std() + + +def get_amp_std(d, f_drop=0.01): + ny_slices = _get_slices(2048, 64) + # return [np.nanpercentile(d[sl], 10) for sl in ny_slices] + return [_get_std(d[sl], f_drop) for sl in ny_slices] + + +def _apply(d, flist, mask=None, draw_hist_ax=None): + for f in flist: + d = f(d, mask=mask) + return d + + +def apply(d, p_list, mask=None, draw_hist_ax=None): + for p in p_list: + d = p.sub(d, mask=mask) + return d + + +pipes = OrderedDict(amp_wise_bias_r2=PatternAmpP2, + p64_0th_order=PatternP64Zeroth, + col_wise_bias_c64=PatternColWiseBiasC64, + p64_1st_order=PatternP64First, + col_wise_bias=PatternColWiseBias, + p64_per_column=PatternP64ColWise, + row_wise_bias=PatternRowWiseBias, + amp_wise_bias_c64=PatternAmpWiseBiasC64) diff --git 
a/src/igrinsdr/igrins/igrins_pipeline/procedures/readout_pattern_guard.py b/src/igrinsdr/igrins/igrins_pipeline/procedures/readout_pattern_guard.py new file mode 100644 index 0000000..24ce4b6 --- /dev/null +++ b/src/igrinsdr/igrins/igrins_pipeline/procedures/readout_pattern_guard.py @@ -0,0 +1,53 @@ +from collections import OrderedDict +import numpy as np + +from .readout_pattern import pipes + + +def get_column_percentile(guards, percentiles=None): + if percentiles is None: + percentiles = [10, 90] + # guards = d[:, [0, 1, 2, 3, -4, -3, -2, -1]] + r = OrderedDict(zip(percentiles, np.percentile(guards, percentiles))) + r["std"] = np.std(guards[(r[10] < guards) & (guards < r[90])]) + return r + + +def get_guard_column_pattern(d, pattern_noise_recipes=None): + if pattern_noise_recipes is None: + pipenames_dark1 = ['amp_wise_bias_r2', 'p64_0th_order'] + else: + pipenames_dark1 = pattern_noise_recipes + + guards = d[:, [0, 1, 2, 3, -4, -3, -2, -1]] + + pp = OrderedDict() + for k in pipenames_dark1: + p = pipes[k] + _ = p.get(guards) + guards = guards - p.broadcast(guards, _) + + guards = guards - np.median(guards) + s = get_column_percentile(guards) + pp[k] = dict(pattern=_, stat=s) + + return guards, pp + +# from igrins.procedures.readout_pattern import pipes, apply as apply_pipe +# pipes_dark1 = [pipes[k] for k in list(pipes)[:2]] +# pipes_dark2 = [pipes[k] for k in list(pipes)[2:3]] + + +def remove_pattern_from_guard(d, recipes=None): + + if recipes is None: + recipes = ['amp_wise_bias_r2', 'p64_0th_order'] + + guards = d[:, [0, 1, 2, 3, -4, -3, -2, -1]] + + for k in recipes: + p = pipes[k] + _ = p.get(guards) + d = d - p.broadcast(d, _) + + return d diff --git a/src/igrinsdr/igrins/igrins_pipeline/procedures/readout_pattern_helper.py b/src/igrinsdr/igrins/igrins_pipeline/procedures/readout_pattern_helper.py new file mode 100644 index 0000000..6596333 --- /dev/null +++ b/src/igrinsdr/igrins/igrins_pipeline/procedures/readout_pattern_helper.py @@ -0,0 +1,75 @@ 
+import numpy as np + +from .readout_pattern import pipes, apply as apply_pipe + + +def apply_rp_1st_phase(d, mask=None, subtract_col_wise_bias_c64=False): + """ + Note that this only apply + - amp_wise_bias_r2 + - p64_0th_order + + which is used for flat images + """ + if mask is None: + mask = np.zeros(d.shape, dtype=bool) + else: + mask = mask.copy() + + mask[:4] = True + mask[-4:] = True + + keys = ['amp_wise_bias_r2', 'p64_0th_order'] + + if subtract_col_wise_bias_c64: + keys.append('col_wise_bias_c64') + + p = [pipes[k] for k in keys] + + return apply_pipe(d, p, mask=mask) + + +# def apply_rp_1st_phase(d, mask=None): +# if mask is None: +# mask = np.zeros(d.shape, dtype=bool) +# else: +# mask = mask.copy() + +# mask[:4] = True +# mask[-4:] = True + +# p = [pipes[k] for k in ['amp_wise_bias_r2', +# 'p64_0th_order', +# 'col_wise_bias_c64']] + +# return apply_pipe(d, p, mask=mask) + + +def apply_rp_2nd_phase(d, mask=None): + if mask is None: + mask = np.zeros(d.shape, dtype=bool) + else: + mask = mask.copy() + + mask[:4] = True + mask[-4:] = True + + p = [pipes[k] for k in ['p64_1st_order', + 'col_wise_bias_c64', + 'amp_wise_bias_r2', + 'col_wise_bias']] + + return apply_pipe(d, p, mask=mask) + + +def apply_rp_3rd_phase(d): + p = [pipes[k] for k in ['p64_per_column', + 'row_wise_bias', + 'amp_wise_bias_c64']] + + return apply_pipe(d, p) + + +def sub_bg_from_slice(d, bg_y_slice): + s = np.nanmedian(d[bg_y_slice], axis=0) + return d - s[np.newaxis, :] diff --git a/src/igrinsdr/igrins/igrins_pipeline/utils/image_combine.py b/src/igrinsdr/igrins/igrins_pipeline/utils/image_combine.py new file mode 100644 index 0000000..89a72b6 --- /dev/null +++ b/src/igrinsdr/igrins/igrins_pipeline/utils/image_combine.py @@ -0,0 +1,2 @@ + +from stsci.image import median as image_median diff --git a/igrinsdr/igrins/lookups/BPM/README b/src/igrinsdr/igrins/lookups/BPM/README similarity index 100% rename from igrinsdr/igrins/lookups/BPM/README rename to 
src/igrinsdr/igrins/lookups/BPM/README diff --git a/igrinsdr/igrins/lookups/MDF/README b/src/igrinsdr/igrins/lookups/MDF/README similarity index 100% rename from igrinsdr/igrins/lookups/MDF/README rename to src/igrinsdr/igrins/lookups/MDF/README diff --git a/igrinsdr/igrins/lookups/__init__.py b/src/igrinsdr/igrins/lookups/__init__.py similarity index 100% rename from igrinsdr/igrins/lookups/__init__.py rename to src/igrinsdr/igrins/lookups/__init__.py diff --git a/igrinsdr/igrins/lookups/timestamp_keywords.py b/src/igrinsdr/igrins/lookups/timestamp_keywords.py similarity index 100% rename from igrinsdr/igrins/lookups/timestamp_keywords.py rename to src/igrinsdr/igrins/lookups/timestamp_keywords.py diff --git a/src/igrinsdr/igrins/parameters_igrins.py b/src/igrinsdr/igrins/parameters_igrins.py new file mode 100644 index 0000000..b84982c --- /dev/null +++ b/src/igrinsdr/igrins/parameters_igrins.py @@ -0,0 +1,32 @@ +# This parameter file contains the parameters related to the primitives +# define in the primitives_igrins.py file + +from gempy.library import config + +class selectFrameConfig(config.Config): + frmtype = config.Field("frametype to filter", str) + +class streamPatternCorrectedConfig(config.Config): + # rpc_mode = config.Field("RP Correction mode", str, "guard") + rpc_mode = config.Field("method to correct the pattern", str) + suffix = config.Field("Readout pattern corrected", str, "_rpc") + +class estimateNoiseConfig(config.Config): + pass + +class selectStreamConfig(config.Config): + stream_name = config.Field("stream name for the output", str) + +class addNoiseTableConfig(config.Config): + pass + +class setSuffixConfig(config.Config): + suffix = config.Field("output suffix", str) + +class somePrimitiveConfig(config.Config): + suffix = config.Field("Filename suffix", str, "_suffix") + param1 = config.Field("Param1", str, "default") + param2 = config.Field("do param2?", bool, False) + +class someStuffConfig(config.Config): + suffix = config.Field("Output 
suffix", str, "_somestuff") diff --git a/igrinsdr/igrins/parameters_igrins_echelle.py b/src/igrinsdr/igrins/parameters_igrins_echelle.py similarity index 100% rename from igrinsdr/igrins/parameters_igrins_echelle.py rename to src/igrinsdr/igrins/parameters_igrins_echelle.py diff --git a/src/igrinsdr/igrins/primitives_igrins.py b/src/igrinsdr/igrins/primitives_igrins.py new file mode 100644 index 0000000..4afd1ac --- /dev/null +++ b/src/igrinsdr/igrins/primitives_igrins.py @@ -0,0 +1,175 @@ +# +# DRAGONS +# +# primitives_igrins.py +# ------------------------------------------------------------------------------ + +from astropy.io import fits +from astropy.table import Table + +import astrodata +from gempy.gemini import gemini_tools as gt + +from geminidr.gemini.primitives_gemini import Gemini +from geminidr.core.primitives_nearIR import NearIR + +from . import parameters_igrins + +from .lookups import timestamp_keywords as igrins_stamps + +from recipe_system.utils.decorators import parameter_override +# ------------------------------------------------------------------------------ + +from .procedures.procedure_dark import (make_guard_n_bg_subtracted_images, + estimate_amp_wise_noise) + + +@parameter_override +class Igrins(Gemini, NearIR): + """ + This class inherits from the level above. Any primitives specific + to IGRINS can go here. + """ + + tagset = {"GEMINI", "IGRINS"} + + def __init__(self, adinputs, **kwargs): + super(Igrins, self).__init__(adinputs, **kwargs) + self._param_update(parameters_igrins) + # Add IGRINS specific timestamp keywords + self.timestamp_keys.update(igrins_stamps.timestamp_keys) + + def selectFrame(self, adinputs=None, **params): + """Filter the adinputs by its FRMTYPE value in the header. + """ + frmtype = params["frmtype"] + adoutputs = [ad for ad in adinputs + if frmtype in ad.hdr['FRMTYPE']] + return adoutputs + + def streamPatternCorrected(self, adinputs=None, **params): + """ + make images with Readout pattern corrected. 
And add them to streams. + + """ + log = self.log + log.debug(gt.log_message("primitive", self.myself(), "starting")) + + rpc_mode = params.get("rpc_mode") + assert rpc_mode == "full" + # FIXME: only 'full' mode is supported for now, which will create + # images using the methods of ['guard', 'level2', 'level3'] + + dlist = [ad[0].data for ad in adinputs] + hdu_list = make_guard_n_bg_subtracted_images(dlist, + rpc_mode=rpc_mode, + bias_mask=None, + log=log) + for (name, dlist) in hdu_list: + # name: the name of the correction method applied. One of ["GUARD", + # "LEVEL2", "LEVEL3"] + # dlist : list of numpy images + adoutputs = [] + for ad0, d in zip(adinputs, dlist): + # we create a new astrodata object based on the input's header. + hdu = fits.ImageHDU(data=d, header=ad0[0].hdr, + name='SCI') + ad = astrodata.create(ad0.phu, [hdu]) + gt.mark_history(ad, primname=self.myself(), + keyword="RPC") + + adoutputs.append(ad) + + self.streams[f"RPC_{name}"] = adoutputs + + return adinputs + + def estimateNoise(self, adinputs=None, **params): + """Estimate the noise characteristics for images in each stream. The resulting + table is added to an 'ESTIMATED_NOISE' stream + """ + + # filenames that will be used in the table. + filenames = [ad.filename for ad in adinputs] + + kdlist = [(k[4:], [ad[0].data for ad in adlist]) + for k, adlist in self.streams.items() + if k.startswith("RPC_")] + + df = estimate_amp_wise_noise(kdlist, filenames=filenames) + # df : pandas dataframe object. + + # Convert it to astropy.Table and then to an astrodata object.
+ tbl = Table.from_pandas(df) + phu = fits.PrimaryHDU() + ad = astrodata.create(phu) + + astrodata.add_header_to_table(tbl) + ad.append(tbl, name='EST_NOISE') + + self.streams["ESTIMATED_NOISE"] = [ad] + + return adinputs + + def selectStream(self, adinputs=None, **params): + stream_name = params["stream_name"] + return self.streams[f"RPC_{stream_name}"] + + def addNoiseTable(self, adinputs=None, **params): + """ + The table from the 'EST_NOISE' stream will be appended to the input. + """ + # adinputs should contain a single ad of stacked dark. We attach table + # to the stacked dark. + + ad = adinputs[0] + + ad_noise_table = self.streams["ESTIMATED_NOISE"][0] + del self.streams["ESTIMATED_NOISE"] + + ad.append(ad_noise_table.EST_NOISE, name="EST_NOISE") + + return adinputs + + def setSuffix(self, adinputs=None, **params): + suffix = params["suffix"] + + # Doing this also makes the output files saved. + adinputs = self._markAsCalibration(adinputs, suffix=suffix, + primname=self.myself(), + keyword="NOISETABLE") + + return adinputs + + def someStuff(self, adinputs=None, **params): + """ + Write message to screen. Test primitive. + + Parameters + ---------- + adinputs + params + + Returns + ------- + + """ + log = self.log + log.debug(gt.log_message("primitive", self.myself(), "starting")) + + for ad in adinputs: + log.status('I see '+ad.filename) + + gt.mark_history(ad, primname=self.myself(), keyword="TEST") + ad.update_filename(suffix=params['suffix'], strip=True) + + return adinputs + + + @staticmethod + def _has_valid_extensions(ad): + """ Check that the AD has a valid number of extensions. """ + + # this needs to be updated at appropriate. 
+ return len(ad) in [1] + diff --git a/igrinsdr/igrins/primitives_igrins_echelle.py b/src/igrinsdr/igrins/primitives_igrins_echelle.py similarity index 100% rename from igrinsdr/igrins/primitives_igrins_echelle.py rename to src/igrinsdr/igrins/primitives_igrins_echelle.py diff --git a/src/igrinsdr/igrins/procedures/procedure_dark.py b/src/igrinsdr/igrins/procedures/procedure_dark.py new file mode 100644 index 0000000..96df82a --- /dev/null +++ b/src/igrinsdr/igrins/procedures/procedure_dark.py @@ -0,0 +1,88 @@ +import numpy as np +import pandas as pd + +from ..igrins_pipeline.procedures.readout_pattern_guard import ( + remove_pattern_from_guard) + +from ..igrins_pipeline.procedures.readout_pattern_helper import ( + apply_rp_2nd_phase, + apply_rp_3rd_phase) + + +def make_guard_n_bg_subtracted_images(dlist, rpc_mode="guard", bias_mask=None, + log=None): + + cube = np.array([remove_pattern_from_guard(d) + for d in dlist]) + + if len(cube) < 5: + if log: + log.stdinfo("No background will be estimated, since at least 5 " + "input AstroData objects are required") + + bg = np.zeros_like(cube[0]) + cube1 = cube + else: + bg = np.median(cube, axis=0) + cube1 = cube - bg + + if rpc_mode == "guard": + return cube1 + + # cube20 = np.array([apply_rp_2nd_phase(d1) for d1 in cube1]) + cube2 = [apply_rp_2nd_phase(d1, mask=bias_mask) for d1 in cube1] + + if rpc_mode == "level2": + return cube2 + + cube3 = [apply_rp_3rd_phase(d1) for d1 in cube2] + + if rpc_mode == "level3": + return cube3 + + hdu_list = [ + ("GUARD_REMOVED", cube1), + ("ESTIMATED_BG", bg), + ("LEVEL2_REMOVED", cube2), + ("LEVEL3_REMOVED", cube3)] + + return hdu_list + + +def _get_per_amp_stat(cube, namp=32, threshold=100): + r = {} + + ds = cube.reshape((namp, -1)) + + msk_100 = np.abs(ds) > threshold + + r["count_gt_threshold"] = np.sum(msk_100, axis=1) + + r["stddev_lt_threshold"] = [np.std(ds1[~msk1]) + for ds1, msk1 in zip(ds, msk_100)] + + return r + + +def estimate_amp_wise_noise(kdlist, filenames=None): + + 
if filenames is None: + filenames = [f"{i:02d}" for i in range(len(kdlist))] + + # kl = ["DIRTY", "GUARD-REMOVED", "LEVEL2-REMOVED", "LEVEL3-REMOVED"] + dl = [] + for k, cube in kdlist: + for fn, c in zip(filenames, cube): + qq = _get_per_amp_stat(np.array(c)) + + ka = dict(filename=fn, level=k) + + _ = [dict(amp=i, + stddev_lt_threshold=q1, + count_gt_threshold=q2, **ka) + for i, (q1, q2) in enumerate(zip(qq["stddev_lt_threshold"], + qq["count_gt_threshold"]))] + + dl.extend(_) + + return pd.DataFrame(dl) diff --git a/igrinsdr/igrins/recipes/__init__.py b/src/igrinsdr/igrins/recipes/__init__.py similarity index 100% rename from igrinsdr/igrins/recipes/__init__.py rename to src/igrinsdr/igrins/recipes/__init__.py diff --git a/igrinsdr/igrins/recipes/sq/__init__.py b/src/igrinsdr/igrins/recipes/sq/__init__.py similarity index 100% rename from igrinsdr/igrins/recipes/sq/__init__.py rename to src/igrinsdr/igrins/recipes/sq/__init__.py diff --git a/igrinsdr/igrins/recipes/sq/recipes_DARK.py b/src/igrinsdr/igrins/recipes/sq/recipes_DARK.py similarity index 71% rename from igrinsdr/igrins/recipes/sq/recipes_DARK.py rename to src/igrinsdr/igrins/recipes/sq/recipes_DARK.py index d3a80c2..e0b15c9 100644 --- a/igrinsdr/igrins/recipes/sq/recipes_DARK.py +++ b/src/igrinsdr/igrins/recipes/sq/recipes_DARK.py @@ -20,10 +20,19 @@ def makeProcessedDark(p): """ p.prepare() - p.addDQ() - p.addVAR(read_noise=True) - #.... + # p.addDQ(static_bpm=None) + p.streamPatternCorrected() + p.estimateNoise(), + p.selectLevel3Removed(), + p.stackDarks(), + p.addNoiseTable(), + # p.makeIRAFCompatible() p.storeProcessedDark() + # p.prepare() + # p.addDQ() + # p.addVAR(read_noise=True) + # #.... 
+ # p.storeProcessedDark() return _default = makeProcessedDark diff --git a/igrinsdr/igrins/recipes/sq/recipes_ECHELLE.py b/src/igrinsdr/igrins/recipes/sq/recipes_ECHELLE.py similarity index 100% rename from igrinsdr/igrins/recipes/sq/recipes_ECHELLE.py rename to src/igrinsdr/igrins/recipes/sq/recipes_ECHELLE.py diff --git a/src/igrinsdr/igrins/recipes/sq/recipes_FLAT.py b/src/igrinsdr/igrins/recipes/sq/recipes_FLAT.py new file mode 100644 index 0000000..210bd27 --- /dev/null +++ b/src/igrinsdr/igrins/recipes/sq/recipes_FLAT.py @@ -0,0 +1,94 @@ +""" +Recipes available to data with tags ['IGRINS', 'CAL', 'FLAT']. +""" + +recipe_tags = {'IGRINS', 'CAL', 'FLAT'} + +def estimateNoise(p): + """This recipe performs the analysis of irs readout pattern noise in flat off + images. It creates a stacked image of pattern removed images and adds a + table that describes its noise characteristics. The result is stored on disk + and has a name equal to the name of the first input image with + "_pattern_noise.fits" appended. + + Parameters + ---------- + p : PrimitivesCORE object + A primitive set matching the recipe_tags. + + """ + + # Given the list of adinputs of both flat on and off images, we first + # select only the off images. + p.selectFrame(frmtype="OFF"), + p.prepare() + # it creates pattern corrected images with several methods (guard, level2, + # level3). The images are then added to the streams. + p.streamPatternCorrected(rpc_mode="full") + # Estimate some noise characteristics of images in each stream. A table is + # created and added to an 'ESTIMATED_NOISE' stream. + p.estimateNoise() + # Select the "level3_removed" stream and make it the output (i.e., input of + # next primitive) + p.selectStream(stream_name="LEVEL3_REMOVED") + p.stackDarks() + # The table from 'ESTIMATED_NOISE' stream is appended to the stacked image. + p.addNoiseTable() + # Set the suffix.
+ p.setSuffix(suffix="_pattern_noise") + return + +def makeProcessedFlat(p): + """ + This recipe performs the standardization and corrections needed to convert + the raw input flat images into a single stacked flat image. This output + processed flat is stored on disk and its information added to the calibration + database using storeProcessedFlat and has a name + equal to the name of the first input flat image with "_flat.fits" appended. + + Parameters + ---------- + p : PrimitivesCORE object + A primitive set matching the recipe_tags. + """ + + p.prepare() + p.addDQ() + p.addVAR(read_noise=True) + #p.nonlinearityCorrect() + p.ADUToElectrons() + p.addVAR(poisson_noise=True) + p.makeLampFlat() + # does not yet support multiple slit/order + p.determineSlitEdges() + # does not yet support multiple slit/order. Note, name likely to change. + p.maskBeyondSlit() + # New primitive for IGRINS-2 + p.normalizeFlat() + p.thresholdFlatfield() + p.storeProcessedFlat() + return + +# _default = makeProcessedFlat + +# We set 'estimateNoise' as the default recipe temporarily, just for testing +# purposes. +_default = estimateNoise + + +def makeProcessedBPM(p): + """ + This recipe requires flats and uses the lamp-off as short darks.
+ """ + + p.prepare() + p.ADUToElectrons() + p.selectFromInputs(tags="LAMPOFF", outstream="darks") + p.selectFromInputs(tags="FLAT") + p.stackFrames(stream="darks") + p.makeLampFlat() + p.determineSlitEdges() + p.normalizeFlat() + p.makeBPM() + #p.storeBPM() + return \ No newline at end of file diff --git a/test/test_ad.py b/test/test_ad.py new file mode 100644 index 0000000..d715794 --- /dev/null +++ b/test/test_ad.py @@ -0,0 +1,17 @@ +import astrodata +# from gemini_instruments import gmos +import igrins_instruments +import importlib +importlib.reload(igrins_instruments) + +def test_ad_igrins(): + # fn = "../test/SDCH_20220301_0001.fits" + fn = "./test/SDCH_20220301_0001.fits" + ad = astrodata.open(fn) + assert ad.instrument() == "IGRINS" + assert ad.observation_class() == "partnerCal" + assert ad.observation_type() == "FLAT_OFF" + tags = set(["IGRINS", "RAW", "UNPREPARED", "VERSION1", "GEMINI"]) + assert tags.issubset(ad.tags) + +# test_ad_igrins()