From b23f5fdd758d4615bd1a5e67f068af6720f55186 Mon Sep 17 00:00:00 2001
From: Alfredo Mejia-Narvaez
Date: Fri, 3 May 2024 20:57:23 -0400
Subject: [PATCH 01/43] implementing long-term calibration routine

---
 python/lvmdrp/functions/run_calseq.py | 183 ++++++++++++++++++--------
 1 file changed, 125 insertions(+), 58 deletions(-)

diff --git a/python/lvmdrp/functions/run_calseq.py b/python/lvmdrp/functions/run_calseq.py
index ccc077bf..cddb32d9 100644
--- a/python/lvmdrp/functions/run_calseq.py
+++ b/python/lvmdrp/functions/run_calseq.py
@@ -98,6 +98,70 @@ def get_sequence_metadata(mjd, expnums=None, exptime=None):
 
     return frames
 
+def choose_sequence(frames, flavor, kind):
+    """Chooses a calibration sequence and returns its frames metadata and exposure numbers
+
+    Parameters:
+    ----------
+    frames : pd.DataFrame
+        Pandas dataframe containing frames metadata
+    flavor : str
+        Flavor of calibration frame: 'twilight', 'bias', 'flat', 'arc'
+    kind : str
+        Kind of calibration frame: 'nightly', 'longterm'
+
+    Returns:
+    -------
+    pd.DataFrame
+        Metadata of the frames in the chosen sequence
+    np.ndarray
+        Exposure numbers of the chosen sequence
+    """
+    if not isinstance(flavor, str) or flavor not in {"twilight", "bias", "flat", "arc"}:
+        raise ValueError(f"invalid flavor '{flavor}', available values are 'twilight', 'bias', 'flat', 'arc'")
+    if not isinstance(kind, str) or kind not in {"nightly", "longterm"}:
+        raise ValueError(f"invalid kind '{kind}', available values are 'nightly' and 'longterm'")
+
+    if flavor == "twilight":
+        query = "imagetyp == 'flat' and not ldls|quartz"
+    elif flavor == "bias":
+        query = "imagetyp == 'bias'"
+    elif flavor == "flat":
+        query = "imagetyp == 'flat' and ldls|quartz"
+    elif flavor == "arc":
+        query = "imagetyp == 'arc' and not ldls|quartz and neon|hgne|argon|xenon"
+    expnums = frames.query(query).expnum.unique()
+    diff = np.diff(expnums)
+    div, = np.where(diff > 1)
+
+    sequences = np.split(expnums, div+1)
+    log.info(f"found sequences: {sequences}")
+
+    if len(sequences) == 0:
+        raise ValueError(f"no calibration frames of flavor '{flavor}' found using the query: {query}")
+
+    lengths = [len(seq) for seq in sequences]
+    idx = lengths.index(min(lengths) if kind == "nightly" else max(lengths))
+    chosen_expnums = sequences[idx]
+
+    if flavor == "twilight":
+        expected_length = 24
+    elif flavor == "bias":
+        expected_length = 7
+    elif flavor == "flat":
+        expected_length = 2 if kind == "nightly" else 24
+    elif flavor == "arc":
+        expected_length = 2 if kind == "nightly" else 24
+
+    if len(chosen_expnums) != expected_length:
+        log.warning(f"wrong sequence length: {len(chosen_expnums)}")
+
+    chosen_frames = frames.query("expnum in @chosen_expnums")
+    chosen_frames.sort_values(["expnum", "camera"], inplace=True)
+    return chosen_frames, chosen_expnums
+
+
 def _clean_ancillary(mjd, expnums=None, kind="all"):
     """Clean ancillary files
 
@@ -1290,74 +1354,68 @@ def reduce_nightly_sequence(mjd, use_fiducial_cals=True, reject_cr=True, skip_do
 
 
 def reduce_longterm_sequence(mjd, use_fiducial_cals=True, reject_cr=True, skip_done=True, keep_ancillary=False):
-    pass
-
+    """Reduces the long-term calibration sequence:
 
-def run_calibration_sequence(mjds, masters_mjd=None, expnums=None,
-                             pixelflats: bool = False, pixelmasks: bool = False,
-                             illumination_corrections: bool = False):
-    """Run the calibration sequence reduction
+    The long-term calibration sequence consists of the following exposures:
+    * 7 - 9 bias
+    * 24 dome flat (12: LDLS and 12: quartz)
+    * 24 arc (12: 10s and 12: 50s exposures)
+    * ~12 - 24 
twilight (~half exposures at dawn and twilight) - Given a set of MJDs and (optionally) exposure numbers, run the calibration - sequence reduction. This routine will store the master calibration frames - in the corresponding calibration directory in the `masters_mjd` or by - default in the smallest MJD in `mjds`. + This routine will create *long-term* master calibrations + at $LVM_SANDBOX/calib/{mjd} Parameters: ---------- - mjds : list - List of MJDs to reduce - masters_mjd : float - MJD to store the master frames in - expnums : list - List of exposure numbers to reduce - pixelflats : bool - Flag to create pixel flats - pixelmasks : bool - Flag to create pixel masks - illumination_corrections : bool - Flag to create illumination corrections + mjd : int + MJD to reduce + use_fiducial_cals : bool + Whether to use fiducial calibration frames or not, defaults to True + reject_cr : bool + Reject cosmic rays in 2D reduction, by default True + skip_done : bool + Skip pipeline steps that have already been done + keep_ancillary : bool + Keep ancillary files, by default False """ + cal_imagetyps = {"bias", "flat", "arc"} + log.info(f"going to reduce nightly calibration frames: {cal_imagetyps}") - # split exposures into the exposure sequence of each type of master frame - frames, masters_mjd = get_sequence_metadata(mjds, masters_mjd=masters_mjd, expnums=expnums) - bias_frames = frames.query("imagetyp == 'bias'") - dark_frames = frames.query("imagetyp == 'dark'") - ldls_frames = frames.query("imagetyp == 'flat & ldls") - qrtz_frames = frames.query("imagetyp == 'flat & quartz'") - twilight_frames = frames.query("imagetyp == 'flat'") - arc_frames = frames.query("imagetyp == 'arc'") - - # TODO: verify sequences completeness - - # reduce bias/dark - create_detrending_frames(mjds, masters_mjd=masters_mjd, expnums=set(bias_frames.expnum), kind="bias") - create_detrending_frames(mjds, masters_mjd=masters_mjd, expnums=set(dark_frames.expnum), kind="dark") - - # create traces - create_traces(mjds, use_fiducial_cals=masters_mjd, expnums_ldls=set(ldls_frames.expnum), expnums_qrtz=set(qrtz_frames.expnum), subtract_straylight=True) - - # create fiber flats - create_fiberflats(mjds, use_fiducial_cals=masters_mjd, expnums=set(twilight_frames.expnum)) + frames = md.get_frames_metadata(mjd) + frames.query("imagetyp in @cal_imagetyps", inplace=True) + if len(frames) == 0: + raise ValueError(f"no frames found for MJD = {mjd}") - # create wavelength solutions - create_wavelengths(mjds, use_fiducial_cals=masters_mjd, expnums=set(arc_frames.expnum)) + biases, bias_expnums = choose_sequence(frames, flavor="bias", kind="longterm") + if len(biases) != 0: + log.info(f"found {len(biases)} bias exposures: {bias_expnums}") + create_detrending_frames(mjd=mjd, expnums=bias_expnums, kind="bias", use_fiducial_cals=use_fiducial_cals, skip_done=skip_done, keep_ancillary=keep_ancillary) + else: + log.warning("no bias exposures found") - # create pixel flats - if pixelflats: - pixflat_frames = frames.query("imagetyp == 'pixelflat'") - create_detrending_frames(mjds, masters_mjd=masters_mjd, expnums=set(pixflat_frames.expnum), kind="pixflat") + dome_flats, dome_flat_expnums = choose_sequence(frames, flavor="flat", kind="longterm") + if len(dome_flats) != 0: + expnums_ldls = dome_flats.query("ldls").expnum.unique() + expnums_qrtz = dome_flats.query("quartz").expnum.unique() + log.info(f"found {len(dome_flats)} dome flat exposures: {dome_flat_expnums}") + create_traces(mjd=mjd, expnums_ldls=expnums_ldls, expnums_qrtz=expnums_qrtz, 
skip_done=skip_done) + # create_nightly_fiberflats(mjd=mjd, expnums_ldls=expnums_ldls, expnums_qrtz=expnums_qrtz, skip_done=skip_done) + else: + log.warning("no dome flat exposures found") - # create pixel mask - if pixelmasks: - create_pixelmasks(mjds, use_fiducial_cals=masters_mjd, - dark_expnums=set(dark_frames.expnum), - pixflat_expnums=set(pixflat_frames.expnum), - ignore_pixflats=False) + arcs, arc_expnums = choose_sequence(frames, flavor="arc", kind="longterm") + if len(arcs) != 0: + log.info(f"found {len(arcs)} arc exposures: {arc_expnums}") + create_wavelengths(mjd=mjd, expnums=arcs.expnum.unique(), skip_done=skip_done) + else: + log.warning("no arc exposures found") - # create illumination corrections - if illumination_corrections: - create_illumination_corrections(mjds, masters_mjd=masters_mjd, expnums=expnums) + twilight_flats, twilight_expnums = choose_sequence(frames, flavor="twilight", kind="longterm") + if len(twilight_flats) != 0: + log.info(f"found {len(twilight_flats)} twilight exposures: {twilight_expnums}") + create_fiberflats(mjd=mjd, expnums=twilight_expnums, skip_done=skip_done) + else: + log.warning("no twilight exposures found") class lvmFlat(lvmFrame): @@ -1374,6 +1432,7 @@ def __init__(self, data=None, error=None, mask=None, self._blueprint = dp.load_blueprint(name="lvmFlat") self._template = dp.dump_template(dataproduct_bp=self._blueprint, save=False) + if __name__ == '__main__': import tracemalloc @@ -1409,7 +1468,15 @@ def __init__(self, data=None, error=None, mask=None, # expnums = [7352] # create_fiberflats(mjd=60255, expnums=expnums, median_box=10, niter=1000, threshold=(0.5,2.5), nknots=60, skip_done=True, display_plots=False) - reduce_nightly_sequence(mjd=60265, reject_cr=False, use_fiducial_cals=False, skip_done=True, keep_ancillary=True) + # reduce_nightly_sequence(mjd=60265, reject_cr=False, use_fiducial_cals=False, skip_done=True, keep_ancillary=True) + reduce_longterm_sequence(mjd=60265, reject_cr=False, use_fiducial_cals=False, skip_done=True, keep_ancillary=True) + + # frames = md.get_frames_metadata(60264) + # frames.sort_values(by="expnum", inplace=True) + # frames, sequence = split_sequences(frames, flavor="arc", kind="nightly") + # print(sequence) + # print(frames.to_string()) + except Exception as e: # snapshot = tracemalloc.take_snapshot() # top_stats = snapshot.statistics('lineno') From 2700262285c08c3019331c0e025a8e99f032e32e Mon Sep 17 00:00:00 2001 From: Alfredo Mejia-Narvaez Date: Sat, 4 May 2024 16:39:16 -0400 Subject: [PATCH 02/43] quick fix to frames metadata --- python/lvmdrp/functions/run_calseq.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/python/lvmdrp/functions/run_calseq.py b/python/lvmdrp/functions/run_calseq.py index cddb32d9..d0dfade3 100644 --- a/python/lvmdrp/functions/run_calseq.py +++ b/python/lvmdrp/functions/run_calseq.py @@ -93,6 +93,14 @@ def get_sequence_metadata(mjd, expnums=None, exptime=None): if exptime is not None: frames.query("exptime == @exptime", inplace=True) + # simple fix of imagetyp, some images have the wrong type in the header + twilight_selection = (frames.imagetyp == "flat") & ~(frames.ldls|frames.quartz) + domeflat_selection = (frames.ldls|frames.quartz) & ~(frames.neon|frames.hgne|frames.argon|frames.xenon) + arc_selection = (frames.neon|frames.hgne|frames.argon|frames.xenon) & ~(frames.ldls|frames.quartz) + frames.loc[twilight_selection, "imagetyp"] = "flat" + frames.loc[domeflat_selection, "imagetyp"] = "flat" + frames.loc[arc_selection, "imagetyp"] = "arc" + 
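The boolean lamp-flag selections above are the core of this quick fix. A minimal standalone sketch of the same reclassification on a toy frames table (column names follow the patch; the rows and values are hypothetical):

import pandas as pd

# toy metadata table: expnum 101 has arc lamps on but IMAGETYP='object' in its header
frames = pd.DataFrame({
    "expnum":   [100, 101, 102],
    "imagetyp": ["flat", "object", "flat"],
    "ldls":     [False, False, True],
    "quartz":   [False, False, False],
    "neon":     [False, True, False],
    "hgne":     [False, True, False],
    "argon":    [False, False, False],
    "xenon":    [False, False, False],
})

# same selection logic as the patch adds to get_sequence_metadata:
# arc lamps on and continuum lamps off means the frame is really an arc
arc_selection = (frames.neon | frames.hgne | frames.argon | frames.xenon) & ~(frames.ldls | frames.quartz)
frames.loc[arc_selection, "imagetyp"] = "arc"
print(frames.imagetyp.tolist())  # ['flat', 'arc', 'flat']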
frames.sort_values(["expnum", "camera"], inplace=True) return frames @@ -1469,7 +1477,7 @@ def __init__(self, data=None, error=None, mask=None, # create_fiberflats(mjd=60255, expnums=expnums, median_box=10, niter=1000, threshold=(0.5,2.5), nknots=60, skip_done=True, display_plots=False) # reduce_nightly_sequence(mjd=60265, reject_cr=False, use_fiducial_cals=False, skip_done=True, keep_ancillary=True) - reduce_longterm_sequence(mjd=60265, reject_cr=False, use_fiducial_cals=False, skip_done=True, keep_ancillary=True) + reduce_longterm_sequence(mjd=60177, reject_cr=False, use_fiducial_cals=False, skip_done=True, keep_ancillary=True) # frames = md.get_frames_metadata(60264) # frames.sort_values(by="expnum", inplace=True) From fd337d54052cd95810f281990cf043299ecbe696 Mon Sep 17 00:00:00 2001 From: Alfredo Mejia-Narvaez Date: Sat, 4 May 2024 23:24:57 -0400 Subject: [PATCH 03/43] implementing QC shift reports & interactive mode to choose solve discrepancies --- bin/drp | 11 ++--- python/lvmdrp/functions/imageMethod.py | 63 +++++++++++++++++++++----- python/lvmdrp/functions/run_calseq.py | 33 ++++++++++++-- 3 files changed, 83 insertions(+), 24 deletions(-) diff --git a/bin/drp b/bin/drp index 1a81082c..f6415740 100755 --- a/bin/drp +++ b/bin/drp @@ -257,18 +257,13 @@ cli.add_command(nightly_calibrations) @click.option('-y', '--y-widths', type=int, default=20, help='the width of the cross-dispersion aperture in pixels') @click.option('-f', '--flat-spikes', type=int, default=21, help='the window within which to filter spikes') @click.option('-t', '--threshold-spikes', type=float, default=0.6, help='the threshold for the spikes filtering') -def fix_pixel_shifts(mjd, expnums, ref_expnums, wave_widths, y_widths, flat_spikes, threshold_spikes): +@click.option('-i', '--interactive', is_flag=True, default=False, help='flag to run in interactive mode when QC and DRP discrepancies are found') +def fix_pixel_shifts(mjd, expnums, ref_expnums, wave_widths, y_widths, flat_spikes, threshold_spikes, interactive): """ Fix the electronic pixel shifts in the raw data """ - # TODO: read Dmitry's reports on shifts - # TODO: calculate shifts and compare to Dmitry's - # TODO: if different, visually inspect the suspect frames - # TODO: manually correct shifts and pass as `shift_rows` parameter - # TODO: add shift corrections as header metadata - fix_raw_pixel_shifts(mjd=mjd, expnums=expnums, ref_expnums=ref_expnums, wave_widths=wave_widths, y_widths=y_widths, flat_spikes=flat_spikes, threshold_spikes=threshold_spikes, - skip_done=True) + interactive=interactive, skip_done=True) # register fix pixel shifts command cli.add_command(fix_pixel_shifts) diff --git a/python/lvmdrp/functions/imageMethod.py b/python/lvmdrp/functions/imageMethod.py index 97e55cf8..18b930fc 100644 --- a/python/lvmdrp/functions/imageMethod.py +++ b/python/lvmdrp/functions/imageMethod.py @@ -536,9 +536,9 @@ def select_lines_2d(in_images, out_mask, in_cent_traces, in_waves, lines_list=No return lines_mask_2d, mtrace, mwave -def fix_pixel_shifts(in_images, out_images, ref_images, in_mask, +def fix_pixel_shifts(in_images, out_images, ref_images, in_mask, report=None, max_shift=10, threshold_spikes=0.6, flat_spikes=11, - fill_gaps=20, shift_rows=None, display_plots=False): + fill_gaps=20, shift_rows=None, interactive=False, display_plots=False): """Corrects pixel shifts in raw frames based on reference frames and a selection of spectral regions Given a set of raw frames, reference frames and a mask, this function corrects pixel shifts @@ -554,6 +554,8 @@ 
def fix_pixel_shifts(in_images, out_images, ref_images, in_mask, list of input reference images for the same spectrograph in_mask : str input mask file for the channel stacked frame + report : dict, optional + input report with keys (spec, expnum) and values (shift_rows, amount), by default None max_shift : int, optional maximum shift in pixels, by default 10 threshold_spikes : float, optional @@ -562,6 +564,8 @@ def fix_pixel_shifts(in_images, out_images, ref_images, in_mask, width of the spike removal, by default 11 fill_gaps : int, optional width of the gap filling, by default 20 + interactive : bool, optional + interactive mode, by default False display_plots : bool, optional display plots, by default False @@ -629,16 +633,50 @@ def fix_pixel_shifts(in_images, out_images, ref_images, in_mask, raw_shifts = copy(shifts) corrs = numpy.zeros_like(shifts) - apply_shift = numpy.any(numpy.abs(shifts)>0) - if apply_shift: + # read QC reports with the electronic pixel shifts + if report is not None: + shift_rows, amount = report + qshifts = numpy.zeros(cdata.shape[0]) + for irow in shift_rows: + qshifts[irow:] = amount + else: + qshifts = None + + # compare QC reports with the electronic pixel shifts + apply_shifts = numpy.any(shifts) + if qshifts is not None: + qshifted_rows = numpy.where(numpy.gradient(qshifts) > 0)[0][1::2].tolist() shifted_rows = numpy.where(numpy.gradient(shifts) > 0)[0][1::2].tolist() - log.info(f"applying shifts to {shifted_rows = } ({numpy.sum(numpy.abs(shifts)>0)}) rows") - for image_out, out_image in zip(images_out, out_images): - image = copy(image_out) - mjd = image._header.get("SMJD", image._header["MJD"]) - expnum, camera = image._header["EXPOSURE"], image._header["CCD"] - imagetyp = image._header["IMAGETYP"] + log.info(f"QC reports shifted rows: {qshifted_rows}") + log.info(f"DRP shifted rows: {shifted_rows}") + if not numpy.all(qshifts == shifts): + log.warning("QC reports and DRP do not agree on the shifted rows") + if interactive: + log.info("interactive mode enabled") + answer = input("apply [q]c, [d]rp or [c]ustom shifts: ") + if answer.lower() == "q": + shifts = qshifts + log.info("choosing QC shifts") + elif answer.lower() == "d": + log.info("choosing DRP shifts") + elif answer.lower() == "c": + log.info("choosing custom shifts") + answer = input("provide custom shifts and press enter: ") + shifts = numpy.array([int(_) for _ in answer.split()]) + apply_shifts = numpy.any(numpy.abs(shifts)>0) + else: + log.warning(f"no shift will be applied to the images: {in_images}") + apply_shifts = False + for image_out, out_image in zip(images_out, out_images): + image = copy(image_out) + mjd = image._header.get("SMJD", image._header["MJD"]) + expnum, camera = image._header["EXPOSURE"], image._header["CCD"] + imagetyp = image._header["IMAGETYP"] + + if apply_shifts: + shifted_rows = numpy.where(numpy.gradient(shifts) > 0)[0][1::2].tolist() + log.info(f"applying shifts from rows {shifted_rows} ({numpy.sum(numpy.abs(shifts)>0)} affected rows)") for irow in range(len(shifts)): if shifts[irow] > 0: image_out._data[irow, :] = numpy.roll(image._data[irow, :], int(shifts[irow])) @@ -647,6 +685,7 @@ def fix_pixel_shifts(in_images, out_images, ref_images, in_mask, log.info(f"writing corrected image to {os.path.basename(out_image)}") image_out.writeFitsData(out_image) + if numpy.any(shifts): log.info("plotting results") fig, ax = create_subplots(to_display=display_plots, figsize=(15,7), sharex=True, layout="constrained") ax.set_title(f"{mjd = } - {expnum = } - {camera = } - 
{imagetyp = }", loc="left") @@ -665,8 +704,8 @@ def fix_pixel_shifts(in_images, out_images, ref_images, in_mask, figure_path="qa", label="pixel_shifts" ) - else: - log.info("no pixel shifts detected, no correction applied") + else: + log.info(f"no pixel shifts detected on frames: {in_images}") return shifts, corrs, images_out diff --git a/python/lvmdrp/functions/run_calseq.py b/python/lvmdrp/functions/run_calseq.py index d0dfade3..e413766f 100644 --- a/python/lvmdrp/functions/run_calseq.py +++ b/python/lvmdrp/functions/run_calseq.py @@ -170,6 +170,24 @@ def choose_sequence(frames, flavor, kind): return chosen_frames, chosen_expnums +def _load_shift_report(mjd): + """Reads QC reports with the electronic pixel shifts""" + + with open(os.path.join(os.environ["LVM_SANDBOX"], "shift_monitor", f"shift_{mjd}.txt"), "r") as f: + lines = f.readlines()[2:] + + shifts_report = {} + for line in lines: + cols = line.split() + _, exp, _, spec = cols[:4] + exp = int(exp) + spec = spec[-1] + shifts = np.array([int(_) for _ in cols[4:]]) + shifts_report[(spec, exp)] = (shifts[::2]+1, shifts[1::2]) + + return shifts_report + + def _clean_ancillary(mjd, expnums=None, kind="all"): """Clean ancillary files @@ -343,7 +361,7 @@ def messup_frame(mjd, expnum, spec="1", shifts=[1500, 2000, 3500], shift_size=-2 def fix_raw_pixel_shifts(mjd, ref_expnums, use_fiducial_cals=True, expnums=None, specs="123", y_widths=5, wave_list=None, wave_widths=0.6*5, max_shift=10, flat_spikes=11, - threshold_spikes=np.inf, shift_rows=None, skip_done=False, + threshold_spikes=np.inf, shift_rows=None, interactive=False, skip_done=False, display_plots=False): """Attempts to fix pixel shifts in a list of raw frames @@ -377,6 +395,8 @@ def fix_raw_pixel_shifts(mjd, ref_expnums, use_fiducial_cals=True, expnums=None, Threshold for spikes, by default np.inf shift_rows : dict Rows to shift, by default None + interactive : bool + Interactive mode when report and measured shifts are different, by default False skip_done : bool Skip pipeline steps that have already been done display_plots : bool @@ -399,6 +419,10 @@ def fix_raw_pixel_shifts(mjd, ref_expnums, use_fiducial_cals=True, expnums=None, if not imagetyps.issubset(ref_imagetyps): raise ValueError(f"the following image types are not present in the reference frames: {imagetyps - ref_imagetyps}") + shifts_path = os.path.join(os.getenv('LVM_SANDBOX'), 'shift_monitor', f'shift_{mjd}.txt') + if os.path.isfile(shifts_path): + shifts_report = _load_shift_report(mjd) + expnums_grp = frames.groupby("expnum") for spec in specs: for expnum in expnums_grp.groups: @@ -433,9 +457,10 @@ def fix_raw_pixel_shifts(mjd, ref_expnums, use_fiducial_cals=True, expnums=None, display_plots=display_plots) image_tasks.fix_pixel_shifts(in_images=rframe_paths, out_images=eframe_paths, - ref_images=cframe_paths, in_mask=mask_2d_path, flat_spikes=flat_spikes, - threshold_spikes=threshold_spikes, max_shift=max_shift, shift_rows=shift_rows.get((spec, expnum), None), - display_plots=display_plots) + ref_images=cframe_paths, in_mask=mask_2d_path, report=shifts_report.get((spec, expnum), None), + flat_spikes=flat_spikes, threshold_spikes=threshold_spikes, + max_shift=max_shift, shift_rows=shift_rows.get((spec, expnum), None), + interactive=interactive, display_plots=display_plots) def reduce_2d(mjd, use_fiducial_cals=True, expnums=None, exptime=None, From c3a5e2c4393f6c8a7d73ae48f495685113c72dd9 Mon Sep 17 00:00:00 2001 From: Alfredo Mejia-Narvaez Date: Sun, 5 May 2024 00:54:37 -0400 Subject: [PATCH 04/43] implementing guess 
of reference frame --- python/lvmdrp/functions/imageMethod.py | 2 +- python/lvmdrp/functions/run_calseq.py | 52 +++++++++++++++++++++++--- 2 files changed, 48 insertions(+), 6 deletions(-) diff --git a/python/lvmdrp/functions/imageMethod.py b/python/lvmdrp/functions/imageMethod.py index 18b930fc..04fc49ee 100644 --- a/python/lvmdrp/functions/imageMethod.py +++ b/python/lvmdrp/functions/imageMethod.py @@ -686,7 +686,7 @@ def fix_pixel_shifts(in_images, out_images, ref_images, in_mask, report=None, image_out.writeFitsData(out_image) if numpy.any(shifts): - log.info("plotting results") + log.info(f"plotting results for {out_image}") fig, ax = create_subplots(to_display=display_plots, figsize=(15,7), sharex=True, layout="constrained") ax.set_title(f"{mjd = } - {expnum = } - {camera = } - {imagetyp = }", loc="left") y_pixels = numpy.arange(shifts.size) diff --git a/python/lvmdrp/functions/run_calseq.py b/python/lvmdrp/functions/run_calseq.py index e413766f..b7c0644c 100644 --- a/python/lvmdrp/functions/run_calseq.py +++ b/python/lvmdrp/functions/run_calseq.py @@ -188,6 +188,36 @@ def _load_shift_report(mjd): return shifts_report +def _get_reference_expnum(frame, ref_frames): + """Get reference frame for a given frame + + Given a frame and a set of reference frames, get the reference frame for the + given frame. This routine will return the reference frame with the closest + exposure number to the given frame. + + Parameters: + ---------- + frame : pd.Series + Frame metadata + ref_frames : pd.DataFrame + Reference frames metadata + + Returns: + ------- + pd.Series + Reference frame metadata + """ + refs = ref_frames.loc[(ref_frames.imagetyp == frame.imagetyp) & (ref_frames.ldls == frame.ldls) & (ref_frames.quartz == frame.quartz)] + if len(refs) == 0: + raise ValueError(f"no reference frame found for {frame.imagetyp}") + idx = np.argmin(refs.expnum.sub(frame.expnum).abs()) + if idx > 0: + idx -= 1 + if idx == 0: + idx += 1 + return refs.iloc[idx] + + def _clean_ancillary(mjd, expnums=None, kind="all"): """Clean ancillary files @@ -408,8 +438,10 @@ def fix_raw_pixel_shifts(mjd, ref_expnums, use_fiducial_cals=True, expnums=None, elif not isinstance(shift_rows, dict): raise ValueError("shift_rows must be a dictionary with keys (spec, expnum) and values a list of rows to shift") - ref_frames = get_sequence_metadata(mjd, expnums=ref_expnums) + # get target frames & reference frames metadata frames = get_sequence_metadata(mjd, expnums=expnums) + ref_frames = get_sequence_metadata(mjd, expnums=ref_expnums) + if use_fiducial_cals: masters_mjd = get_master_mjd(mjd) masters_path = os.path.join(MASTERS_DIR, str(masters_mjd)) @@ -427,12 +459,20 @@ def fix_raw_pixel_shifts(mjd, ref_expnums, use_fiducial_cals=True, expnums=None, for spec in specs: for expnum in expnums_grp.groups: frame = expnums_grp.get_group(expnum).iloc[0] - ref_expnum = ref_frames.query("imagetyp == @frame.imagetyp").expnum.iloc[0] + + # find suitable reference frame for current frame + ref_frame = _get_reference_expnum(frame, ref_frames) + ref_expnum = ref_frame.expnum rframe_paths = sorted(path.expand("lvm_raw", hemi="s", camspec=f"?{spec}", mjd=mjd, expnum=expnum)) - cframe_paths = sorted(path.expand("lvm_raw", hemi="s", camspec=f"?{spec}", mjd=mjd, expnum=ref_expnum)) rframe_paths = [rframe_path for rframe_path in rframe_paths if ".gz" in rframe_path] - cframe_paths = [cframe_path for cframe_path in cframe_paths if ".gz" in cframe_path] + + # use fixed reference if exist, else use original raw frame + cframe_paths = 
sorted([path.full("lvm_anc", drpver=drpver, tileid=frame.tileid, mjd=mjd, kind="e", imagetype=frame.imagetyp, expnum=ref_expnum, camera=f"{channel}{spec}") for channel in "brz"]) + if not all([os.path.exists(cframe_path) for cframe_path in cframe_paths]): + cframe_paths = sorted(path.expand("lvm_raw", hemi="s", camspec=f"?{spec}", mjd=mjd, expnum=ref_expnum)) + cframe_paths = [cframe_path for cframe_path in cframe_paths if ".gz" in cframe_path] + eframe_paths = [path.full("lvm_anc", drpver=drpver, tileid=frame.tileid, mjd=mjd, kind="e", imagetype=frame.imagetyp, expnum=expnum, camera=f"{channel}{spec}") for channel in "brz"] mask_2d_path = path.full("lvm_anc", drpver=drpver, tileid=11111, mjd=mjd, imagetype="mask2d", expnum=0, camera=f"sp{spec}", kind="") @@ -1502,7 +1542,7 @@ def __init__(self, data=None, error=None, mask=None, # create_fiberflats(mjd=60255, expnums=expnums, median_box=10, niter=1000, threshold=(0.5,2.5), nknots=60, skip_done=True, display_plots=False) # reduce_nightly_sequence(mjd=60265, reject_cr=False, use_fiducial_cals=False, skip_done=True, keep_ancillary=True) - reduce_longterm_sequence(mjd=60177, reject_cr=False, use_fiducial_cals=False, skip_done=True, keep_ancillary=True) + # reduce_longterm_sequence(mjd=60177, reject_cr=False, use_fiducial_cals=False, skip_done=True, keep_ancillary=True) # frames = md.get_frames_metadata(60264) # frames.sort_values(by="expnum", inplace=True) @@ -1510,6 +1550,8 @@ def __init__(self, data=None, error=None, mask=None, # print(sequence) # print(frames.to_string()) + fix_raw_pixel_shifts(mjd=60412, expnums=[16148], ref_expnums=None, wave_widths=5000, y_widths=20, flat_spikes=21, threshold_spikes=0.6, skip_done=True, interactive=True, display_plots=True) + except Exception as e: # snapshot = tracemalloc.take_snapshot() # top_stats = snapshot.statistics('lineno') From 82c1b6bac0b241d4730db5305cceeafdaf46dcae Mon Sep 17 00:00:00 2001 From: Alfredo Mejia-Narvaez Date: Sun, 5 May 2024 12:56:59 -0400 Subject: [PATCH 05/43] handling annoying extra lines --- python/lvmdrp/functions/run_calseq.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/python/lvmdrp/functions/run_calseq.py b/python/lvmdrp/functions/run_calseq.py index b7c0644c..c9480fba 100644 --- a/python/lvmdrp/functions/run_calseq.py +++ b/python/lvmdrp/functions/run_calseq.py @@ -178,7 +178,11 @@ def _load_shift_report(mjd): shifts_report = {} for line in lines: - cols = line.split() + cols = line[:-1].split() + if not cols: + continue + print(cols) + cols = [col for col in cols if col] _, exp, _, spec = cols[:4] exp = int(exp) spec = spec[-1] From 714af30cde273f293403777058be296110f9617a Mon Sep 17 00:00:00 2001 From: Alfredo Mejia-Narvaez Date: Sun, 5 May 2024 12:58:10 -0400 Subject: [PATCH 06/43] cleaning print --- python/lvmdrp/functions/run_calseq.py | 1 - 1 file changed, 1 deletion(-) diff --git a/python/lvmdrp/functions/run_calseq.py b/python/lvmdrp/functions/run_calseq.py index c9480fba..ff9e8f7c 100644 --- a/python/lvmdrp/functions/run_calseq.py +++ b/python/lvmdrp/functions/run_calseq.py @@ -181,7 +181,6 @@ def _load_shift_report(mjd): cols = line[:-1].split() if not cols: continue - print(cols) cols = [col for col in cols if col] _, exp, _, spec = cols[:4] exp = int(exp) From 3af6c5d07b167b67d721b9a06ddd746a92bf9ede Mon Sep 17 00:00:00 2001 From: Alfredo Mejia-Narvaez Date: Sun, 5 May 2024 17:59:05 -0400 Subject: [PATCH 07/43] better handling of conflicting shifts (fixes Implement Dmitry's pixel shifts reports #85) --- 
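For reference, this is how a single line of the QC shift report is interpreted by the _load_shift_report helper introduced above. An editor's sketch only: the sample line is hypothetical and the column layout is inferred from the parsing code, not from a documented file format; the first and third columns are ignored by the parser.

import numpy as np

# hypothetical report line: two ignored columns, expnum, spectrograph, then (row, amount) pairs
line = "60412 16148 z sp2 1500 -2 3500 -2\n"
cols = line[:-1].split()
_, exp, _, spec = cols[:4]
shifts = np.array([int(c) for c in cols[4:]])
# the +1 row offset mirrors what _load_shift_report applies to the reported rows
shift_rows, amounts = shifts[::2] + 1, shifts[1::2]
print(int(exp), spec[-1], shift_rows.tolist(), amounts.tolist())
# -> 16148 2 [1501, 3501] [-2, -2]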
python/lvmdrp/functions/imageMethod.py | 200 +++++++++++++++++-------- python/lvmdrp/functions/run_calseq.py | 1 - 2 files changed, 141 insertions(+), 60 deletions(-) diff --git a/python/lvmdrp/functions/imageMethod.py b/python/lvmdrp/functions/imageMethod.py index 04fc49ee..d1264053 100644 --- a/python/lvmdrp/functions/imageMethod.py +++ b/python/lvmdrp/functions/imageMethod.py @@ -407,6 +407,100 @@ def _fix_fiber_thermal_shifts(image, trace_cent, trace_width=None, trace_amp=Non return trace_cent_fixed, column_shifts, model +def _apply_electronic_shifts(images, out_images, drp_shifts, qc_shifts=None, custom_shifts=None, raw_shifts=None, + which_shifts="drp", apply_shifts=True, dry_run=False, display_plots=False): + """Applies the chosen electronic pixel shifts to the images and plots the results + + Parameters + ---------- + images : list + list of input images + out_images : list + list of output images + drp_shifts : numpy.ndarray + DRP electronic pixel shifts + qc_shifts : numpy.ndarray + QC electronic pixel shifts, by default None + custom_shifts : numpy.ndarray + custom electronic pixel shifts, by default None + raw_shifts : numpy.ndarray + raw DRP electronic pixel shifts, by default None + which_shifts : str + chosen electronic pixel shifts, by default "drp" + apply_shifts : bool + apply the shifts, by default True + dry_run : bool + dry run mode (does not save corrected images), by default False + display_plots : bool + display plots, by default False + + Returns + ------- + list + list of corrected images + numpy.ndarray + the chosen electronic pixel shifts + str + name of the chosen electronic pixel shifts ('drp', 'qc' or 'custom') + """ + images_out = [copy(image) for image in images] + for image, image_out, out_image in zip(images, images_out, out_images): + mjd = image._header.get("SMJD", image._header["MJD"]) + expnum, camera = image._header["EXPOSURE"], image._header["CCD"] + imagetyp = image._header["IMAGETYP"] + + if which_shifts == "drp": + this_shifts = drp_shifts + image_color = "Blues" + elif which_shifts == "qc": + this_shifts = qc_shifts + image_color = "Greens" + elif which_shifts == "custom": + this_shifts = custom_shifts + image_color = "Purples" + else: + this_shifts = drp_shifts + + if apply_shifts: + shifted_rows = numpy.where(numpy.gradient(this_shifts) > 0)[0][1::2].tolist() + log.info(f"applying shifts from rows {shifted_rows} ({numpy.sum(numpy.abs(this_shifts)>0)} affected rows)") + for irow in range(len(this_shifts)): + if this_shifts[irow] > 0: + image_out._data[irow, :] = numpy.roll(image._data[irow, :], int(this_shifts[irow])) + + if not dry_run: + log.info(f"writing corrected image to {os.path.basename(out_image)}") + image_out.writeFitsData(out_image) + images_out.append(image_out) + + log.info(f"plotting results for {out_image}") + fig, ax = create_subplots(to_display=display_plots, figsize=(15,7), sharex=True, layout="constrained") + ax.set_title(f"{mjd = } - {expnum = } - {camera = } - {imagetyp = }", loc="left") + y_pixels = numpy.arange(this_shifts.size) + if raw_shifts is not None: + ax.step(y_pixels, raw_shifts, where="mid", lw=0.5, color="0.9", label="raw DRP") + ax.step(y_pixels, this_shifts, where="mid", color="k", lw=3) + ax.step(y_pixels, drp_shifts, where="mid", lw=1, color="tab:blue", label="DRP") + if qc_shifts is not None: + ax.step(y_pixels, qc_shifts, where="mid", lw=2, color="tab:green", label="QC") + if custom_shifts is not None: + ax.step(y_pixels, custom_shifts, where="mid", lw=2, color="tab:purple", label="custom shifts") + 
ax.legend(loc="lower right", frameon=False) + ax.set_xlabel("Y (pixel)") + ax.set_ylabel("Shift (pixel)") + plot_image_shift(ax, image._data, this_shifts, cmap="Reds") + axis = plot_image_shift(ax, image_out._data, this_shifts, cmap=image_color, inset_pos=(0.14,1.0-0.32)) + plt.setp(axis, yticklabels=[], ylabel="") + save_fig( + fig, + product_path=out_image, + to_display=display_plots, + figure_path="qa", + label="pixel_shifts" + ) + return images_out, this_shifts, which_shifts + + def select_lines_2d(in_images, out_mask, in_cent_traces, in_waves, lines_list=None, y_widths=3, wave_widths=0.6*5, image_shape=(4080, 4120), channels="brz", display_plots=False): """Selects spectral features based on a list of wavelengths from a 2D raw frame @@ -593,18 +687,24 @@ def fix_pixel_shifts(in_images, out_images, ref_images, in_mask, report=None, rdata = copy(image._data) rdata = numpy.nan_to_num(rdata, nan=0) * mask._data - # load input images into output array to apply corrections if needed - images_out = [loadImage(in_image) for in_image in in_images] + # load input images and initialize output images + images = [loadImage(in_image) for in_image in in_images] + images_out = images + + # initialize custom shifts + cshifts = None + apply_shifts = True + which_shifts = "drp" # calculate pixel shifts or use provided ones if shift_rows is None: log.info("running row-by-row cross-correlation") - shifts, corrs = [], [] + dshifts, corrs = [], [] for irow in range(rdata.shape[0]): cimg_row = cdata[irow] rimg_row = rdata[irow] if numpy.all(cimg_row == 0) or numpy.all(rimg_row == 0): - shifts.append(0) + dshifts.append(0) corrs.append(0) continue @@ -616,24 +716,24 @@ def fix_pixel_shifts(in_images, out_images, ref_images, in_mask, report=None, corr = corr[mask] max_corr = numpy.argmax(corr) - shifts.append(shift[max_corr]) + dshifts.append(shift[max_corr]) corrs.append(corr[max_corr]) - shifts = numpy.asarray(shifts) + dshifts = numpy.asarray(dshifts) corrs = numpy.asarray(corrs) - raw_shifts = copy(shifts) - shifts = _remove_spikes(shifts, width=flat_spikes, threshold=threshold_spikes) - shifts = _fillin_valleys(shifts, width=fill_gaps) - shifts = _no_stepdowns(shifts) + raw_shifts = copy(dshifts) + dshifts = _remove_spikes(dshifts, width=flat_spikes, threshold=threshold_spikes) + dshifts = _fillin_valleys(dshifts, width=fill_gaps) + dshifts = _no_stepdowns(dshifts) else: log.info("using user provided pixel shifts") - shifts = numpy.zeros(cdata.shape[0]) + cshifts = numpy.zeros(cdata.shape[0]) for irow in shift_rows: - shifts[irow:] += 2 - raw_shifts = copy(shifts) - corrs = numpy.zeros_like(shifts) + cshifts[irow:] += 2 + raw_shifts = copy(cshifts) + corrs = numpy.zeros_like(cshifts) - # read QC reports with the electronic pixel shifts + # parse QC reports with the electronic pixel shifts if report is not None: shift_rows, amount = report qshifts = numpy.zeros(cdata.shape[0]) @@ -643,69 +743,51 @@ def fix_pixel_shifts(in_images, out_images, ref_images, in_mask, report=None, qshifts = None # compare QC reports with the electronic pixel shifts - apply_shifts = numpy.any(shifts) if qshifts is not None: qshifted_rows = numpy.where(numpy.gradient(qshifts) > 0)[0][1::2].tolist() - shifted_rows = numpy.where(numpy.gradient(shifts) > 0)[0][1::2].tolist() + shifted_rows = numpy.where(numpy.gradient(dshifts) > 0)[0][1::2].tolist() log.info(f"QC reports shifted rows: {qshifted_rows}") log.info(f"DRP shifted rows: {shifted_rows}") - if not numpy.all(qshifts == shifts): + if not numpy.all(qshifts == dshifts): + 
_apply_electronic_shifts(images=images, out_images=out_images, + drp_shifts=dshifts, qc_shifts=qshifts, raw_shifts=raw_shifts, + which_shifts="drp", apply_shifts=True, + dry_run=True, display_plots=display_plots) log.warning("QC reports and DRP do not agree on the shifted rows") if interactive: log.info("interactive mode enabled") answer = input("apply [q]c, [d]rp or [c]ustom shifts: ") if answer.lower() == "q": - shifts = qshifts log.info("choosing QC shifts") + shifts = qshifts + which_shifts = "qc" elif answer.lower() == "d": log.info("choosing DRP shifts") + shifts = dshifts + which_shifts = "drp" elif answer.lower() == "c": log.info("choosing custom shifts") - answer = input("provide custom shifts and press enter: ") - shifts = numpy.array([int(_) for _ in answer.split()]) + answer = input("provide comma-separated custom shifts and press enter: ") + shift_rows = numpy.array([int(_) for _ in answer.split(",")]) + cshifts = numpy.zeros(cdata.shape[0]) + for irow in shift_rows: + cshifts[irow:] += 2 + shifts = cshifts + corrs = numpy.zeros_like(cshifts) + which_shifts = "custom" apply_shifts = numpy.any(numpy.abs(shifts)>0) else: log.warning(f"no shift will be applied to the images: {in_images}") apply_shifts = False - for image_out, out_image in zip(images_out, out_images): - image = copy(image_out) - mjd = image._header.get("SMJD", image._header["MJD"]) - expnum, camera = image._header["EXPOSURE"], image._header["CCD"] - imagetyp = image._header["IMAGETYP"] - - if apply_shifts: - shifted_rows = numpy.where(numpy.gradient(shifts) > 0)[0][1::2].tolist() - log.info(f"applying shifts from rows {shifted_rows} ({numpy.sum(numpy.abs(shifts)>0)} affected rows)") - for irow in range(len(shifts)): - if shifts[irow] > 0: - image_out._data[irow, :] = numpy.roll(image._data[irow, :], int(shifts[irow])) - - # write corrected image - log.info(f"writing corrected image to {os.path.basename(out_image)}") - image_out.writeFitsData(out_image) - - if numpy.any(shifts): - log.info(f"plotting results for {out_image}") - fig, ax = create_subplots(to_display=display_plots, figsize=(15,7), sharex=True, layout="constrained") - ax.set_title(f"{mjd = } - {expnum = } - {camera = } - {imagetyp = }", loc="left") - y_pixels = numpy.arange(shifts.size) - ax.step(y_pixels, raw_shifts, where="mid", lw=1, color="red", linestyle="--") - ax.step(y_pixels, shifts, where="mid", lw=1) - ax.set_xlabel("Y (pixel)") - ax.set_ylabel("Shift (pixel)") - plot_image_shift(ax, image._data, shifts, cmap="Reds") - axis = plot_image_shift(ax, image_out._data, shifts, cmap="Blues", inset_pos=(0.14,1.0-0.32)) - plt.setp(axis, yticklabels=[], ylabel="") - save_fig( - fig, - product_path=out_image, - to_display=display_plots, - figure_path="qa", - label="pixel_shifts" - ) - else: - log.info(f"no pixel shifts detected on frames: {in_images}") + # apply pixel shifts to the images + images_out, _, _, = _apply_electronic_shifts(images=images, out_images=out_images, + drp_shifts=dshifts, qc_shifts=qshifts, custom_shifts=cshifts, + which_shifts=which_shifts, apply_shifts=apply_shifts, + dry_run=False, display_plots=display_plots) + else: + log.info(f"no pixel shifts detected on frames: {in_images}") + shifts = dshifts return shifts, corrs, images_out diff --git a/python/lvmdrp/functions/run_calseq.py b/python/lvmdrp/functions/run_calseq.py index ff9e8f7c..733793e3 100644 --- a/python/lvmdrp/functions/run_calseq.py +++ b/python/lvmdrp/functions/run_calseq.py @@ -181,7 +181,6 @@ def _load_shift_report(mjd): cols = line[:-1].split() if not cols: 
continue - cols = [col for col in cols if col] _, exp, _, spec = cols[:4] exp = int(exp) spec = spec[-1] From 063295433cb0b0e723e1da49162ca7ca5002b4db Mon Sep 17 00:00:00 2001 From: Alfredo Mejia-Narvaez Date: Sun, 5 May 2024 18:25:14 -0400 Subject: [PATCH 08/43] refining selection of reference & fixing bugs --- python/lvmdrp/functions/imageMethod.py | 21 +++++++++---------- python/lvmdrp/functions/run_calseq.py | 28 +++++++++++++++++--------- 2 files changed, 28 insertions(+), 21 deletions(-) diff --git a/python/lvmdrp/functions/imageMethod.py b/python/lvmdrp/functions/imageMethod.py index d1264053..7362b309 100644 --- a/python/lvmdrp/functions/imageMethod.py +++ b/python/lvmdrp/functions/imageMethod.py @@ -461,7 +461,7 @@ def _apply_electronic_shifts(images, out_images, drp_shifts, qc_shifts=None, cus else: this_shifts = drp_shifts - if apply_shifts: + if apply_shifts and numpy.any(this_shifts != 0): shifted_rows = numpy.where(numpy.gradient(this_shifts) > 0)[0][1::2].tolist() log.info(f"applying shifts from rows {shifted_rows} ({numpy.sum(numpy.abs(this_shifts)>0)} affected rows)") for irow in range(len(this_shifts)): @@ -498,6 +498,8 @@ def _apply_electronic_shifts(images, out_images, drp_shifts, qc_shifts=None, cus figure_path="qa", label="pixel_shifts" ) + else: + log.info(f"no shifts to apply, not need to write to {os.path.basename(out_image)}") return images_out, this_shifts, which_shifts @@ -721,7 +723,7 @@ def fix_pixel_shifts(in_images, out_images, ref_images, in_mask, report=None, dshifts = numpy.asarray(dshifts) corrs = numpy.asarray(corrs) - raw_shifts = copy(dshifts) + raw_shifts = None dshifts = _remove_spikes(dshifts, width=flat_spikes, threshold=threshold_spikes) dshifts = _fillin_valleys(dshifts, width=fill_gaps) dshifts = _no_stepdowns(dshifts) @@ -730,7 +732,7 @@ def fix_pixel_shifts(in_images, out_images, ref_images, in_mask, report=None, cshifts = numpy.zeros(cdata.shape[0]) for irow in shift_rows: cshifts[irow:] += 2 - raw_shifts = copy(cshifts) + raw_shifts = None corrs = numpy.zeros_like(cshifts) # parse QC reports with the electronic pixel shifts @@ -780,14 +782,11 @@ def fix_pixel_shifts(in_images, out_images, ref_images, in_mask, report=None, log.warning(f"no shift will be applied to the images: {in_images}") apply_shifts = False - # apply pixel shifts to the images - images_out, _, _, = _apply_electronic_shifts(images=images, out_images=out_images, - drp_shifts=dshifts, qc_shifts=qshifts, custom_shifts=cshifts, - which_shifts=which_shifts, apply_shifts=apply_shifts, - dry_run=False, display_plots=display_plots) - else: - log.info(f"no pixel shifts detected on frames: {in_images}") - shifts = dshifts + # apply pixel shifts to the images + images_out, shifts, _, = _apply_electronic_shifts(images=images, out_images=out_images, raw_shifts=raw_shifts, + drp_shifts=dshifts, qc_shifts=qshifts, custom_shifts=cshifts, + which_shifts=which_shifts, apply_shifts=apply_shifts, + dry_run=False, display_plots=display_plots) return shifts, corrs, images_out diff --git a/python/lvmdrp/functions/run_calseq.py b/python/lvmdrp/functions/run_calseq.py index 733793e3..e04259b6 100644 --- a/python/lvmdrp/functions/run_calseq.py +++ b/python/lvmdrp/functions/run_calseq.py @@ -209,15 +209,23 @@ def _get_reference_expnum(frame, ref_frames): pd.Series Reference frame metadata """ - refs = ref_frames.loc[(ref_frames.imagetyp == frame.imagetyp) & (ref_frames.ldls == frame.ldls) & (ref_frames.quartz == frame.quartz)] - if len(refs) == 0: + refs = ref_frames.loc[ + (ref_frames.imagetyp == 
frame.imagetyp) & + (ref_frames.ldls == frame.ldls) & + (ref_frames.quartz == frame.quartz) & + (ref_frames.neon == frame.neon) & + (ref_frames.hgne == frame.hgne) & + (ref_frames.xenon == frame.xenon) & + (ref_frames.argon == frame.argon)] + ref_expnums = refs.expnum.unique() + if len(ref_expnums) == 0: raise ValueError(f"no reference frame found for {frame.imagetyp}") - idx = np.argmin(refs.expnum.sub(frame.expnum).abs()) + idx = np.argmin(np.abs(ref_expnums-frame.expnum)) if idx > 0: idx -= 1 if idx == 0: idx += 1 - return refs.iloc[idx] + return ref_expnums[idx] def _clean_ancillary(mjd, expnums=None, kind="all"): @@ -391,7 +399,7 @@ def messup_frame(mjd, expnum, spec="1", shifts=[1500, 2000, 3500], shift_size=-2 return messed_up_frames -def fix_raw_pixel_shifts(mjd, ref_expnums, use_fiducial_cals=True, expnums=None, specs="123", +def fix_raw_pixel_shifts(mjd, expnums=None, ref_expnums=None, use_fiducial_cals=True, specs="123", y_widths=5, wave_list=None, wave_widths=0.6*5, max_shift=10, flat_spikes=11, threshold_spikes=np.inf, shift_rows=None, interactive=False, skip_done=False, display_plots=False): @@ -405,12 +413,12 @@ def fix_raw_pixel_shifts(mjd, ref_expnums, use_fiducial_cals=True, expnums=None, ---------- mjd : float MJD to reduce + expnums : list + List of exposure numbers to look for pixel shifts ref_expnums : list - List of reference exposure numbers to use as reference for good frames + List of reference exposure numbers to use as reference for good frames, by default None use_fiducial_cals : bool Whether to use fiducial calibration frames or not, defaults to True - expnums : list - List of exposure numbers to look for pixel shifts specs : str Spectrograph channels y_widths : int @@ -454,6 +462,7 @@ def fix_raw_pixel_shifts(mjd, ref_expnums, use_fiducial_cals=True, expnums=None, raise ValueError(f"the following image types are not present in the reference frames: {imagetyps - ref_imagetyps}") shifts_path = os.path.join(os.getenv('LVM_SANDBOX'), 'shift_monitor', f'shift_{mjd}.txt') + shifts_report = {} if os.path.isfile(shifts_path): shifts_report = _load_shift_report(mjd) @@ -463,8 +472,7 @@ def fix_raw_pixel_shifts(mjd, ref_expnums, use_fiducial_cals=True, expnums=None, frame = expnums_grp.get_group(expnum).iloc[0] # find suitable reference frame for current frame - ref_frame = _get_reference_expnum(frame, ref_frames) - ref_expnum = ref_frame.expnum + ref_expnum = _get_reference_expnum(frame, ref_frames) rframe_paths = sorted(path.expand("lvm_raw", hemi="s", camspec=f"?{spec}", mjd=mjd, expnum=expnum)) rframe_paths = [rframe_path for rframe_path in rframe_paths if ".gz" in rframe_path] From 218fd7217ac5db58361ed833df6f5ab9bf66f74f Mon Sep 17 00:00:00 2001 From: Alfredo Mejia-Narvaez Date: Sun, 5 May 2024 19:36:17 -0400 Subject: [PATCH 09/43] fixing finding of reference image --- python/lvmdrp/functions/run_calseq.py | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/python/lvmdrp/functions/run_calseq.py b/python/lvmdrp/functions/run_calseq.py index e04259b6..ca378455 100644 --- a/python/lvmdrp/functions/run_calseq.py +++ b/python/lvmdrp/functions/run_calseq.py @@ -209,17 +209,16 @@ def _get_reference_expnum(frame, ref_frames): pd.Series Reference frame metadata """ - refs = ref_frames.loc[ - (ref_frames.imagetyp == frame.imagetyp) & - (ref_frames.ldls == frame.ldls) & - (ref_frames.quartz == frame.quartz) & - (ref_frames.neon == frame.neon) & - (ref_frames.hgne == frame.hgne) & - (ref_frames.xenon == frame.xenon) & - (ref_frames.argon == 
frame.argon)] + if frame.imagetyp == "flat" and frame.ldls|frame.quartz: + refs = ref_frames.query("imagetyp == 'flat' and ldls|quartz") + elif frame.imagetyp == "flat": + refs = ref_frames.query("imagetyp == 'flat' and not ldls|quartz") + else: + refs = ref_frames.query("imagetyp == @frame.imagetyp") + ref_expnums = refs.expnum.unique() - if len(ref_expnums) == 0: - raise ValueError(f"no reference frame found for {frame.imagetyp}") + if len(ref_expnums) < 2: + raise ValueError(f"no reference frame found for {frame.imagetyp}") idx = np.argmin(np.abs(ref_expnums-frame.expnum)) if idx > 0: idx -= 1 From 6f311736eb4f4fea2ccd2f132a3f75fd8e912871 Mon Sep 17 00:00:00 2001 From: Alfredo Mejia-Narvaez Date: Sun, 5 May 2024 23:53:43 -0400 Subject: [PATCH 10/43] implementing CLI for long-term calibrations --- bin/drp | 23 ++++++++++- python/lvmdrp/functions/run_calseq.py | 55 +++++++++++++++++++-------- 2 files changed, 61 insertions(+), 17 deletions(-) diff --git a/bin/drp b/bin/drp index f6415740..d8720060 100755 --- a/bin/drp +++ b/bin/drp @@ -17,7 +17,12 @@ from lvmdrp.utils.metadata import get_frames_metadata, get_master_metadata from lvmdrp.utils.cluster import run_cluster from lvmdrp.functions.run_quickdrp import quick_science_reduction -from lvmdrp.functions.run_calseq import MASK_BANDS, create_fiberflats, reduce_nightly_sequence, fix_raw_pixel_shifts +from lvmdrp.functions.run_calseq import ( + MASK_BANDS, + create_fiberflats, + reduce_nightly_sequence, + reduce_longterm_sequence, + fix_raw_pixel_shifts) from sdss_access import Access @@ -249,6 +254,22 @@ def nightly_calibrations(mjd, skip_cr, use_fiducial_cals, skip_done, keep_ancill cli.add_command(nightly_calibrations) +@cli.command('long-term-cals', short_help='Run the long-term calibration sequence') +@click.option('-m', '--mjd', type=int, required=True, help='the MJD to reduce') +@click.option('-r', '--skip-cr', is_flag=True, default=False, help='flag to skip cosmic rays rejection') +@click.option('-f', '--use-fiducial-cals', is_flag=True, default=False, help='flag to use fiducial calibration frames') +@click.option('-s', '--skip-done', is_flag=True, default=False, help='flag to skip reduction steps that have already been done') +@click.option('-a', '--keep-ancillary', is_flag=True, default=False, help='flag to keep ancillary files') +def long_term_calibrations(mjd, skip_cr, use_fiducial_cals, skip_done, keep_ancillary): + """ Run the long-term calibration sequence """ + reduce_longterm_sequence(mjd=mjd, reject_cr=not skip_cr, + use_fiducial_cals=use_fiducial_cals, + skip_done=skip_done, keep_ancillary=keep_ancillary) + +# register long-term calibration sequence +cli.add_command(long_term_calibrations) + + @cli.command('fix-pixel-shifts', short_help='Fix the electronic pixel shifts in the raw data') @click.option('-m', '--mjd', type=int, help='the MJD to reduce') @click.option('-e', '--expnums', type=IntListType(), help='a list of exposure numbers to reduce') diff --git a/python/lvmdrp/functions/run_calseq.py b/python/lvmdrp/functions/run_calseq.py index ca378455..acfc7e03 100644 --- a/python/lvmdrp/functions/run_calseq.py +++ b/python/lvmdrp/functions/run_calseq.py @@ -29,7 +29,7 @@ import numpy as np from glob import glob from copy import deepcopy as copy -from shutil import copy2 +from shutil import copy2, move from itertools import product, groupby from typing import List, Tuple, Dict @@ -163,7 +163,7 @@ def choose_sequence(frames, flavor, kind): expected_length = 2 if kind == "nightly" else 24 if len(chosen_expnums) != 
expected_length: - log.warning(f"wrong sequence length: {len(chosen_expnums)}") + log.warning(f"wrong sequence length for {flavor = }: {len(chosen_expnums)}, expected {expected_length}") chosen_frames = frames.query("expnum in @chosen_expnums") chosen_frames.sort_values(["expnum", "camera"], inplace=True) @@ -227,32 +227,38 @@ def _get_reference_expnum(frame, ref_frames): return ref_expnums[idx] -def _clean_ancillary(mjd, expnums=None, kind="all"): +def _clean_ancillary(mjd, expnums=None, imagetyp="all"): """Clean ancillary files Given a set of MJDs and (optionally) exposure numbers, clean the ancillary - files for the given kind of frames. This routine will remove the ancillary - files for the given kind of frames in the corresponding calibration + files for the given imagetyp of frames. This routine will remove the ancillary + files for the given imagetyp of frames in the corresponding calibration directory in the `masters_mjd` or by default in the smallest MJD in `mjds`. Parameters: ---------- mjd : int - MJD to reduce + MJD to clean expnums : list - List of exposure numbers to reduce - kind : str - Kind of frame to reduce, defaults to "all" + List of exposure numbers to clean + imagetyp : str + type of frame to remove, defaults to "all" """ # get frames metadata frames = get_sequence_metadata(mjd, expnums=expnums) # filter by target image types - if kind in ["bias", "dark", "flat", "arc", "object"]: - frames.query("imagetyp == @kind", inplace=True) - elif kind != "all": - raise ValueError(f"Invalid kind: '{kind}'. Must be one of 'bias', 'dark', 'flat', 'arc', 'object' or 'all'") + ALL = {"bias", "dark", "flat", "arc", "object", "cent", "amp", "width", "stray"} + if imagetyp in ALL: + frames.query("imagetyp == @imagetyp", inplace=True) + elif imagetyp != "all": + raise ValueError(f"Invalid imagetyp: '{imagetyp}'. 
Must be one of {ALL} or 'all'") + + if imagetyp == "all": + ancillary_dir = os.path.join(os.getenv["LVM_SPECTRO_REDUX"], drpver, str(mjd), "ancillary") + os.rmdir(ancillary_dir) + return ancillary_dirs = [] for frame in frames.to_dict("records"): @@ -1434,6 +1440,9 @@ def reduce_nightly_sequence(mjd, use_fiducial_cals=True, reject_cr=True, skip_do else: log.warning("no twilight exposures found") + if not keep_ancillary: + _clean_ancillary(mjd) + def reduce_longterm_sequence(mjd, use_fiducial_cals=True, reject_cr=True, skip_done=True, keep_ancillary=False): """Reduces the long-term calibration sequence: @@ -1481,7 +1490,6 @@ def reduce_longterm_sequence(mjd, use_fiducial_cals=True, reject_cr=True, skip_d expnums_qrtz = dome_flats.query("quartz").expnum.unique() log.info(f"found {len(dome_flats)} dome flat exposures: {dome_flat_expnums}") create_traces(mjd=mjd, expnums_ldls=expnums_ldls, expnums_qrtz=expnums_qrtz, skip_done=skip_done) - # create_nightly_fiberflats(mjd=mjd, expnums_ldls=expnums_ldls, expnums_qrtz=expnums_qrtz, skip_done=skip_done) else: log.warning("no dome flat exposures found") @@ -1499,6 +1507,17 @@ def reduce_longterm_sequence(mjd, use_fiducial_cals=True, reject_cr=True, skip_d else: log.warning("no twilight exposures found") + # move master calibrations to sandbox + kinds = {"bias", "trace", "width", "fiberflat", "wave", "lsf"} + for kind in kinds: + src_path = path.full("lvm_master", drpver=drpver, tileid=11111, mjd=mjd, kind=f"m{kind}") + dst_path = path.full("lvm_calib", drpver=drpver, mjd=mjd, kind=f"m{kind}") + os.makedirs(os.path.dirname(dst_path), exist_ok=True) + move(src_path, dst_path) + + if not keep_ancillary: + _clean_ancillary(mjd) + class lvmFlat(lvmFrame): """lvmFlat class""" @@ -1515,6 +1534,10 @@ def __init__(self, data=None, error=None, mask=None, self._template = dp.dump_template(dataproduct_bp=self._blueprint, save=False) +class lvmArc(lvmFrame): + pass + + if __name__ == '__main__': import tracemalloc @@ -1551,7 +1574,7 @@ def __init__(self, data=None, error=None, mask=None, # create_fiberflats(mjd=60255, expnums=expnums, median_box=10, niter=1000, threshold=(0.5,2.5), nknots=60, skip_done=True, display_plots=False) # reduce_nightly_sequence(mjd=60265, reject_cr=False, use_fiducial_cals=False, skip_done=True, keep_ancillary=True) - # reduce_longterm_sequence(mjd=60177, reject_cr=False, use_fiducial_cals=False, skip_done=True, keep_ancillary=True) + reduce_longterm_sequence(mjd=60264, reject_cr=False, use_fiducial_cals=True, skip_done=True, keep_ancillary=True) # frames = md.get_frames_metadata(60264) # frames.sort_values(by="expnum", inplace=True) @@ -1559,7 +1582,7 @@ def __init__(self, data=None, error=None, mask=None, # print(sequence) # print(frames.to_string()) - fix_raw_pixel_shifts(mjd=60412, expnums=[16148], ref_expnums=None, wave_widths=5000, y_widths=20, flat_spikes=21, threshold_spikes=0.6, skip_done=True, interactive=True, display_plots=True) + # fix_raw_pixel_shifts(mjd=60412, expnums=[16148], ref_expnums=None, wave_widths=5000, y_widths=20, flat_spikes=21, threshold_spikes=0.6, skip_done=True, interactive=True, display_plots=True) except Exception as e: # snapshot = tracemalloc.take_snapshot() From ba68c7b95a471dffd30b3ee8812645dca8aad782 Mon Sep 17 00:00:00 2001 From: Alfredo Mejia-Narvaez Date: Mon, 6 May 2024 00:20:36 -0400 Subject: [PATCH 11/43] fixing bug on missing references --- python/lvmdrp/functions/run_calseq.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/python/lvmdrp/functions/run_calseq.py 
b/python/lvmdrp/functions/run_calseq.py index acfc7e03..860b2583 100644 --- a/python/lvmdrp/functions/run_calseq.py +++ b/python/lvmdrp/functions/run_calseq.py @@ -493,7 +493,10 @@ def fix_raw_pixel_shifts(mjd, expnums=None, ref_expnums=None, use_fiducial_cals= expnum=0, camera=f"sp{spec}", kind="") if len(rframe_paths) < 3: - log.warning(f"skipping {rframe_paths}, less than 3 files found") + log.warning(f"skipping {rframe_paths = }, less than 3 files found") + continue + if len(cframe_paths) < 3: + log.warning(f"skipping {cframe_paths = }, less than 3 files found") continue if use_fiducial_cals: From 852052812e7f5f67805cd206765d33bb65567321 Mon Sep 17 00:00:00 2001 From: Alfredo Mejia-Narvaez Date: Mon, 6 May 2024 01:23:06 -0400 Subject: [PATCH 12/43] fixing hard-coded parameters in tracing --- python/lvmdrp/functions/imageMethod.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/python/lvmdrp/functions/imageMethod.py b/python/lvmdrp/functions/imageMethod.py index 7362b309..70e3dd4e 100644 --- a/python/lvmdrp/functions/imageMethod.py +++ b/python/lvmdrp/functions/imageMethod.py @@ -5044,8 +5044,9 @@ def trace_fibers( trace_fwhm._header["IMAGETYP"] = "trace_fwhm" trace_amp, trace_cent, trace_fwhm, columns, mod_columns, residuals = img.trace_fiber_widths(centroids, ref_column=LVM_REFERENCE_COLUMN, - ncolumns=ncolumns, nblocks=LVM_NBLOCKS, iblocks=[], - fwhm_guess=guess_fwhm, fwhm_range=[1.0,3.5], counts_threshold=counts_threshold) + ncolumns=ncolumns, nblocks=LVM_NBLOCKS, iblocks=iblocks, + fwhm_guess=guess_fwhm, fwhm_range=fwhm_limits, + counts_threshold=counts_threshold) # smooth all trace by a polynomial if fit_poly: From cc638c2756c832467af7f6c6f813711f2134b47d Mon Sep 17 00:00:00 2001 From: Alfredo Mejia-Narvaez Date: Mon, 6 May 2024 12:45:43 -0400 Subject: [PATCH 13/43] improved handling of invalid errors & fixed writting of output traces --- python/lvmdrp/functions/imageMethod.py | 6 +++++ python/lvmdrp/functions/run_calseq.py | 37 +++++++++++++------------- 2 files changed, 25 insertions(+), 18 deletions(-) diff --git a/python/lvmdrp/functions/imageMethod.py b/python/lvmdrp/functions/imageMethod.py index 70e3dd4e..d8565f39 100644 --- a/python/lvmdrp/functions/imageMethod.py +++ b/python/lvmdrp/functions/imageMethod.py @@ -4839,6 +4839,9 @@ def trace_centroids(in_image: str, img = img.convolveImg(coadd_kernel) counts_threshold = counts_threshold * coadd + # handle invalid error values + img._error[img._mask|(img._error<=0)] = numpy.inf + # calculate centroids for reference column if correct_ref: ref_cent = img.match_reference_column(ref_column=LVM_REFERENCE_COLUMN) @@ -5032,6 +5035,9 @@ def trace_fibers( img = img.convolveImg(coadd_kernel) counts_threshold = counts_threshold * coadd + # handle invalid error values + img._error[img._mask|(img._error<=0)] = numpy.inf + # trace centroids in each column log.info(f"loading guess fiber centroids from '{os.path.basename(in_trace_cent_guess)}'") centroids = TraceMask.from_file(in_trace_cent_guess) diff --git a/python/lvmdrp/functions/run_calseq.py b/python/lvmdrp/functions/run_calseq.py index 860b2583..b541c455 100644 --- a/python/lvmdrp/functions/run_calseq.py +++ b/python/lvmdrp/functions/run_calseq.py @@ -1007,6 +1007,7 @@ def create_traces(mjd, use_fiducial_cals=True, expnums_ldls=None, expnums_qrtz=N img_stray._data = np.nan_to_num(img_stray._data) img_stray = img_stray.medianImg((1,10), propagate_error=True) img_stray = img_stray.convolveImg(np.ones((1, 20), dtype="uint8")) + 
img_stray._error[img_stray._mask|(img_stray._error<=0)] = np.inf else: log.info(f"going to trace std fiber {fiber_str} in {camera} within {block_idxs = }") centroids, trace_cent_fit, trace_flux_fit, trace_fwhm_fit, img_stray, model, mratio = image_tasks.trace_fibers( @@ -1062,24 +1063,24 @@ def create_traces(mjd, use_fiducial_cals=True, expnums_ldls=None, expnums_qrtz=N mcents[camera].interpolate_data(axis="Y", extrapolate=True) mwidths[camera].interpolate_data(axis="Y", extrapolate=True) - # reset mask to propagate broken fibers - mamps[camera]._mask[bad_fibers] = True - mcents[camera]._mask[bad_fibers] = True - mwidths[camera]._mask[bad_fibers] = True + # reset mask to propagate broken fibers + mamps[camera]._mask[bad_fibers] = True + mcents[camera]._mask[bad_fibers] = True + mwidths[camera]._mask[bad_fibers] = True - # save master traces - mamp_path = path.full("lvm_master", drpver=drpver, tileid=tileid, mjd=mjd, camera=camera, kind="mamps") - mcent_path = path.full("lvm_master", drpver=drpver, tileid=tileid, mjd=mjd, camera=camera, kind="mtrace") - mwidth_path = path.full("lvm_master", drpver=drpver, tileid=tileid, mjd=mjd, camera=camera, kind="mwidth") - os.makedirs(os.path.dirname(mamp_path), exist_ok=True) - mamps[camera].writeFitsData(mamp_path) - mcents[camera].writeFitsData(mcent_path) - mwidths[camera].writeFitsData(mwidth_path) + # save master traces + mamp_path = path.full("lvm_master", drpver=drpver, tileid=tileid, mjd=mjd, camera=camera, kind="mamps") + mcent_path = path.full("lvm_master", drpver=drpver, tileid=tileid, mjd=mjd, camera=camera, kind="mtrace") + mwidth_path = path.full("lvm_master", drpver=drpver, tileid=tileid, mjd=mjd, camera=camera, kind="mwidth") + os.makedirs(os.path.dirname(mamp_path), exist_ok=True) + mamps[camera].writeFitsData(mamp_path) + mcents[camera].writeFitsData(mcent_path) + mwidths[camera].writeFitsData(mwidth_path) - # eval model continuum and ratio - model, ratio = img_stray.eval_fiber_model(mamps[camera], mcents[camera], mwidths[camera]) - model.writeFitsData(dmodel_path) - ratio.writeFitsData(dratio_path) + # eval model continuum and ratio + model, ratio = img_stray.eval_fiber_model(mamps[camera], mcents[camera], mwidths[camera]) + model.writeFitsData(dmodel_path) + ratio.writeFitsData(dratio_path) def create_fiberflats(mjd: int, use_fiducial_cals: bool = True, expnums: List[int] = None, median_box: int = 10, niter: bool = 1000, @@ -1568,7 +1569,7 @@ class lvmArc(lvmFrame): # create_detrending_frames(mjd=60171, masters_mjd=60142, expnums=np.arange(3098, 3117+1), kind="dark", assume_imagetyp="pixelflat", reject_cr=False, skip_done=False) # create_detrending_frames(mjd=60255, kind="bias", skip_done=False) - # create_traces(mjd=MJD, expnums_ldls=ldls_expnums, expnums_qrtz=qrtz_expnums, subtract_straylight=True) + # create_traces(mjd=MJD, expnums_ldls=ldls_expnums, expnums_qrtz=qrtz_expnums) # create_wavelengths(mjd=60255, masters_mjd=60255, expnums=np.arange(7276,7323+1), skip_done=True) # expnums = [7231] @@ -1577,7 +1578,7 @@ class lvmArc(lvmFrame): # create_fiberflats(mjd=60255, expnums=expnums, median_box=10, niter=1000, threshold=(0.5,2.5), nknots=60, skip_done=True, display_plots=False) # reduce_nightly_sequence(mjd=60265, reject_cr=False, use_fiducial_cals=False, skip_done=True, keep_ancillary=True) - reduce_longterm_sequence(mjd=60264, reject_cr=False, use_fiducial_cals=True, skip_done=True, keep_ancillary=True) + reduce_longterm_sequence(mjd=60264, reject_cr=False, use_fiducial_cals=True, skip_done=False, keep_ancillary=True) # frames 
= md.get_frames_metadata(60264) # frames.sort_values(by="expnum", inplace=True) From 576cc57c1e4fd790edaa5b7780a5fd8cbc34a0cd Mon Sep 17 00:00:00 2001 From: Alfredo Mejia-Narvaez Date: Mon, 6 May 2024 23:50:52 -0400 Subject: [PATCH 14/43] returning tck tuple --- python/lvmdrp/functions/run_twilights.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/python/lvmdrp/functions/run_twilights.py b/python/lvmdrp/functions/run_twilights.py index 8e11061c..b517343d 100644 --- a/python/lvmdrp/functions/run_twilights.py +++ b/python/lvmdrp/functions/run_twilights.py @@ -232,8 +232,8 @@ def fit_continuum(spectrum: Spectrum1D, mask_bands: List[Tuple[float,float]], List of continuum models for each iteration masked_pixels : np.ndarray Masked pixels in all iterations - knots : np.ndarray - Spline knots + tck : tuple + Spline parameters """ # early return if no good pixels continuum_models = [] @@ -257,8 +257,8 @@ def fit_continuum(spectrum: Spectrum1D, mask_bands: List[Tuple[float,float]], kwargs.update({"t": knots, "task": -1}) # fit first spline - f = interpolate.splrep(wave, data, **kwargs) - spline = interpolate.splev(spectrum._wave, f) + tck = interpolate.splrep(wave, data, **kwargs) + spline = interpolate.splev(spectrum._wave, tck) # iterate to mask outliers and update spline if threshold is not None and isinstance(threshold, (float, int)): @@ -272,8 +272,8 @@ def fit_continuum(spectrum: Spectrum1D, mask_bands: List[Tuple[float,float]], masked_pixels |= mask # update spline - f = interpolate.splrep(spectrum._wave[~masked_pixels], spectrum._data[~masked_pixels], **kwargs) - new_spline = interpolate.splev(spectrum._wave, f) + tck = interpolate.splrep(spectrum._wave[~masked_pixels], spectrum._data[~masked_pixels], **kwargs) + new_spline = interpolate.splev(spectrum._wave, tck) continuum_models.append(new_spline) if np.mean(np.abs(new_spline - spline) / spline) <= 0.01: break @@ -281,7 +281,7 @@ def fit_continuum(spectrum: Spectrum1D, mask_bands: List[Tuple[float,float]], spline = new_spline best_continuum = continuum_models.pop(-1) - return best_continuum, continuum_models, masked_pixels, knots + return best_continuum, continuum_models, masked_pixels, tck def fit_fiberflat(in_twilight: str, out_flat: str, out_rss: str, interpolate_bad: bool = True, mask_bands: List[Tuple[float,float]] = [], median_box: int = 5, niter: int = 1000, threshold: Tuple[float,float]|float = (0.5,2.0), From b19daaf42dee07757bd859036454513e038ad1d9 Mon Sep 17 00:00:00 2001 From: Alfredo Mejia-Narvaez Date: Tue, 7 May 2024 00:10:03 -0400 Subject: [PATCH 15/43] fixing sequence selection --- python/lvmdrp/functions/run_calseq.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/python/lvmdrp/functions/run_calseq.py b/python/lvmdrp/functions/run_calseq.py index b541c455..160aa3c1 100644 --- a/python/lvmdrp/functions/run_calseq.py +++ b/python/lvmdrp/functions/run_calseq.py @@ -136,11 +136,12 @@ def choose_sequence(frames, flavor, kind): query = "imagetyp == 'flat' and ldls|quartz" elif flavor == "arc": query = "imagetyp == 'arc' and not ldls|quartz and neon|hgne|argon|xenon" - expnums = frames.query(query).expnum.unique() + expnums = np.sort(frames.query(query).expnum.unique()) diff = np.diff(expnums) - div, = np.where(diff > 1) + div, = np.where(np.abs(diff) > 1) sequences = np.split(expnums, div+1) + [seq.sort() for seq in sequences] log.info(f"found sequences: {sequences}") if len(sequences) == 0: From 0ffac1289443c163877e85e4399fedc213fda81a Mon Sep 17 00:00:00 2001 From: 
Alfredo Mejia-Narvaez Date: Tue, 7 May 2024 00:13:58 -0400 Subject: [PATCH 16/43] fixing moving of long-term cals to sandbox --- python/lvmdrp/functions/run_calseq.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/python/lvmdrp/functions/run_calseq.py b/python/lvmdrp/functions/run_calseq.py index 160aa3c1..7d48d614 100644 --- a/python/lvmdrp/functions/run_calseq.py +++ b/python/lvmdrp/functions/run_calseq.py @@ -1515,10 +1515,11 @@ def reduce_longterm_sequence(mjd, use_fiducial_cals=True, reject_cr=True, skip_d # move master calibrations to sandbox kinds = {"bias", "trace", "width", "fiberflat", "wave", "lsf"} for kind in kinds: - src_path = path.full("lvm_master", drpver=drpver, tileid=11111, mjd=mjd, kind=f"m{kind}") - dst_path = path.full("lvm_calib", drpver=drpver, mjd=mjd, kind=f"m{kind}") - os.makedirs(os.path.dirname(dst_path), exist_ok=True) - move(src_path, dst_path) + for camera in {"b1", "b2", "b3", "r1", "r2", "r3", "z1", "z2", "z3"}: + src_path = path.full("lvm_master", drpver=drpver, tileid=11111, mjd=mjd, kind=f"m{kind}", camera=camera) + dst_path = path.full("lvm_calib", mjd=mjd, kind=kind, camera=camera) + os.makedirs(os.path.dirname(dst_path), exist_ok=True) + move(src_path, dst_path) if not keep_ancillary: _clean_ancillary(mjd) From 1271ce736393ad8866677b380c74e04eef595b79 Mon Sep 17 00:00:00 2001 From: Alfredo Mejia-Narvaez Date: Tue, 7 May 2024 00:56:24 -0400 Subject: [PATCH 17/43] fixing sequences order --- python/lvmdrp/functions/run_calseq.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/python/lvmdrp/functions/run_calseq.py b/python/lvmdrp/functions/run_calseq.py index 7d48d614..b88a4a05 100644 --- a/python/lvmdrp/functions/run_calseq.py +++ b/python/lvmdrp/functions/run_calseq.py @@ -1423,8 +1423,8 @@ def reduce_nightly_sequence(mjd, use_fiducial_cals=True, reject_cr=True, skip_do dome_flats = frames.query("imagetyp == 'flat' and ldls|quartz") if len(dome_flats) != 0: - expnums_ldls = dome_flats.query("ldls").expnum.unique() - expnums_qrtz = dome_flats.query("quartz").expnum.unique() + expnums_ldls = np.sort(dome_flats.query("ldls").expnum.unique()) + expnums_qrtz = np.sort(dome_flats.query("quartz").expnum.unique()) log.info(f"found {len(dome_flats)} dome flat exposures: {set(dome_flats.expnum)}") create_nighly_traces(mjd=mjd, expnums_ldls=expnums_ldls, expnums_qrtz=expnums_qrtz, skip_done=skip_done) # create_nightly_fiberflats(mjd=mjd, expnums_ldls=expnums_ldls, expnums_qrtz=expnums_qrtz, skip_done=skip_done) @@ -1434,14 +1434,14 @@ def reduce_nightly_sequence(mjd, use_fiducial_cals=True, reject_cr=True, skip_do arcs = frames.query("imagetyp == 'arc' and not ldls|quartz and neon|hgne|argon|xenon") if len(arcs) != 0: log.info(f"found {len(arcs)} arc exposures: {set(arcs.expnum)}") - create_wavelengths(mjd=mjd, expnums=arcs.expnum.unique(), skip_done=skip_done) + create_wavelengths(mjd=mjd, expnums=np.sort(arcs.expnum.unique()), skip_done=skip_done) else: log.warning("no arc exposures found") twilight_flats = frames.query("imagetyp == 'flat' and not ldls|quartz") if len(twilight_flats) != 0: log.info(f"found {len(twilight_flats)} twilight exposures: {set(twilight_flats.expnum)}") - create_fiberflats(mjd=mjd, expnums=sorted(twilight_flats.expnum.unique()), skip_done=skip_done) + create_fiberflats(mjd=mjd, expnums=sorted(np.sort(twilight_flats.expnum.unique())), skip_done=skip_done) else: log.warning("no twilight exposures found") @@ -1491,8 +1491,8 @@ def reduce_longterm_sequence(mjd, 
use_fiducial_cals=True, reject_cr=True, skip_d dome_flats, dome_flat_expnums = choose_sequence(frames, flavor="flat", kind="longterm") if len(dome_flats) != 0: - expnums_ldls = dome_flats.query("ldls").expnum.unique() - expnums_qrtz = dome_flats.query("quartz").expnum.unique() + expnums_ldls = np.sort(dome_flats.query("ldls").expnum.unique()) + expnums_qrtz = np.sort(dome_flats.query("quartz").expnum.unique()) log.info(f"found {len(dome_flats)} dome flat exposures: {dome_flat_expnums}") create_traces(mjd=mjd, expnums_ldls=expnums_ldls, expnums_qrtz=expnums_qrtz, skip_done=skip_done) else: @@ -1501,7 +1501,7 @@ def reduce_longterm_sequence(mjd, use_fiducial_cals=True, reject_cr=True, skip_d arcs, arc_expnums = choose_sequence(frames, flavor="arc", kind="longterm") if len(arcs) != 0: log.info(f"found {len(arcs)} arc exposures: {arc_expnums}") - create_wavelengths(mjd=mjd, expnums=arcs.expnum.unique(), skip_done=skip_done) + create_wavelengths(mjd=mjd, expnums=np.sort(arcs.expnum.unique()), skip_done=skip_done) else: log.warning("no arc exposures found") From 90414d97670399cdaa5a94d1bce7390b92c31857 Mon Sep 17 00:00:00 2001 From: Alfredo Mejia-Narvaez Date: Wed, 8 May 2024 18:04:44 -0400 Subject: [PATCH 18/43] fixing hard-coded values in fiber thermal shifts, improved stats & log --- python/lvmdrp/core/image.py | 2 +- python/lvmdrp/functions/imageMethod.py | 20 +++++++++++--------- 2 files changed, 12 insertions(+), 10 deletions(-) diff --git a/python/lvmdrp/core/image.py b/python/lvmdrp/core/image.py index 030c9d5e..c01cf216 100644 --- a/python/lvmdrp/core/image.py +++ b/python/lvmdrp/core/image.py @@ -737,7 +737,7 @@ def measure_fiber_shifts(self, ref_image, columns=[500, 1000, 1500, 2000, 2500, for j,c in enumerate(columns): s1 = numpy.nanmedian(ref_data[50:-50,c-column_width:c+column_width], axis=1) s2 = numpy.nanmedian(self._data[50:-50,c-column_width:c+column_width], axis=1) - _, shifts[j], _ = _cross_match_float(s1, s2, numpy.array([1.0]), [-5, 5]) + _, shifts[j], _ = _cross_match_float(s1, s2, numpy.array([1.0]), shift_range) return shifts diff --git a/python/lvmdrp/functions/imageMethod.py b/python/lvmdrp/functions/imageMethod.py index d8565f39..49781887 100644 --- a/python/lvmdrp/functions/imageMethod.py +++ b/python/lvmdrp/functions/imageMethod.py @@ -380,13 +380,17 @@ def _fix_fiber_thermal_shifts(image, trace_cent, trace_width=None, trace_amp=Non # calculate thermal shifts column_shifts = image.measure_fiber_shifts(model, columns=columns, column_width=column_width, shift_range=shift_range) # shifts stats - mean_shifts = numpy.nanmean(column_shifts, axis=0) - std_shifts = numpy.nanstd(column_shifts, axis=0) - log.info(f"shift in fibers: {mean_shifts:.4f}+/-{std_shifts:.4f} pixels") + median_shift = numpy.nanmedian(column_shifts, axis=0) + std_shift = numpy.nanstd(column_shifts, axis=0) + if median_shift > 1: + log.warning(f"large thermal shift measured: {column_shifts} pixels") + log.warning(f"{median_shift = :.4f}+/-{std_shift = :.4f} pixels") + else: + log.info(f"median shift in fibers: {median_shift = :.4f}+/-{std_shift = :.4f} pixels") # apply average shift to the zeroth order trace coefficients trace_cent_fixed = copy(trace_cent) - trace_cent_fixed._coeffs[:, 0] += mean_shifts + trace_cent_fixed._coeffs[:, 0] += median_shift trace_cent_fixed.eval_coeffs() # deltas = TraceMask(data=numpy.zeros_like(trace_cent._data), mask=numpy.ones_like(trace_cent._data, dtype=bool)) @@ -3091,7 +3095,7 @@ def extract_spectra( in_trace: str, in_fwhm: str = None, in_acorr: str = None, - 
columns: List[int] = [500, 1500, 2000, 2500, 3500], + columns: List[int] = [500, 1000, 1500, 2000, 2500, 3000], column_width: int = 20, method: str = "optimal", aperture: int = 3, @@ -3165,8 +3169,7 @@ def extract_spectra( trace_mask, shifts, _ = _fix_fiber_thermal_shifts(img, trace_mask, trace_fwhm, trace_amp=10000, columns=columns, - column_width=column_width, - shift_range=[-5,5]) + column_width=column_width) # set up parallel run if parallel == "auto": @@ -3218,8 +3221,7 @@ def extract_spectra( trace_mask, shifts, _ = _fix_fiber_thermal_shifts(img, trace_mask, trace_fwhm or 2.5, trace_amp=10000, columns=columns, - column_width=column_width, - shift_range=[-5,5]) + column_width=column_width) (data, error, mask) = img.extractSpecAperture(trace_mask, aperture) From af7dd3bfe64fd0dc9d71df7d88ec2594a6e169e4 Mon Sep 17 00:00:00 2001 From: Alfredo Mejia-Narvaez Date: Wed, 8 May 2024 19:34:17 -0400 Subject: [PATCH 19/43] improving stray light subtraction for science frames --- python/lvmdrp/core/image.py | 7 +++++- python/lvmdrp/functions/imageMethod.py | 32 ++++++++++++++++---------- 2 files changed, 26 insertions(+), 13 deletions(-) diff --git a/python/lvmdrp/core/image.py b/python/lvmdrp/core/image.py index c01cf216..ed16f8b8 100644 --- a/python/lvmdrp/core/image.py +++ b/python/lvmdrp/core/image.py @@ -1983,7 +1983,7 @@ def fitPoly(self, axis="y", order=4, plot=-1): new_img.setData(data=fit_result, mask=new_mask) return new_img - def fitSpline(self, axis="y", degree=3, smoothing=0, use_weights=False): + def fitSpline(self, axis="y", degree=3, smoothing=0, use_weights=False, clip=None): """Fits a spline to the image along a given axis Parameters @@ -1997,6 +1997,8 @@ def fitSpline(self, axis="y", degree=3, smoothing=0, use_weights=False): smoothing factor for the spline fit, by default 0 use_weights : bool, optional whether to use the inverse variance as weights for the spline fit or not, by default False + clip : tuple, optional + minimum and maximum values to clip the spline model, by default None Returns ------- @@ -2013,6 +2015,7 @@ def fitSpline(self, axis="y", degree=3, smoothing=0, use_weights=False): models = [] for i in range(self._dim[1]): good_pix = ~self._mask[:,i] if self._mask is not None else ~numpy.isnan(self._data[:,i]) + # good_pix = ~self._mask[:, i] if self._mask is not None else numpy.ones(self._dim[0], dtype=bool) # skip column if all pixels are masked if good_pix.sum() == 0: @@ -2063,6 +2066,8 @@ def fitSpline(self, axis="y", degree=3, smoothing=0, use_weights=False): else: spline_pars = interpolate.splrep(masked_pixels, data, s=smoothing) model = interpolate.splev(pixels, spline_pars) + if clip is not None: + model = numpy.clip(model, clip[0], clip[1]) models.append(model) # reconstruct the model image diff --git a/python/lvmdrp/functions/imageMethod.py b/python/lvmdrp/functions/imageMethod.py index 49781887..67af0985 100644 --- a/python/lvmdrp/functions/imageMethod.py +++ b/python/lvmdrp/functions/imageMethod.py @@ -2490,13 +2490,18 @@ def subtract_straylight( # load image data log.info(f"using image {os.path.basename(in_image)} for stray light subtraction") img = loadImage(in_image) + img.setData(data=numpy.nan_to_num(img._data), error=numpy.nan_to_num(img._error)) unit = img._header["BUNIT"] # smooth image along dispersion axis with a median filter excluded NaN values if median_box is not None: log.info(f"median filtering image along dispersion axis with a median filter of width {median_box}") - median_box = max(1, median_box) - img_median = img.medianImg((1, 
median_box), use_mask=True) + median_box = (1, max(1, median_box)) + img_median = img.replaceMaskMedian(*median_box, replace_error=None) + img_median._data = numpy.nan_to_num(img_median._data) + img_median = img_median.medianImg(median_box, use_mask=True) + img_median._mask = img._mask + else: img_median = copy(img) @@ -2510,6 +2515,10 @@ def subtract_straylight( img_median._mask = numpy.zeros(img_median._data.shape, dtype=bool) img_median._mask = img_median._mask | numpy.isnan(img_median._data) | numpy.isinf(img_median._data) | (img_median._data == 0) + # mask regions around each fiber within a given cross-dispersion aperture + log.info(f"masking fibers with an aperture of {aperture} pixels") + img_median.maskFiberTraces(trace_mask, aperture=aperture, parallel=parallel) + # mask regions around the top and bottom of the CCD if isinstance(select_nrows, int): select_tnrows = select_nrows @@ -2523,23 +2532,22 @@ def subtract_straylight( for icol in range(img_median._dim[1]): # mask top/bottom rows before/after first/last fiber - img_median._mask[tfiber[icol]:] = True - img_median._mask[:bfiber[icol]] = True + img_median._mask[tfiber[icol]:, icol] = True + img_median._mask[:bfiber[icol], icol] = True # unmask select_nrows around each region - img_median._mask[(tfiber[icol]+img_median._dim[0]-select_tnrows)//2:(tfiber[icol]+img_median._dim[0]+select_tnrows)//2] = False - img_median._mask[(bfiber[icol]-select_bnrows)//2:(bfiber[icol]+select_bnrows)//2] = False - - # mask regions around each fiber within a given cross-dispersion aperture - log.info(f"masking fibers with an aperture of {aperture} pixels") - img_median.maskFiberTraces(trace_mask, aperture=aperture, parallel=parallel) + img_median._mask[(tfiber[icol]+aperture//2):(tfiber[icol]+aperture//2+select_tnrows), icol] = False + img_median._mask[(bfiber[icol]-aperture//2-select_bnrows):(bfiber[icol]-aperture//2), icol] = False # fit the signal in unmaksed areas along cross-dispersion axis by a polynomial log.info(f"fitting spline with {smoothing = } to the background signal along cross-dispersion axis") - img_fit = img_median.fitSpline(smoothing=smoothing, use_weights=use_weights) + img_fit = img_median.fitSpline(smoothing=smoothing, use_weights=use_weights, clip=(0.0, None)) + + # median filter to reject outlying columns + img_fit = img_fit.medianImg((1, 7)) # smooth the results by 2D Gaussian filter of given with (cross- and dispersion axis have equal width) log.info(f"smoothing the background signal by a 2D Gaussian filter of width {gaussian_sigma}") - img_stray = img_fit.convolveGaussImg(gaussian_sigma, gaussian_sigma) + img_stray = img_fit.convolveGaussImg(1, gaussian_sigma) img_stray.setData(data=numpy.nan_to_num(img_stray._data), error=numpy.nan_to_num(img_stray._error)) # subtract smoothed background signal from original image From 7dc464348e4236ed94b8b4dd0300d3159129c495 Mon Sep 17 00:00:00 2001 From: Alfredo Mejia-Narvaez Date: Wed, 8 May 2024 19:35:21 -0400 Subject: [PATCH 20/43] implementing stray light subtraction for science reductions --- python/lvmdrp/functions/run_quickdrp.py | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/python/lvmdrp/functions/run_quickdrp.py b/python/lvmdrp/functions/run_quickdrp.py index f1a742cf..b328188b 100644 --- a/python/lvmdrp/functions/run_quickdrp.py +++ b/python/lvmdrp/functions/run_quickdrp.py @@ -190,12 +190,9 @@ def quick_science_reduction(expnum: int, use_fiducial_master: bool = False, image_tasks.add_astrometry(in_image=dsci_path, out_image=dsci_path, 
in_agcsci_image=agcsci_path, in_agcskye_image=agcskye_path, in_agcskyw_image=agcskyw_path) # subtract straylight - if sci_imagetyp == "flat": - image_tasks.subtract_straylight(in_image=dsci_path, out_image=lsci_path, - in_cent_trace=mtrace_path, select_nrows=5, - aperture=13, smoothing=400, median_box=21, gaussian_sigma=0.0) - else: - lsci_path = dsci_path + image_tasks.subtract_straylight(in_image=lsci_path, out_image=lsci_path, out_stray=lstr_path, + in_cent_trace=mtrace_path, select_nrows=(5,5), use_weights=True, + aperture=15, smoothing=400, median_box=101, gaussian_sigma=20.0) # extract 1d spectra image_tasks.extract_spectra(in_image=dsci_path, out_rss=xsci_path, in_trace=mtrace_path, in_fwhm=mwidth_path, From b14dfe94941d7db8f73d3c4c1595832524f18f10 Mon Sep 17 00:00:00 2001 From: Alfredo Mejia-Narvaez Date: Thu, 9 May 2024 09:18:16 -0400 Subject: [PATCH 21/43] fixing handling of new paths in fiducial calibrations --- python/lvmdrp/functions/run_calseq.py | 36 ++++++++++++++++++++------- 1 file changed, 27 insertions(+), 9 deletions(-) diff --git a/python/lvmdrp/functions/run_calseq.py b/python/lvmdrp/functions/run_calseq.py index b88a4a05..ed5dce39 100644 --- a/python/lvmdrp/functions/run_calseq.py +++ b/python/lvmdrp/functions/run_calseq.py @@ -228,6 +228,29 @@ def _get_reference_expnum(frame, ref_frames): return ref_expnums[idx] +def _move_master_calibrations(mjd, kind=None): + + kinds = {"bias", "trace", "width", "fiberflat", "wave", "lsf"} + + if isinstance(kind, list, tuple, np.ndarray): + kinds = kind + elif isinstance(kind, str) and kind in kinds: + kinds = {kind} + elif kind is None: + pass + else: + raise ValueError(f"kind must be one of {kinds}") + + for kind in kinds: + src_paths = path.expand("lvm_master", drpver=drpver, tileid=11111, mjd=mjd, kind=f"m{kind}", camera="*") + for src_path in src_paths: + camera = path.extract("lvm_master", src_path)["camera"] + dst_path = path.full("lvm_calib", mjd=mjd, kind=kind, camera=camera) + if os.path.isfile(src_path): + os.makedirs(os.path.dirname(dst_path), exist_ok=True) + copy2(src_path, dst_path) + + def _clean_ancillary(mjd, expnums=None, imagetyp="all"): """Clean ancillary files @@ -1486,6 +1509,7 @@ def reduce_longterm_sequence(mjd, use_fiducial_cals=True, reject_cr=True, skip_d if len(biases) != 0: log.info(f"found {len(biases)} bias exposures: {bias_expnums}") create_detrending_frames(mjd=mjd, expnums=bias_expnums, kind="bias", use_fiducial_cals=use_fiducial_cals, skip_done=skip_done, keep_ancillary=keep_ancillary) + _move_master_calibrations(mjd=mjd, kind="bias") else: log.warning("no bias exposures found") @@ -1495,6 +1519,7 @@ def reduce_longterm_sequence(mjd, use_fiducial_cals=True, reject_cr=True, skip_d expnums_qrtz = np.sort(dome_flats.query("quartz").expnum.unique()) log.info(f"found {len(dome_flats)} dome flat exposures: {dome_flat_expnums}") create_traces(mjd=mjd, expnums_ldls=expnums_ldls, expnums_qrtz=expnums_qrtz, skip_done=skip_done) + _move_master_calibrations(mjd=mjd, kind={"trace", "width"}) else: log.warning("no dome flat exposures found") @@ -1502,6 +1527,7 @@ def reduce_longterm_sequence(mjd, use_fiducial_cals=True, reject_cr=True, skip_d if len(arcs) != 0: log.info(f"found {len(arcs)} arc exposures: {arc_expnums}") create_wavelengths(mjd=mjd, expnums=np.sort(arcs.expnum.unique()), skip_done=skip_done) + _move_master_calibrations(mjd=mjd, kind={"wave", "lsf"}) else: log.warning("no arc exposures found") @@ -1509,18 +1535,10 @@ def reduce_longterm_sequence(mjd, use_fiducial_cals=True, reject_cr=True, 
skip_d if len(twilight_flats) != 0: log.info(f"found {len(twilight_flats)} twilight exposures: {twilight_expnums}") create_fiberflats(mjd=mjd, expnums=twilight_expnums, skip_done=skip_done) + _move_master_calibrations(mjd=mjd, kind="fiberflat_twilight") else: log.warning("no twilight exposures found") - # move master calibrations to sandbox - kinds = {"bias", "trace", "width", "fiberflat", "wave", "lsf"} - for kind in kinds: - for camera in {"b1", "b2", "b3", "r1", "r2", "r3", "z1", "z2", "z3"}: - src_path = path.full("lvm_master", drpver=drpver, tileid=11111, mjd=mjd, kind=f"m{kind}", camera=camera) - dst_path = path.full("lvm_calib", mjd=mjd, kind=kind, camera=camera) - os.makedirs(os.path.dirname(dst_path), exist_ok=True) - move(src_path, dst_path) - if not keep_ancillary: _clean_ancillary(mjd) From 661f465041bb9ac6975fc7a7772ab6dd3d7ea8ac Mon Sep 17 00:00:00 2001 From: Alfredo Mejia-Narvaez Date: Thu, 9 May 2024 10:35:57 -0400 Subject: [PATCH 22/43] fixing minor bug --- python/lvmdrp/functions/run_calseq.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/python/lvmdrp/functions/run_calseq.py b/python/lvmdrp/functions/run_calseq.py index ed5dce39..6579ca63 100644 --- a/python/lvmdrp/functions/run_calseq.py +++ b/python/lvmdrp/functions/run_calseq.py @@ -231,8 +231,7 @@ def _get_reference_expnum(frame, ref_frames): def _move_master_calibrations(mjd, kind=None): kinds = {"bias", "trace", "width", "fiberflat", "wave", "lsf"} - - if isinstance(kind, list, tuple, np.ndarray): + if isinstance(kind, (list, tuple, np.ndarray)): kinds = kind elif isinstance(kind, str) and kind in kinds: kinds = {kind} From 180c78475120ec1516b26f7226b55dd083a457d0 Mon Sep 17 00:00:00 2001 From: Alfredo Mejia-Narvaez Date: Thu, 9 May 2024 19:03:44 -0400 Subject: [PATCH 23/43] fixing bug in straylight subtraction --- python/lvmdrp/functions/imageMethod.py | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/python/lvmdrp/functions/imageMethod.py b/python/lvmdrp/functions/imageMethod.py index 67af0985..5384b7f9 100644 --- a/python/lvmdrp/functions/imageMethod.py +++ b/python/lvmdrp/functions/imageMethod.py @@ -2490,7 +2490,6 @@ def subtract_straylight( # load image data log.info(f"using image {os.path.basename(in_image)} for stray light subtraction") img = loadImage(in_image) - img.setData(data=numpy.nan_to_num(img._data), error=numpy.nan_to_num(img._error)) unit = img._header["BUNIT"] # smooth image along dispersion axis with a median filter excluded NaN values @@ -2500,8 +2499,6 @@ def subtract_straylight( img_median = img.replaceMaskMedian(*median_box, replace_error=None) img_median._data = numpy.nan_to_num(img_median._data) img_median = img_median.medianImg(median_box, use_mask=True) - img_median._mask = img._mask - else: img_median = copy(img) @@ -2543,21 +2540,22 @@ def subtract_straylight( img_fit = img_median.fitSpline(smoothing=smoothing, use_weights=use_weights, clip=(0.0, None)) # median filter to reject outlying columns - img_fit = img_fit.medianImg((1, 7)) + img_fit = img_fit.medianImg((1, 7), use_mask=True) - # smooth the results by 2D Gaussian filter of given with (cross- and dispersion axis have equal width) + # smooth the results by 2D Gaussian filter of given width log.info(f"smoothing the background signal by a 2D Gaussian filter of width {gaussian_sigma}") img_stray = img_fit.convolveGaussImg(1, gaussian_sigma) - img_stray.setData(data=numpy.nan_to_num(img_stray._data), error=numpy.nan_to_num(img_stray._error)) # subtract smoothed background signal 
from original image log.info("subtracting the smoothed background signal from the original image") - img_out = img - img_stray + img_out = loadImage(in_image) + img_out._data = img_out._data - img_stray._data + img_out.setData(data=0.0, error=numpy.inf, select=(img_out._data==0)|(img_out._error==0), inplace=True) + img_out.setData(mask=True, select=(img_out._data==0)|~numpy.isfinite(img_out._error), inplace=True) # include header and write out file log.info(f"writing stray light subtracted image to {os.path.basename(out_image)}") img_out.setHeader(header=img.getHeader()) - img_out.setData(mask=img._mask) img_out.writeFitsData(out_image) # plot results: polyomial fitting & smoothing, both with masked regions on From 6451f5138dda6561b36cf679fabe14333796af88 Mon Sep 17 00:00:00 2001 From: Alfredo Mejia-Narvaez Date: Fri, 10 May 2024 01:39:17 -0400 Subject: [PATCH 24/43] fixing fit_continuum output --- python/lvmdrp/functions/run_twilights.py | 4 ++-- tests/functions/test_runtwilights.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/python/lvmdrp/functions/run_twilights.py b/python/lvmdrp/functions/run_twilights.py index b517343d..dd03f6fb 100644 --- a/python/lvmdrp/functions/run_twilights.py +++ b/python/lvmdrp/functions/run_twilights.py @@ -367,7 +367,7 @@ def fit_fiberflat(in_twilight: str, out_flat: str, out_rss: str, interpolate_bad ori_fiber = ori_flat[ifiber] try: - best_continuum, continuum_models, masked_pixels, knots = fit_continuum( + best_continuum, continuum_models, masked_pixels, tck = fit_continuum( spectrum=fiber, mask_bands=mask_bands, median_box=median_box, niter=niter, threshold=threshold, **kwargs ) @@ -390,7 +390,7 @@ def fit_fiberflat(in_twilight: str, out_flat: str, out_rss: str, interpolate_bad for continuum_model in continuum_models: axs[iax].plot(fiber._wave[masked_pixels], fiber._data[masked_pixels], ".", color="tab:blue", ms=5, mew=0) axs[iax].plot(fiber._wave, continuum_model, color="tab:red", lw=1, alpha=0.5, zorder=niter) - axs[iax].plot(knots, np.zeros_like(knots), ".k") + axs[iax].plot(tck[0], np.zeros_like(tck[0]), ".k") axs[iax].step(fiber._wave, best_continuum, color="tab:red", lw=2) new_flat._data[ifiber] = best_continuum diff --git a/tests/functions/test_runtwilights.py b/tests/functions/test_runtwilights.py index 1300aca7..9c008885 100644 --- a/tests/functions/test_runtwilights.py +++ b/tests/functions/test_runtwilights.py @@ -95,11 +95,11 @@ def test_get_sequence_metadata_no_files(): def test_fit_continuum(): """ test continuum fitting """ spectrum = make_fake_spectrum() - best_continuum, models, masked_pixels, knots = fit_continuum(spectrum, mask_bands=[(3100,3200)], median_box=1, niter=10, threshold=0.5) + best_continuum, models, masked_pixels, tck = fit_continuum(spectrum, mask_bands=[(3100,3200)], median_box=1, niter=10, threshold=0.5) assert best_continuum.size == spectrum._data.size assert len(models) == 0 assert masked_pixels.sum() == 12 - assert len(knots) == 95 + assert len(tck[0]) == 95 assert best_continuum[0] == pytest.approx(0, abs=1e-10) assert best_continuum[-1] == pytest.approx(0, abs=1e-10) From 3942eec9f1c6cb7b76be3ea733743f0296e52d46 Mon Sep 17 00:00:00 2001 From: Alfredo Mejia-Narvaez Date: Fri, 10 May 2024 10:27:25 -0400 Subject: [PATCH 25/43] making stray light subtraction consistent all across --- python/lvmdrp/functions/run_calseq.py | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/python/lvmdrp/functions/run_calseq.py b/python/lvmdrp/functions/run_calseq.py index 
6579ca63..579d3739 100644 --- a/python/lvmdrp/functions/run_calseq.py +++ b/python/lvmdrp/functions/run_calseq.py @@ -877,9 +877,8 @@ def create_nighly_traces(mjd, use_fiducial_cals=True, expnums_ldls=None, expnums log.info(f"skipping {lflat_path}, file already exist") else: image_tasks.subtract_straylight(in_image=dflat_path, out_image=lflat_path, out_stray=dstray_path, - in_cent_trace=cent_guess_path, select_nrows=5, - aperture=13, smoothing=400, median_box=21, - gaussian_sigma=0.0) + in_cent_trace=cent_guess_path, select_nrows=(5,5), use_weights=True, + aperture=15, smoothing=400, median_box=101, gaussian_sigma=20.0) if skip_done and os.path.isfile(flux_path) and os.path.isfile(cent_path) and os.path.isfile(fwhm_path): log.info(f"skipping {flux_path}, {cent_path} and {fwhm_path}, files already exist") @@ -1015,9 +1014,8 @@ def create_traces(mjd, use_fiducial_cals=True, expnums_ldls=None, expnums_qrtz=N log.info(f"skipping {lflat_path}, file already exist") else: image_tasks.subtract_straylight(in_image=dflat_path, out_image=lflat_path, out_stray=dstray_path, - in_cent_trace=cent_guess_path, select_nrows=5, - aperture=13, smoothing=400, median_box=21, - gaussian_sigma=0.0) + in_cent_trace=cent_guess_path, select_nrows=(5,5), use_weights=True, + aperture=15, smoothing=400, median_box=101, gaussian_sigma=20.0) if skip_done and os.path.isfile(flux_path): log.info(f"skipping {flux_path}, file already exist") @@ -1189,9 +1187,8 @@ def create_fiberflats(mjd: int, use_fiducial_cals: bool = True, expnums: List[in log.info(f"skipping {lflat_path}, file already exist") else: image_tasks.subtract_straylight(in_image=dflat_path, out_image=lflat_path, out_stray=stray_path, - in_cent_trace=master_cals.get("cent"), select_nrows=5, - aperture=13, smoothing=400, median_box=21, - gaussian_sigma=0.0) + in_cent_trace=master_cals.get("cent"), select_nrows=(5,5), use_weights=True, + aperture=15, smoothing=400, median_box=101, gaussian_sigma=20.0) if skip_done and os.path.isfile(xflat_path): log.info(f"skipping {xflat_path}, file already exist") From fc251779c9576223246a691b23dee7befbed7cc6 Mon Sep 17 00:00:00 2001 From: Alfredo Mejia-Narvaez Date: Fri, 10 May 2024 11:21:56 -0400 Subject: [PATCH 26/43] removing replacement of masked values in stray light subtraction --- python/lvmdrp/functions/imageMethod.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/python/lvmdrp/functions/imageMethod.py b/python/lvmdrp/functions/imageMethod.py index 5384b7f9..8b8b2949 100644 --- a/python/lvmdrp/functions/imageMethod.py +++ b/python/lvmdrp/functions/imageMethod.py @@ -2550,8 +2550,6 @@ def subtract_straylight( log.info("subtracting the smoothed background signal from the original image") img_out = loadImage(in_image) img_out._data = img_out._data - img_stray._data - img_out.setData(data=0.0, error=numpy.inf, select=(img_out._data==0)|(img_out._error==0), inplace=True) - img_out.setData(mask=True, select=(img_out._data==0)|~numpy.isfinite(img_out._error), inplace=True) # include header and write out file log.info(f"writing stray light subtracted image to {os.path.basename(out_image)}") From 1dbd8301495a305a1bc94c3fabd86a7194a39da6 Mon Sep 17 00:00:00 2001 From: Alfredo Mejia-Narvaez Date: Fri, 10 May 2024 15:35:24 -0400 Subject: [PATCH 27/43] adding missing path --- python/lvmdrp/functions/run_quickdrp.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/python/lvmdrp/functions/run_quickdrp.py b/python/lvmdrp/functions/run_quickdrp.py index b328188b..093883af 100644 --- 
a/python/lvmdrp/functions/run_quickdrp.py +++ b/python/lvmdrp/functions/run_quickdrp.py @@ -139,6 +139,7 @@ def quick_science_reduction(expnum: int, use_fiducial_master: bool = False, wsci_path = path.full("lvm_anc", drpver=drpver, kind="w", imagetype=sci["imagetyp"], **sci) ssci_path = path.full("lvm_anc", drpver=drpver, kind="s", imagetype=sci["imagetyp"], **sci) hsci_path = path.full("lvm_anc", drpver=drpver, kind="h", imagetype=sci["imagetyp"], **sci) + lstr_path = path.full("lvm_anc", drpver=drpver, kind="d", imagetype="stray", **sci) os.makedirs(os.path.dirname(hsci_path), exist_ok=True) # define science product paths @@ -190,12 +191,12 @@ def quick_science_reduction(expnum: int, use_fiducial_master: bool = False, image_tasks.add_astrometry(in_image=dsci_path, out_image=dsci_path, in_agcsci_image=agcsci_path, in_agcskye_image=agcskye_path, in_agcskyw_image=agcskyw_path) # subtract straylight - image_tasks.subtract_straylight(in_image=lsci_path, out_image=lsci_path, out_stray=lstr_path, + image_tasks.subtract_straylight(in_image=dsci_path, out_image=lsci_path, out_stray=lstr_path, in_cent_trace=mtrace_path, select_nrows=(5,5), use_weights=True, aperture=15, smoothing=400, median_box=101, gaussian_sigma=20.0) # extract 1d spectra - image_tasks.extract_spectra(in_image=dsci_path, out_rss=xsci_path, in_trace=mtrace_path, in_fwhm=mwidth_path, + image_tasks.extract_spectra(in_image=lsci_path, out_rss=xsci_path, in_trace=mtrace_path, in_fwhm=mwidth_path, method=extraction_method, parallel=extraction_parallel) # per channel reduction From bb9e4b8d607c5a2459fbee7006f243eaf0c66afc Mon Sep 17 00:00:00 2001 From: Alfredo Mejia-Narvaez Date: Fri, 10 May 2024 16:49:12 -0400 Subject: [PATCH 28/43] improving error propagation and masked pixels handling in extraction --- python/lvmdrp/core/image.py | 2 +- python/lvmdrp/core/spectrum1d.py | 3 +++ python/lvmdrp/functions/imageMethod.py | 2 +- 3 files changed, 5 insertions(+), 2 deletions(-) diff --git a/python/lvmdrp/core/image.py b/python/lvmdrp/core/image.py index ed16f8b8..4de91f12 100644 --- a/python/lvmdrp/core/image.py +++ b/python/lvmdrp/core/image.py @@ -2459,7 +2459,7 @@ def extractSpecOptimal(self, cent_trace, trace_fwhm, plot_fig=False): mask = numpy.zeros((cent_trace._fibers, self._dim[1]), dtype="bool") self._data = numpy.nan_to_num(self._data) - self._error = numpy.nan_to_num(self._error, nan=1e10) + self._error = numpy.nan_to_num(self._error, nan=numpy.inf) # convert FWHM trace to sigma trace_sigma = trace_fwhm / 2.354 diff --git a/python/lvmdrp/core/spectrum1d.py b/python/lvmdrp/core/spectrum1d.py index bda5e249..31098add 100644 --- a/python/lvmdrp/core/spectrum1d.py +++ b/python/lvmdrp/core/spectrum1d.py @@ -3282,10 +3282,13 @@ def obtainGaussFluxPeaks(self, pos, sigma, indices, replace_error=1e10, plot=Fal ).astype("int") # defining bad pixels for each fiber if needed if self._mask is not None: + # select: fibers in the boundary of the chip bad_pix = numpy.zeros(fibers, dtype="bool") select = bn.nansum(pixels >= self._mask.shape[0], 1) nselect = numpy.logical_not(select) bad_pix[select] = True + + # masking fibers if all pixels are bad within aperture bad_pix[nselect] = bn.nansum(self._mask[pixels[nselect, :]], 1) == aperture else: bad_pix = None diff --git a/python/lvmdrp/functions/imageMethod.py b/python/lvmdrp/functions/imageMethod.py index 8b8b2949..7e60e829 100644 --- a/python/lvmdrp/functions/imageMethod.py +++ b/python/lvmdrp/functions/imageMethod.py @@ -4321,7 +4321,7 @@ def detrend_frame( detrended_img = (bcorr_img - 
mdark_img.convertUnit(to=bcorr_img._header["BUNIT"])) # NOTE: this is a hack to avoid the error propagation of the division in Image - detrended_img = detrended_img / numpy.nan_to_num(mflat_img._data, nan=1.0) + detrended_img._data = detrended_img._data / numpy.nan_to_num(mflat_img._data, nan=1.0) # propagate pixel mask log.info("propagating pixel mask") From a0ae36f2d2c14b11403e140e2f47dfe17e2ac377 Mon Sep 17 00:00:00 2001 From: Alfredo Mejia-Narvaez Date: Fri, 10 May 2024 20:56:46 -0400 Subject: [PATCH 29/43] improving spline fitting for stray light modeling & fixing mask propagation --- python/lvmdrp/core/image.py | 37 ++++++++++++++++---------- python/lvmdrp/functions/imageMethod.py | 4 +-- 2 files changed, 25 insertions(+), 16 deletions(-) diff --git a/python/lvmdrp/core/image.py b/python/lvmdrp/core/image.py index 4de91f12..93a70cf3 100644 --- a/python/lvmdrp/core/image.py +++ b/python/lvmdrp/core/image.py @@ -1983,7 +1983,7 @@ def fitPoly(self, axis="y", order=4, plot=-1): new_img.setData(data=fit_result, mask=new_mask) return new_img - def fitSpline(self, axis="y", degree=3, smoothing=0, use_weights=False, clip=None): + def fitSpline(self, axis="y", degree=3, smoothing=0, use_weights=False, clip=None, interpolate_missing=True): """Fits a spline to the image along a given axis Parameters @@ -1999,6 +1999,8 @@ def fitSpline(self, axis="y", degree=3, smoothing=0, use_weights=False, clip=Non whether to use the inverse variance as weights for the spline fit or not, by default False clip : tuple, optional minimum and maximum values to clip the spline model, by default None + interpolate_missing : bool, optional + interpolate coefficients if spline fitting failed Returns ------- @@ -2012,15 +2014,13 @@ def fitSpline(self, axis="y", degree=3, smoothing=0, use_weights=False, clip=Non self.swapaxes() pixels = numpy.arange(self._dim[0]) - models = [] + models = numpy.zeros(self._dim) for i in range(self._dim[1]): good_pix = ~self._mask[:,i] if self._mask is not None else ~numpy.isnan(self._data[:,i]) - # good_pix = ~self._mask[:, i] if self._mask is not None else numpy.ones(self._dim[0], dtype=bool) # skip column if all pixels are masked if good_pix.sum() == 0: warnings.warn(f"Skipping column {i} due to all pixels being masked", RuntimeWarning) - models.append(numpy.zeros(self._dim[0])) continue # define spline fitting parameters @@ -2046,15 +2046,14 @@ def fitSpline(self, axis="y", degree=3, smoothing=0, use_weights=False, clip=Non if len(groups) <= degree+1: warnings.warn(f"Skipping column {i} due to insufficient data for spline fit", RuntimeWarning) - models.append(numpy.zeros(self._dim[0])) continue # collapse groups into single pixel new_masked_pixels, new_data, new_vars = [], [], [] for group in groups: - new_masked_pixels.append(numpy.mean(masked_pixels[group])) - new_data.append(numpy.median(data[group])) - new_vars.append(numpy.mean(vars[group])) + new_masked_pixels.append(numpy.nanmean(masked_pixels[group])) + new_data.append(numpy.nanmedian(data[group])) + new_vars.append(numpy.nanmean(vars[group])) masked_pixels = numpy.asarray(new_masked_pixels) data = numpy.asarray(new_data) vars = numpy.asarray(new_vars) @@ -2065,13 +2064,23 @@ def fitSpline(self, axis="y", degree=3, smoothing=0, use_weights=False, clip=Non spline_pars = interpolate.splrep(masked_pixels, data, w=weights, s=smoothing) else: spline_pars = interpolate.splrep(masked_pixels, data, s=smoothing) - model = interpolate.splev(pixels, spline_pars) - if clip is not None: - model = numpy.clip(model, clip[0], clip[1]) - 
models.append(model) + models[:, i] = interpolate.splev(pixels, spline_pars) + + # clip spline fit if required + if clip is not None: + models = numpy.clip(models, clip[0], clip[1]) - # reconstruct the model image - models = numpy.asarray(models).T + # interpolate failed columns if requested + masked_columns = numpy.count_nonzero((models == 0)|numpy.isnan(models), axis=0) >= 0.1*self._dim[0] + if interpolate_missing and masked_columns.any(): + log.info(f"interpolating spline fit in {masked_columns.sum()} columns") + x_pixels = numpy.arange(self._dim[1]) + f = interpolate.interp1d(x_pixels[~masked_columns], models[:, ~masked_columns], axis=1, bounds_error=False, fill_value="extrapolate") + models[:, masked_columns] = f(x_pixels[masked_columns]) + + # clip spline fit if required + if clip is not None: + models = numpy.clip(models, clip[0], clip[1]) # match orientation of the output array if axis == "y" or axis == "Y" or axis == 0: diff --git a/python/lvmdrp/functions/imageMethod.py b/python/lvmdrp/functions/imageMethod.py index 7e60e829..e2385582 100644 --- a/python/lvmdrp/functions/imageMethod.py +++ b/python/lvmdrp/functions/imageMethod.py @@ -2498,7 +2498,7 @@ def subtract_straylight( median_box = (1, max(1, median_box)) img_median = img.replaceMaskMedian(*median_box, replace_error=None) img_median._data = numpy.nan_to_num(img_median._data) - img_median = img_median.medianImg(median_box, use_mask=True) + img_median = img_median.medianImg(median_box, use_mask=False) else: img_median = copy(img) @@ -2540,7 +2540,7 @@ def subtract_straylight( img_fit = img_median.fitSpline(smoothing=smoothing, use_weights=use_weights, clip=(0.0, None)) # median filter to reject outlying columns - img_fit = img_fit.medianImg((1, 7), use_mask=True) + img_fit = img_fit.medianImg((1, 7), use_mask=False) # smooth the results by 2D Gaussian filter of given width log.info(f"smoothing the background signal by a 2D Gaussian filter of width {gaussian_sigma}") From 3541dee67411c6510a545ab569afdf85de34c697 Mon Sep 17 00:00:00 2001 From: Alfredo Mejia-Narvaez Date: Sat, 11 May 2024 14:45:40 -0400 Subject: [PATCH 30/43] fixing writing of output fiber parameters --- python/lvmdrp/functions/run_calseq.py | 36 +++++++++++++-------------- 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/python/lvmdrp/functions/run_calseq.py b/python/lvmdrp/functions/run_calseq.py index 579d3739..ea58165f 100644 --- a/python/lvmdrp/functions/run_calseq.py +++ b/python/lvmdrp/functions/run_calseq.py @@ -1084,24 +1084,24 @@ def create_traces(mjd, use_fiducial_cals=True, expnums_ldls=None, expnums_qrtz=N mcents[camera].interpolate_data(axis="Y", extrapolate=True) mwidths[camera].interpolate_data(axis="Y", extrapolate=True) - # reset mask to propagate broken fibers - mamps[camera]._mask[bad_fibers] = True - mcents[camera]._mask[bad_fibers] = True - mwidths[camera]._mask[bad_fibers] = True - - # save master traces - mamp_path = path.full("lvm_master", drpver=drpver, tileid=tileid, mjd=mjd, camera=camera, kind="mamps") - mcent_path = path.full("lvm_master", drpver=drpver, tileid=tileid, mjd=mjd, camera=camera, kind="mtrace") - mwidth_path = path.full("lvm_master", drpver=drpver, tileid=tileid, mjd=mjd, camera=camera, kind="mwidth") - os.makedirs(os.path.dirname(mamp_path), exist_ok=True) - mamps[camera].writeFitsData(mamp_path) - mcents[camera].writeFitsData(mcent_path) - mwidths[camera].writeFitsData(mwidth_path) - - # eval model continuum and ratio - model, ratio = img_stray.eval_fiber_model(mamps[camera], mcents[camera], 
mwidths[camera]) - model.writeFitsData(dmodel_path) - ratio.writeFitsData(dratio_path) + # reset mask to propagate broken fibers + mamps[camera]._mask[bad_fibers] = True + mcents[camera]._mask[bad_fibers] = True + mwidths[camera]._mask[bad_fibers] = True + + # save master traces + mamp_path = path.full("lvm_master", drpver=drpver, tileid=tileid, mjd=mjd, camera=camera, kind="mamps") + mcent_path = path.full("lvm_master", drpver=drpver, tileid=tileid, mjd=mjd, camera=camera, kind="mtrace") + mwidth_path = path.full("lvm_master", drpver=drpver, tileid=tileid, mjd=mjd, camera=camera, kind="mwidth") + os.makedirs(os.path.dirname(mamp_path), exist_ok=True) + mamps[camera].writeFitsData(mamp_path) + mcents[camera].writeFitsData(mcent_path) + mwidths[camera].writeFitsData(mwidth_path) + + # eval model continuum and ratio + model, ratio = img_stray.eval_fiber_model(mamps[camera], mcents[camera], mwidths[camera]) + model.writeFitsData(dmodel_path) + ratio.writeFitsData(dratio_path) def create_fiberflats(mjd: int, use_fiducial_cals: bool = True, expnums: List[int] = None, median_box: int = 10, niter: bool = 1000, From 55fb48b2d5660559bbdc96e406b5f19c8edb5bfa Mon Sep 17 00:00:00 2001 From: Alfredo Mejia-Narvaez Date: Sun, 12 May 2024 18:35:42 -0400 Subject: [PATCH 31/43] implementing detection of exposed standard fiber for incomplete sequences --- python/lvmdrp/core/constants.py | 5 +- python/lvmdrp/functions/imageMethod.py | 5 +- python/lvmdrp/functions/run_calseq.py | 154 ++++++++++++++++++++----- 3 files changed, 133 insertions(+), 31 deletions(-) diff --git a/python/lvmdrp/core/constants.py b/python/lvmdrp/core/constants.py index 4cff5579..40d49d87 100644 --- a/python/lvmdrp/core/constants.py +++ b/python/lvmdrp/core/constants.py @@ -119,4 +119,7 @@ SPEC_CHANNELS = {"b": (3600, 5800), "r": (5775, 7570), "z": (7520, 9800)} ARC_LAMPS = ["NEON", "HGNE", "ARGON", "XENON"] -CON_LAMPS = ["LDLS", "QUARTZ"] \ No newline at end of file +CON_LAMPS = ["LDLS", "QUARTZ"] + +LVM_NBLOCKS = 18 +LVM_REFERENCE_COLUMN = 2000 diff --git a/python/lvmdrp/functions/imageMethod.py b/python/lvmdrp/functions/imageMethod.py index e2385582..d0f43f4d 100644 --- a/python/lvmdrp/functions/imageMethod.py +++ b/python/lvmdrp/functions/imageMethod.py @@ -24,7 +24,7 @@ from typing import List, Tuple from lvmdrp import log, __version__ as DRPVER -from lvmdrp.core.constants import CONFIG_PATH, SPEC_CHANNELS, ARC_LAMPS +from lvmdrp.core.constants import CONFIG_PATH, SPEC_CHANNELS, ARC_LAMPS, LVM_REFERENCE_COLUMN, LVM_NBLOCKS from lvmdrp.utils.decorators import skip_on_missing_input_path, drop_missing_input_paths from lvmdrp.utils.bitmask import QualityFlag from lvmdrp.core.fiberrows import FiberRows, _read_fiber_ypix @@ -81,9 +81,6 @@ } DEFAULT_PTC_PATH = os.path.join(os.environ["LVMCORE_DIR"], "metrology", "PTC_fit.txt") -LVM_NBLOCKS = 18 -LVM_REFERENCE_COLUMN = 2000 - description = "Provides Methods to process 2D images" __all__ = [ diff --git a/python/lvmdrp/functions/run_calseq.py b/python/lvmdrp/functions/run_calseq.py index ea58165f..11532060 100644 --- a/python/lvmdrp/functions/run_calseq.py +++ b/python/lvmdrp/functions/run_calseq.py @@ -29,14 +29,16 @@ import numpy as np from glob import glob from copy import deepcopy as copy -from shutil import copy2, move +from shutil import copy2 from itertools import product, groupby +from astropy.stats import biweight_location, biweight_scale from typing import List, Tuple, Dict from lvmdrp import log, path, __version__ as drpver from lvmdrp.utils import metadata as md +from 
lvmdrp.core.plot import create_subplots from lvmdrp.core import dataproducts as dp -from lvmdrp.core.constants import SPEC_CHANNELS +from lvmdrp.core.constants import SPEC_CHANNELS, LVM_REFERENCE_COLUMN, LVM_NBLOCKS from lvmdrp.core.tracemask import TraceMask from lvmdrp.core.image import loadImage from lvmdrp.core.rss import RSS, lvmFrame @@ -59,7 +61,7 @@ } -def get_sequence_metadata(mjd, expnums=None, exptime=None): +def get_sequence_metadata(mjd, expnums=None, exptime=None, extract_metadata=False): """Get frames metadata for a given sequence Given a set of MJDs and (optionally) exposure numbers, get the frames @@ -74,6 +76,8 @@ def get_sequence_metadata(mjd, expnums=None, exptime=None): List of exposure numbers to reduce exptime : int Filter frames metadata by exposure + extract_metadata : bool + Whether to extract metadata or not, by default False Returns: ------- @@ -83,7 +87,7 @@ def get_sequence_metadata(mjd, expnums=None, exptime=None): MJD for master frames """ # get frames metadata - frames = md.get_frames_metadata(mjd=mjd) + frames = md.get_frames_metadata(mjd=mjd, overwrite=extract_metadata) # filter by given expnums if expnums is not None: @@ -171,6 +175,114 @@ def choose_sequence(frames, flavor, kind): return chosen_frames, chosen_expnums +def get_exposed_std_fiber(mjd, expnums, camera, ref_column=LVM_REFERENCE_COLUMN, snr_threshold=5, display_plots=False): + """Returns the exposed standard fiber IDs for a given exposure sequence and camera + + Parameters + ---------- + mjd : int + MJD of the exposure sequence + expnums : list + List of exposure numbers in the sequence + camera : str + Camera name (e.g. "b1") + ref_column : int + Reference column for the fiber trace + display_plots : bool + If True, display plots + + Returns + ------- + dict + Dictionary with the exposed standard fiber IDs for each exposure in the sequence + """ + log.info(f"loading detrended frames for {camera = }, exposures = {expnums}") + rframe_paths = sorted([path.expand("lvm_anc", drpver=drpver, tileid=11111, mjd=mjd, camera=camera, expnum=expnum, kind="d", imagetype="*")[0] for expnum in expnums]) + images = [image_tasks.loadImage(rframe_path) for rframe_path in rframe_paths] + + # get exposed standard fibers from header if present + exposed_stds = {image._header["EXPOSURE"]: image._header.get("CALIBFIB", None) for image in images} + if all([exposed_std is not None for exposed_std in exposed_stds.values()]): + log.info(f"standard fibers in {camera = }: {','.join(exposed_stds.values())}") + return exposed_stds + else: + log.warning(f"exposed standard fibers not found in header for {camera = }, going to infer exposed fibers from SNR") + + # combine frames for given camera + log.info(f"combining {len(images)} exposures") + cimage = image_tasks.combineImages(images, normalize=False, background_subtract=False) + cimage.setData(data=np.nan_to_num(cimage._data), error=np.nan_to_num(cimage._error, nan=np.inf)) + # calculate correction in reference Y positions along reference column + fiber_pos = cimage.match_reference_column(ref_column) + + # define fibermap for camera & std fibers parameters + fibermap = cimage._slitmap[cimage._slitmap["spectrographid"]==int(camera[1])] + spec_select = fibermap["telescope"] == "Spec" + ids_std = fibermap[spec_select]["orig_ifulabel"] + pos_std = fiber_pos[spec_select].round().astype("int") + idx_std = np.arange(pos_std.size) + log.info(f"standard fibers in {camera = }: {','.join(ids_std)}") + + # calculate SNR along colummn + fig, axs = 
create_subplots(to_display=display_plots, + nrows=len(images)//3, ncols=3, + figsize=(15,5*len(images)//3), + sharex=True, sharey=True, + layout="constrained") + fig.supxlabel("standard fiber ID") + fig.supylabel("SNR at fiber centroid") + fig.suptitle(f"exposed standard fibers in sequence {expnums[0]} - {expnums[-1]} in camera '{camera}'") + exposed_stds, block_idxs = {}, np.arange(LVM_NBLOCKS).tolist() + for image, ax in zip(images, axs): + expnum = image._header["EXPOSURE"] + column = image.getSlice(ref_column, axis="Y") + snr = (column._data/column._error) + snr_med = biweight_location(snr[fiber_pos.round().astype("int")]) + snr_std = biweight_scale(snr[fiber_pos.round().astype("int")]) + snr_std_med = biweight_location(snr[pos_std]) + snr_std_std = biweight_scale(snr[pos_std]) + log.info(f"{expnum = } median SNR = {snr_med:.2f} +/- {snr_std:.2f} (standard fibers: {snr_std_med:.2f} +/- {snr_std_std:.2f})") + + ax.set_title(f"{expnum = }", loc="left") + ax.axhspan(snr_med-snr_std, snr_med+snr_std, lw=0, fc="0.7", alpha=0.5) + ax.axhline(snr_med, lw=1, color="0.7") + ax.axhspan(max(0, snr_std_med-snr_threshold*snr_std_std), snr_std_med+snr_threshold*snr_std_std, lw=0, fc="0.7", alpha=0.5) + ax.axhline(snr_std_med, lw=1, color="0.7") + ax.bar(idx_std, snr[pos_std], hatch="///////", lw=0, ec="tab:blue", fc="none", zorder=999) + ax.set_xticks(idx_std) + ax.set_xticklabels(ids_std) + + # select standard fiber exposed if any + select_std = (snr[pos_std] > snr_std_med+snr_threshold*snr_std_std) | (snr[pos_std] >= snr_med-snr_std) + exposed_std = ids_std[select_std] + if len(exposed_std) > 1: + exposed_std = [exposed_std[np.argmax(snr[pos_std[select_std]])]] + log.warning(f"more than one standard fiber selected in {expnum = }: {','.join(exposed_std)}, selecting higest SNR: {exposed_std[0]}") + exposed_std = exposed_std[0] if len(exposed_std) > 0 else None + if exposed_std is None: + continue + + # get block ID for exposed standard fiber + fiber_par = image._slitmap[image._slitmap["orig_ifulabel"] == exposed_std] + block_idx = int(fiber_par["blockid"][0][1:])-1 + block_idxs.remove(block_idx) + + exposed_stds[expnum] = (exposed_std, [block_idx]) + + # TODO: save figure + + # add missing blocks for first exposure + if len(block_idxs) > 0: + log.info(f"remaining blocks without exposed standard fibers: {block_idxs}, adding to first exposure") + expnum = list(exposed_stds.keys())[0] + exposed_stds[expnum] = (exposed_stds[expnum][0], sorted(exposed_stds[expnum][1]+block_idxs)) + + # list unexposed standard fibers + unexposed_stds = [fiber for fiber in ids_std if fiber not in exposed_stds.values()] + + return exposed_stds, unexposed_stds + + def _load_shift_report(mjd): """Reads QC reports with the electronic pixel shifts""" @@ -971,22 +1083,16 @@ def create_traces(mjd, use_fiducial_cals=True, expnums_ldls=None, expnums_qrtz=N tileid = frames.tileid.iloc[0] # iterate through exposures with std fibers exposed - expnum_params = _get_ring_expnums(expnums_ldls, expnums_qrtz, ring_size=12) - for camera, expnums in expnum_params.items(): - for expnum, block_idxs, fiber_str in expnums: - con_lamp = MASTER_CON_LAMPS[camera[0]] - if con_lamp == "ldls": - counts_threshold = 5000 - elif con_lamp == "quartz": - counts_threshold = 10000 - - # select fibers in current spectrograph - fibermap = SLITMAP[SLITMAP["spectrographid"] == int(camera[1])] - # select illuminated std fiber - select = fibermap["orig_ifulabel"] == fiber_str - # select fiber index - fiber_idx = np.where(select)[0][0] + cameras = ["b1", "b2", "b3", 
"r1", "r2", "r3", "z1", "z2", "z3"] + for camera in cameras: + expnums = expnums_qrtz if camera[0] == "z" else expnums_ldls + counts_threshold = 10000 if camera[0] == "z" else 5000 + + # select fibers in current spectrograph + fibermap = SLITMAP[SLITMAP["spectrographid"] == int(camera[1])] + exposed_stds, unexposed_stds = get_exposed_std_fiber(mjd=mjd, expnums=expnums, camera=camera) + for expnum, (std_fiberid, block_idxs) in exposed_stds.items(): # define paths dflat_path = path.full("lvm_anc", drpver=drpver, tileid=tileid, mjd=mjd, kind="d", imagetype="flat", camera=camera, expnum=expnum) lflat_path = path.full("lvm_anc", drpver=drpver, tileid=tileid, mjd=mjd, kind="l", imagetype="flat", camera=camera, expnum=expnum) @@ -1030,7 +1136,7 @@ def create_traces(mjd, use_fiducial_cals=True, expnums_ldls=None, expnums_qrtz=N img_stray = img_stray.convolveImg(np.ones((1, 20), dtype="uint8")) img_stray._error[img_stray._mask|(img_stray._error<=0)] = np.inf else: - log.info(f"going to trace std fiber {fiber_str} in {camera} within {block_idxs = }") + log.info(f"going to trace std fiber {std_fiberid} in {camera} within {block_idxs = }") centroids, trace_cent_fit, trace_flux_fit, trace_fwhm_fit, img_stray, model, mratio = image_tasks.trace_fibers( in_image=lflat_path, out_trace_amp=flux_path, out_trace_cent=cent_path, out_trace_fwhm=fwhm_path, @@ -1042,7 +1148,7 @@ def create_traces(mjd, use_fiducial_cals=True, expnums_ldls=None, expnums_qrtz=N ) # update master traces - log.info(f"{camera = }, {expnum = }, {fiber_str = :>6s}, fiber_idx = {fiber_idx:>3d}, FWHM = {np.nanmean(trace_fwhm_fit._data[fiber_idx]):.2f}") + log.info(f"{camera = }, {expnum = }, {std_fiberid = :>6s}") select_block = np.isin(fibermap["blockid"], [f"B{id+1}" for id in block_idxs]) if fit_poly: mamps[camera]._coeffs[select_block] = trace_flux_fit._coeffs[select_block] @@ -1065,11 +1171,7 @@ def create_traces(mjd, use_fiducial_cals=True, expnums_ldls=None, expnums_qrtz=N mcents[camera]._mask[bad_fibers] = True mwidths[camera]._mask[bad_fibers] = True # masking untraced standard fibers - try: - fiber_strs = list(zip(*expnum_params[camera]))[2] - except IndexError: - fiber_strs = [] - untraced_fibers = np.isin(fibermap["orig_ifulabel"].value, list(set(fibermap[fibermap["telescope"] == "Spec"]["orig_ifulabel"])-set(fiber_strs))) + untraced_fibers = np.isin(fibermap["orig_ifulabel"].value, unexposed_stds) mamps[camera]._mask[untraced_fibers] = True mcents[camera]._mask[untraced_fibers] = True mwidths[camera]._mask[untraced_fibers] = True From 9857441a479cb77f3d7a0f7430d24f6fd6c31fd2 Mon Sep 17 00:00:00 2001 From: Alfredo Mejia-Narvaez Date: Mon, 13 May 2024 12:59:28 -0400 Subject: [PATCH 32/43] fixing filtering of metadata throughout --- python/lvmdrp/functions/run_calseq.py | 37 +++++++++++++++------------ 1 file changed, 20 insertions(+), 17 deletions(-) diff --git a/python/lvmdrp/functions/run_calseq.py b/python/lvmdrp/functions/run_calseq.py index 11532060..3ccc4937 100644 --- a/python/lvmdrp/functions/run_calseq.py +++ b/python/lvmdrp/functions/run_calseq.py @@ -133,13 +133,13 @@ def choose_sequence(frames, flavor, kind): raise ValueError(f"invalid kind '{kind}', available values are 'nightly' and 'longterm'") if flavor == "twilight": - query = "imagetyp == 'flat' and not ldls|quartz" + query = "imagetyp == 'flat' and not (ldls|quartz)" elif flavor == "bias": query = "imagetyp == 'bias'" elif flavor == "flat": - query = "imagetyp == 'flat' and ldls|quartz" + query = "imagetyp == 'flat' and (ldls|quartz)" elif flavor == "arc": - 
query = "imagetyp == 'arc' and not ldls|quartz and neon|hgne|argon|xenon" + query = "imagetyp == 'arc' and not (ldls|quartz) and (neon|hgne|argon|xenon)" expnums = np.sort(frames.query(query).expnum.unique()) diff = np.diff(expnums) div, = np.where(np.abs(diff) > 1) @@ -149,14 +149,17 @@ def choose_sequence(frames, flavor, kind): log.info(f"found sequences: {sequences}") if len(sequences) == 0: - raise ValueError(f"no calibration frames of flavor '{flavor}' found using the query: {query}") + raise ValueError(f"no calibration frames of flavor '{flavor}' found using the query: '{query}'") lengths = [len(seq) for seq in sequences] - idx = lengths.index(min(lengths) if kind == "nightly" else max(lengths)) - if len(sequences) > 1: - chosen_expnums = sequences[idx] + if flavor == "twilight": + chosen_expnums = np.concatenate(sequences) else: - chosen_expnums = sequences[idx] + idx = lengths.index(min(lengths) if kind == "nightly" else max(lengths)) + if len(sequences) > 1: + chosen_expnums = sequences[idx] + else: + chosen_expnums = sequences[0] if flavor == "twilight": expected_length = 24 @@ -323,9 +326,9 @@ def _get_reference_expnum(frame, ref_frames): Reference frame metadata """ if frame.imagetyp == "flat" and frame.ldls|frame.quartz: - refs = ref_frames.query("imagetyp == 'flat' and ldls|quartz") + refs = ref_frames.query("imagetyp == 'flat' and (ldls|quartz)") elif frame.imagetyp == "flat": - refs = ref_frames.query("imagetyp == 'flat' and not ldls|quartz") + refs = ref_frames.query("imagetyp == 'flat' and not (ldls|quartz)") else: refs = ref_frames.query("imagetyp == @frame.imagetyp") @@ -343,7 +346,7 @@ def _get_reference_expnum(frame, ref_frames): def _move_master_calibrations(mjd, kind=None): kinds = {"bias", "trace", "width", "fiberflat", "wave", "lsf"} - if isinstance(kind, (list, tuple, np.ndarray)): + if isinstance(kind, (list, tuple, set, np.ndarray)): kinds = kind elif isinstance(kind, str) and kind in kinds: kinds = {kind} @@ -355,7 +358,7 @@ def _move_master_calibrations(mjd, kind=None): for kind in kinds: src_paths = path.expand("lvm_master", drpver=drpver, tileid=11111, mjd=mjd, kind=f"m{kind}", camera="*") for src_path in src_paths: - camera = path.extract("lvm_master", src_path)["camera"] + camera = os.path.basename(src_path).split(".")[0].split("-")[-1] dst_path = path.full("lvm_calib", mjd=mjd, kind=kind, camera=camera) if os.path.isfile(src_path): os.makedirs(os.path.dirname(dst_path), exist_ok=True) @@ -1255,7 +1258,7 @@ def create_fiberflats(mjd: int, use_fiducial_cals: bool = True, expnums: List[in # get metadata flats = get_sequence_metadata(mjd, expnums=expnums) if expnums is None: - flats.query("imagetyp == 'flat' and not ldls|quartz", inplace=True) + flats.query("imagetyp == 'flat' and not (ldls|quartz)", inplace=True) # 2D reduction of twilight sequence reduce_2d(mjd=mjd, use_fiducial_cals=use_fiducial_cals, expnums=flats.expnum.unique(), reject_cr=False, skip_done=skip_done) @@ -1430,7 +1433,7 @@ def create_wavelengths(mjd, use_fiducial_cals=True, expnums=None, skip_done=True Skip pipeline steps that have already been done """ frames = get_sequence_metadata(mjd, expnums=expnums) - frames = frames.query("imagetyp=='arc' or (not ldls|quartz and neon|hgne|argon|xenon)") + frames = frames.query("imagetyp=='arc' or (not (ldls|quartz) and (neon|hgne|argon|xenon))") frames["imagetyp"] = "arc" if use_fiducial_cals: @@ -1542,7 +1545,7 @@ def reduce_nightly_sequence(mjd, use_fiducial_cals=True, reject_cr=True, skip_do else: log.warning("no bias exposures found") - 
dome_flats = frames.query("imagetyp == 'flat' and ldls|quartz") + dome_flats = frames.query("imagetyp == 'flat' and (ldls|quartz)") if len(dome_flats) != 0: expnums_ldls = np.sort(dome_flats.query("ldls").expnum.unique()) expnums_qrtz = np.sort(dome_flats.query("quartz").expnum.unique()) @@ -1552,14 +1555,14 @@ def reduce_nightly_sequence(mjd, use_fiducial_cals=True, reject_cr=True, skip_do else: log.warning("no dome flat exposures found") - arcs = frames.query("imagetyp == 'arc' and not ldls|quartz and neon|hgne|argon|xenon") + arcs = frames.query("imagetyp == 'arc' and not (ldls|quartz) and (neon|hgne|argon|xenon)") if len(arcs) != 0: log.info(f"found {len(arcs)} arc exposures: {set(arcs.expnum)}") create_wavelengths(mjd=mjd, expnums=np.sort(arcs.expnum.unique()), skip_done=skip_done) else: log.warning("no arc exposures found") - twilight_flats = frames.query("imagetyp == 'flat' and not ldls|quartz") + twilight_flats = frames.query("imagetyp == 'flat' and not (ldls|quartz)") if len(twilight_flats) != 0: log.info(f"found {len(twilight_flats)} twilight exposures: {set(twilight_flats.expnum)}") create_fiberflats(mjd=mjd, expnums=sorted(np.sort(twilight_flats.expnum.unique())), skip_done=skip_done) From abaf262efc660c1b9ac0f12bed0d75659325b385 Mon Sep 17 00:00:00 2001 From: Alfredo Mejia-Narvaez Date: Mon, 13 May 2024 14:39:42 -0400 Subject: [PATCH 33/43] fixing tests --- tests/functions/test_runtwilights.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/functions/test_runtwilights.py b/tests/functions/test_runtwilights.py index 9c008885..9ce73647 100644 --- a/tests/functions/test_runtwilights.py +++ b/tests/functions/test_runtwilights.py @@ -99,7 +99,7 @@ def test_fit_continuum(): assert best_continuum.size == spectrum._data.size assert len(models) == 0 assert masked_pixels.sum() == 12 - assert len(tck[0]) == 95 + assert len(tck[0]) == 100+3 assert best_continuum[0] == pytest.approx(0, abs=1e-10) assert best_continuum[-1] == pytest.approx(0, abs=1e-10) From 4c01a52281b59a2362b4781b656e2c381f980238 Mon Sep 17 00:00:00 2001 From: Alfredo Mejia-Narvaez Date: Mon, 13 May 2024 16:02:48 -0400 Subject: [PATCH 34/43] removing +1 pixel to QC reports --- python/lvmdrp/functions/run_calseq.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/lvmdrp/functions/run_calseq.py b/python/lvmdrp/functions/run_calseq.py index 3ccc4937..82177d61 100644 --- a/python/lvmdrp/functions/run_calseq.py +++ b/python/lvmdrp/functions/run_calseq.py @@ -301,7 +301,7 @@ def _load_shift_report(mjd): exp = int(exp) spec = spec[-1] shifts = np.array([int(_) for _ in cols[4:]]) - shifts_report[(spec, exp)] = (shifts[::2]+1, shifts[1::2]) + shifts_report[(spec, exp)] = (shifts[::2], shifts[1::2]) return shifts_report From d7010acb5a5911015ad1ff9c9d7800f91e5a0b59 Mon Sep 17 00:00:00 2001 From: Alfredo Mejia-Narvaez Date: Mon, 13 May 2024 16:39:16 -0400 Subject: [PATCH 35/43] using get_sequence_metadata to fix inconsistent metadata --- python/lvmdrp/functions/run_calseq.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/python/lvmdrp/functions/run_calseq.py b/python/lvmdrp/functions/run_calseq.py index 82177d61..914fdbc2 100644 --- a/python/lvmdrp/functions/run_calseq.py +++ b/python/lvmdrp/functions/run_calseq.py @@ -1533,7 +1533,7 @@ def reduce_nightly_sequence(mjd, use_fiducial_cals=True, reject_cr=True, skip_do cal_imagetyps = {"bias", "flat", "arc"} log.info(f"going to reduce nightly calibration frames: {cal_imagetyps}") - frames = 
md.get_frames_metadata(mjd) + frames = md.get_sequence_metadata(mjd) frames.query("imagetyp in @cal_imagetyps", inplace=True) if len(frames) == 0: raise ValueError(f"no frames found for MJD = {mjd}") @@ -1601,7 +1601,7 @@ def reduce_longterm_sequence(mjd, use_fiducial_cals=True, reject_cr=True, skip_d cal_imagetyps = {"bias", "flat", "arc"} log.info(f"going to reduce nightly calibration frames: {cal_imagetyps}") - frames = md.get_frames_metadata(mjd) + frames = get_sequence_metadata(mjd) frames.query("imagetyp in @cal_imagetyps", inplace=True) if len(frames) == 0: raise ValueError(f"no frames found for MJD = {mjd}") From c771abcda56b699a57fa3b7115818283579c5825 Mon Sep 17 00:00:00 2001 From: Alfredo Mejia-Narvaez Date: Tue, 14 May 2024 06:49:43 -0400 Subject: [PATCH 36/43] improving custom shifts handling --- python/lvmdrp/core/image.py | 2 +- python/lvmdrp/functions/imageMethod.py | 117 +++++++++++++------------ 2 files changed, 60 insertions(+), 59 deletions(-) diff --git a/python/lvmdrp/core/image.py b/python/lvmdrp/core/image.py index 93a70cf3..c5f6a394 100644 --- a/python/lvmdrp/core/image.py +++ b/python/lvmdrp/core/image.py @@ -2354,7 +2354,7 @@ def trace_fiber_widths(self, fiber_centroids, ref_column=2000, ncolumns=40, nblo integral_mod = integral_mod if integral_mod != 0 else numpy.nan # NOTE: this is a hack to avoid integrating the whole column when tracing a few blocks integral_dat = numpy.trapz(img_slice._data * (mod_slice>0), img_slice._pixels) - residuals.append((integral_dat - integral_mod) / integral_dat * 100) + residuals.append((integral_mod - integral_dat) / integral_dat * 100) # compute fitted model stats chisq_red = bn.nansum((mod_slice - img_slice._data)[~img_slice._mask]**2 / img_slice._error[~img_slice._mask]**2) / (self._dim[0] - 1 - 3) diff --git a/python/lvmdrp/functions/imageMethod.py b/python/lvmdrp/functions/imageMethod.py index d0f43f4d..50838bd4 100644 --- a/python/lvmdrp/functions/imageMethod.py +++ b/python/lvmdrp/functions/imageMethod.py @@ -408,7 +408,7 @@ def _fix_fiber_thermal_shifts(image, trace_cent, trace_width=None, trace_amp=Non return trace_cent_fixed, column_shifts, model -def _apply_electronic_shifts(images, out_images, drp_shifts, qc_shifts=None, custom_shifts=None, raw_shifts=None, +def _apply_electronic_shifts(images, out_images, drp_shifts=None, qc_shifts=None, custom_shifts=None, raw_shifts=None, which_shifts="drp", apply_shifts=True, dry_run=False, display_plots=False): """Applies the chosen electronic pixel shifts to the images and plots the results @@ -419,7 +419,7 @@ def _apply_electronic_shifts(images, out_images, drp_shifts, qc_shifts=None, cus out_images : list list of output images drp_shifts : numpy.ndarray - DRP electronic pixel shifts + DRP electronic pixel shifts, by default None qc_shifts : numpy.ndarray QC electronic pixel shifts, by default None custom_shifts : numpy.ndarray @@ -481,7 +481,8 @@ def _apply_electronic_shifts(images, out_images, drp_shifts, qc_shifts=None, cus if raw_shifts is not None: ax.step(y_pixels, raw_shifts, where="mid", lw=0.5, color="0.9", label="raw DRP") ax.step(y_pixels, this_shifts, where="mid", color="k", lw=3) - ax.step(y_pixels, drp_shifts, where="mid", lw=1, color="tab:blue", label="DRP") + if drp_shifts is not None: + ax.step(y_pixels, drp_shifts, where="mid", lw=1, color="tab:blue", label="DRP") if qc_shifts is not None: ax.step(y_pixels, qc_shifts, where="mid", lw=2, color="tab:green", label="QC") if custom_shifts is not None: @@ -695,12 +696,22 @@ def fix_pixel_shifts(in_images, 
out_images, ref_images, in_mask, report=None, images_out = images # initialize custom shifts + raw_shifts = None + dshifts = None + qshifts = None cshifts = None apply_shifts = True which_shifts = "drp" # calculate pixel shifts or use provided ones - if shift_rows is None: + if shift_rows is not None: + log.info("using user provided pixel shifts") + cshifts = numpy.zeros(cdata.shape[0]) + for irow in shift_rows: + cshifts[irow:] += 2 + corrs = numpy.zeros_like(cshifts) + which_shifts = "custom" + else: log.info("running row-by-row cross-correlation") dshifts, corrs = [], [] for irow in range(rdata.shape[0]): @@ -724,64 +735,54 @@ def fix_pixel_shifts(in_images, out_images, ref_images, in_mask, report=None, dshifts = numpy.asarray(dshifts) corrs = numpy.asarray(corrs) - raw_shifts = None dshifts = _remove_spikes(dshifts, width=flat_spikes, threshold=threshold_spikes) dshifts = _fillin_valleys(dshifts, width=fill_gaps) dshifts = _no_stepdowns(dshifts) - else: - log.info("using user provided pixel shifts") - cshifts = numpy.zeros(cdata.shape[0]) - for irow in shift_rows: - cshifts[irow:] += 2 - raw_shifts = None - corrs = numpy.zeros_like(cshifts) - # parse QC reports with the electronic pixel shifts - if report is not None: - shift_rows, amount = report - qshifts = numpy.zeros(cdata.shape[0]) - for irow in shift_rows: - qshifts[irow:] = amount - else: - qshifts = None - - # compare QC reports with the electronic pixel shifts - if qshifts is not None: - qshifted_rows = numpy.where(numpy.gradient(qshifts) > 0)[0][1::2].tolist() - shifted_rows = numpy.where(numpy.gradient(dshifts) > 0)[0][1::2].tolist() - log.info(f"QC reports shifted rows: {qshifted_rows}") - log.info(f"DRP shifted rows: {shifted_rows}") - if not numpy.all(qshifts == dshifts): - _apply_electronic_shifts(images=images, out_images=out_images, - drp_shifts=dshifts, qc_shifts=qshifts, raw_shifts=raw_shifts, - which_shifts="drp", apply_shifts=True, - dry_run=True, display_plots=display_plots) - log.warning("QC reports and DRP do not agree on the shifted rows") - if interactive: - log.info("interactive mode enabled") - answer = input("apply [q]c, [d]rp or [c]ustom shifts: ") - if answer.lower() == "q": - log.info("choosing QC shifts") - shifts = qshifts - which_shifts = "qc" - elif answer.lower() == "d": - log.info("choosing DRP shifts") - shifts = dshifts - which_shifts = "drp" - elif answer.lower() == "c": - log.info("choosing custom shifts") - answer = input("provide comma-separated custom shifts and press enter: ") - shift_rows = numpy.array([int(_) for _ in answer.split(",")]) - cshifts = numpy.zeros(cdata.shape[0]) - for irow in shift_rows: - cshifts[irow:] += 2 - shifts = cshifts - corrs = numpy.zeros_like(cshifts) - which_shifts = "custom" - apply_shifts = numpy.any(numpy.abs(shifts)>0) - else: - log.warning(f"no shift will be applied to the images: {in_images}") - apply_shifts = False + # parse QC reports with the electronic pixel shifts + if report is not None: + shift_rows, amounts = report + qshifts = numpy.zeros(cdata.shape[0]) + for irow, amount in zip(shift_rows, amounts[::-1]): + qshifts[irow:] = amount + + # compare QC reports with the electronic pixel shifts + if qshifts is not None: + qshifted_rows = numpy.where(numpy.gradient(qshifts) > 0)[0][1::2].tolist() + shifted_rows = numpy.where(numpy.gradient(dshifts) > 0)[0][1::2].tolist() + log.info(f"QC reports shifted rows: {qshifted_rows}") + log.info(f"DRP shifted rows: {shifted_rows}") + if not numpy.all(qshifts == dshifts): + 
_apply_electronic_shifts(images=images, out_images=out_images, + drp_shifts=dshifts, qc_shifts=qshifts, raw_shifts=raw_shifts, + which_shifts="drp", apply_shifts=True, + dry_run=True, display_plots=display_plots) + log.warning("QC reports and DRP do not agree on the shifted rows") + if interactive: + log.info("interactive mode enabled") + answer = input("apply [q]c, [d]rp or [c]ustom shifts: ") + if answer.lower() == "q": + log.info("choosing QC shifts") + shifts = qshifts + which_shifts = "qc" + elif answer.lower() == "d": + log.info("choosing DRP shifts") + shifts = dshifts + which_shifts = "drp" + elif answer.lower() == "c": + log.info("choosing custom shifts") + answer = input("provide comma-separated custom shifts and press enter: ") + shift_rows = numpy.array([int(_) for _ in answer.split(",")]) + cshifts = numpy.zeros(cdata.shape[0]) + for irow in shift_rows: + cshifts[irow:] += 2 + shifts = cshifts + corrs = numpy.zeros_like(cshifts) + which_shifts = "custom" + apply_shifts = numpy.any(numpy.abs(shifts)>0) + else: + log.warning(f"no shift will be applied to the images: {in_images}") + apply_shifts = False # apply pixel shifts to the images images_out, shifts, _, = _apply_electronic_shifts(images=images, out_images=out_images, raw_shifts=raw_shifts, From bcfbd01b926bcdd9bf99e1e823d860e5ca96d8cb Mon Sep 17 00:00:00 2001 From: Alfredo Mejia-Narvaez Date: Tue, 14 May 2024 11:02:04 -0400 Subject: [PATCH 37/43] improving skipping in traces creation --- python/lvmdrp/functions/run_calseq.py | 123 ++++++++++++-------------- 1 file changed, 58 insertions(+), 65 deletions(-) diff --git a/python/lvmdrp/functions/run_calseq.py b/python/lvmdrp/functions/run_calseq.py index 914fdbc2..c6d93950 100644 --- a/python/lvmdrp/functions/run_calseq.py +++ b/python/lvmdrp/functions/run_calseq.py @@ -345,7 +345,7 @@ def _get_reference_expnum(frame, ref_frames): def _move_master_calibrations(mjd, kind=None): - kinds = {"bias", "trace", "width", "fiberflat", "wave", "lsf"} + kinds = {"bias", "trace", "width", "fiberflat", "fiberflat_twilight", "wave", "lsf"} if isinstance(kind, (list, tuple, set, np.ndarray)): kinds = kind elif isinstance(kind, str) and kind in kinds: @@ -1126,18 +1126,8 @@ def create_traces(mjd, use_fiducial_cals=True, expnums_ldls=None, expnums_qrtz=N in_cent_trace=cent_guess_path, select_nrows=(5,5), use_weights=True, aperture=15, smoothing=400, median_box=101, gaussian_sigma=20.0) - if skip_done and os.path.isfile(flux_path): - log.info(f"skipping {flux_path}, file already exist") - trace_cent_fit = TraceMask.from_file(cent_path) - trace_flux_fit = TraceMask.from_file(flux_path) - trace_fwhm_fit = TraceMask.from_file(fwhm_path) - img_stray = loadImage(lflat_path) - img_stray.setData(data=np.nan_to_num(img_stray._data), error=np.nan_to_num(img_stray._error)) - img_stray = img_stray.replaceMaskMedian(1, 10, replace_error=None) - img_stray._data = np.nan_to_num(img_stray._data) - img_stray = img_stray.medianImg((1,10), propagate_error=True) - img_stray = img_stray.convolveImg(np.ones((1, 20), dtype="uint8")) - img_stray._error[img_stray._mask|(img_stray._error<=0)] = np.inf + if skip_done and os.path.isfile(cent_path) and os.path.isfile(flux_path) and os.path.isfile(fwhm_path): + log.info(f"skipping {cent_path}, {flux_path} and {fwhm_path}, file already exist") else: log.info(f"going to trace std fiber {std_fiberid} in {camera} within {block_idxs = }") centroids, trace_cent_fit, trace_flux_fit, trace_fwhm_fit, img_stray, model, mratio = image_tasks.trace_fibers( @@ -1150,63 +1140,66 @@ def 
create_traces(mjd, use_fiducial_cals=True, expnums_ldls=None, expnums_qrtz=N fit_poly=fit_poly, interpolate_missing=False, poly_deg=(poly_deg_amp, poly_deg_cent, poly_deg_width) ) - # update master traces - log.info(f"{camera = }, {expnum = }, {std_fiberid = :>6s}") - select_block = np.isin(fibermap["blockid"], [f"B{id+1}" for id in block_idxs]) - if fit_poly: - mamps[camera]._coeffs[select_block] = trace_flux_fit._coeffs[select_block] - mcents[camera]._coeffs[select_block] = trace_cent_fit._coeffs[select_block] - mwidths[camera]._coeffs[select_block] = trace_fwhm_fit._coeffs[select_block] - else: - mamps[camera]._coeffs = None - mcents[camera]._coeffs = None - mwidths[camera]._coeffs = None - mamps[camera]._data[select_block] = trace_flux_fit._data[select_block] - mcents[camera]._data[select_block] = trace_cent_fit._data[select_block] - mwidths[camera]._data[select_block] = trace_fwhm_fit._data[select_block] - mamps[camera]._mask[select_block] = False - mcents[camera]._mask[select_block] = False - mwidths[camera]._mask[select_block] = False - - # masking bad fibers - bad_fibers = fibermap["fibstatus"] == 1 - mamps[camera]._mask[bad_fibers] = True - mcents[camera]._mask[bad_fibers] = True - mwidths[camera]._mask[bad_fibers] = True - # masking untraced standard fibers - untraced_fibers = np.isin(fibermap["orig_ifulabel"].value, unexposed_stds) - mamps[camera]._mask[untraced_fibers] = True - mcents[camera]._mask[untraced_fibers] = True - mwidths[camera]._mask[untraced_fibers] = True - - # interpolate master traces in missing fibers - if fit_poly: - mamps[camera].interpolate_coeffs() - mcents[camera].interpolate_coeffs() - mwidths[camera].interpolate_coeffs() - else: - mamps[camera].interpolate_data(axis="Y", extrapolate=True) - mcents[camera].interpolate_data(axis="Y", extrapolate=True) - mwidths[camera].interpolate_data(axis="Y", extrapolate=True) + # update master traces + log.info(f"{camera = }, {expnum = }, {std_fiberid = :>6s}") + select_block = np.isin(fibermap["blockid"], [f"B{id+1}" for id in block_idxs]) + if fit_poly: + mamps[camera]._coeffs[select_block] = trace_flux_fit._coeffs[select_block] + mcents[camera]._coeffs[select_block] = trace_cent_fit._coeffs[select_block] + mwidths[camera]._coeffs[select_block] = trace_fwhm_fit._coeffs[select_block] + else: + mamps[camera]._coeffs = None + mcents[camera]._coeffs = None + mwidths[camera]._coeffs = None + mamps[camera]._data[select_block] = trace_flux_fit._data[select_block] + mcents[camera]._data[select_block] = trace_cent_fit._data[select_block] + mwidths[camera]._data[select_block] = trace_fwhm_fit._data[select_block] + mamps[camera]._mask[select_block] = False + mcents[camera]._mask[select_block] = False + mwidths[camera]._mask[select_block] = False - # reset mask to propagate broken fibers - mamps[camera]._mask[bad_fibers] = True - mcents[camera]._mask[bad_fibers] = True - mwidths[camera]._mask[bad_fibers] = True - - # save master traces mamp_path = path.full("lvm_master", drpver=drpver, tileid=tileid, mjd=mjd, camera=camera, kind="mamps") mcent_path = path.full("lvm_master", drpver=drpver, tileid=tileid, mjd=mjd, camera=camera, kind="mtrace") mwidth_path = path.full("lvm_master", drpver=drpver, tileid=tileid, mjd=mjd, camera=camera, kind="mwidth") os.makedirs(os.path.dirname(mamp_path), exist_ok=True) - mamps[camera].writeFitsData(mamp_path) - mcents[camera].writeFitsData(mcent_path) - mwidths[camera].writeFitsData(mwidth_path) - - # eval model continuum and ratio - model, ratio = img_stray.eval_fiber_model(mamps[camera], 
mcents[camera], mwidths[camera]) - model.writeFitsData(dmodel_path) - ratio.writeFitsData(dratio_path) + if skip_done and os.path.isfile(mcent_path) and os.path.isfile(mamp_path) and os.path.isfile(mwidth_path): + log.info(f"skipping {mcent_path}, {mamp_path} and {mwidth_path}, files already exist") + else: + # masking bad fibers + bad_fibers = fibermap["fibstatus"] == 1 + mamps[camera]._mask[bad_fibers] = True + mcents[camera]._mask[bad_fibers] = True + mwidths[camera]._mask[bad_fibers] = True + # masking untraced standard fibers + untraced_fibers = np.isin(fibermap["orig_ifulabel"].value, unexposed_stds) + mamps[camera]._mask[untraced_fibers] = True + mcents[camera]._mask[untraced_fibers] = True + mwidths[camera]._mask[untraced_fibers] = True + + # interpolate master traces in missing fibers + if fit_poly: + mamps[camera].interpolate_coeffs() + mcents[camera].interpolate_coeffs() + mwidths[camera].interpolate_coeffs() + else: + mamps[camera].interpolate_data(axis="Y", extrapolate=True) + mcents[camera].interpolate_data(axis="Y", extrapolate=True) + mwidths[camera].interpolate_data(axis="Y", extrapolate=True) + + # reset mask to propagate broken fibers + mamps[camera]._mask[bad_fibers] = True + mcents[camera]._mask[bad_fibers] = True + mwidths[camera]._mask[bad_fibers] = True + + # save master traces + mamps[camera].writeFitsData(mamp_path) + mcents[camera].writeFitsData(mcent_path) + mwidths[camera].writeFitsData(mwidth_path) + + # eval model continuum and ratio + model, ratio = img_stray.eval_fiber_model(mamps[camera], mcents[camera], mwidths[camera]) + model.writeFitsData(dmodel_path) + ratio.writeFitsData(dratio_path) def create_fiberflats(mjd: int, use_fiducial_cals: bool = True, expnums: List[int] = None, median_box: int = 10, niter: bool = 1000, From 4790235130e2fb6cee5034105f9e3346122509ca Mon Sep 17 00:00:00 2001 From: Alfredo Mejia-Narvaez Date: Tue, 14 May 2024 11:38:10 -0400 Subject: [PATCH 38/43] cleaning duplicated filtering in arcs reduction --- python/lvmdrp/functions/run_calseq.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/python/lvmdrp/functions/run_calseq.py b/python/lvmdrp/functions/run_calseq.py index c6d93950..b0f013ca 100644 --- a/python/lvmdrp/functions/run_calseq.py +++ b/python/lvmdrp/functions/run_calseq.py @@ -1426,8 +1426,6 @@ def create_wavelengths(mjd, use_fiducial_cals=True, expnums=None, skip_done=True Skip pipeline steps that have already been done """ frames = get_sequence_metadata(mjd, expnums=expnums) - frames = frames.query("imagetyp=='arc' or (not (ldls|quartz) and (neon|hgne|argon|xenon))") - frames["imagetyp"] = "arc" if use_fiducial_cals: masters_mjd = get_master_mjd(mjd) From 58dd2c99cb64d7b4e809f3c0c77a81894e881955 Mon Sep 17 00:00:00 2001 From: Alfredo Mejia-Narvaez Date: Tue, 14 May 2024 16:47:14 -0400 Subject: [PATCH 39/43] fitting a linear continuum brackground when measuring arc lines --- python/lvmdrp/core/spectrum1d.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/python/lvmdrp/core/spectrum1d.py b/python/lvmdrp/core/spectrum1d.py index 31098add..935fb247 100644 --- a/python/lvmdrp/core/spectrum1d.py +++ b/python/lvmdrp/core/spectrum1d.py @@ -3231,8 +3231,8 @@ def _fit_gaussian(self, select, back, error, ftol, xtol, warning): par = [0.0, 0.0, 0.0] gauss = fit_profile.Gaussian(par) else: - par = [0.0, 0.0, 0.0, 0.0] - gauss = fit_profile.Gaussian_const(par) + par = [0.0, 0.0, 0.0, 0.0, 0.0] + gauss = fit_profile.Gaussian_poly(par) gauss.fit( self._wave[select], From 150b1f680188ca9000546d5a10f54d3fbdd23aff 
Mon Sep 17 00:00:00 2001 From: Alfredo Mejia-Narvaez Date: Wed, 15 May 2024 09:05:14 -0400 Subject: [PATCH 40/43] updating wavelength & LSF fitting --- python/lvmdrp/core/fiberrows.py | 11 +++++++++++ python/lvmdrp/core/spectrum1d.py | 4 ++-- .../lvm-pixwav-neon_hgne_argon_xenon_b2.txt | 2 +- python/lvmdrp/functions/rssMethod.py | 13 +++++++++++++ 4 files changed, 27 insertions(+), 3 deletions(-) diff --git a/python/lvmdrp/core/fiberrows.py b/python/lvmdrp/core/fiberrows.py index 1a35e7c5..e1841a24 100644 --- a/python/lvmdrp/core/fiberrows.py +++ b/python/lvmdrp/core/fiberrows.py @@ -900,6 +900,17 @@ def measureArcLines( for i in iterator: spec = self.getSpec(i) + # if i in [562, 563, 564, 565]: + # ncols = 3 + # nlines = len(cent_wave[0]) + # nrows = int(numpy.ceil(nlines / ncols)) + # fig, axs = plt.subplots(nrows=nrows, ncols=ncols, figsize=(6*ncols, 6*nrows)) + # axs = axs.flatten() + # fig.suptitle("Gaussian fitting") + # fig.supylabel("counts (e-/pixel)") + # fit = spec.fitSepGauss(cent_wave[i - 1], aperture, init_back, axs=axs) + # fig.savefig(f"lines_fit_{i}.png", bbox_inches="tight") + # else: fit = spec.fitSepGauss(cent_wave[i - 1], aperture, init_back, axs=None) flux[i, :] = numpy.fabs(fit[:nlines]) cent_wave[i, :] = fit[nlines : 2 * nlines] diff --git a/python/lvmdrp/core/spectrum1d.py b/python/lvmdrp/core/spectrum1d.py index 935fb247..31098add 100644 --- a/python/lvmdrp/core/spectrum1d.py +++ b/python/lvmdrp/core/spectrum1d.py @@ -3231,8 +3231,8 @@ def _fit_gaussian(self, select, back, error, ftol, xtol, warning): par = [0.0, 0.0, 0.0] gauss = fit_profile.Gaussian(par) else: - par = [0.0, 0.0, 0.0, 0.0, 0.0] - gauss = fit_profile.Gaussian_poly(par) + par = [0.0, 0.0, 0.0, 0.0] + gauss = fit_profile.Gaussian_const(par) gauss.fit( self._wave[select], diff --git a/python/lvmdrp/etc/wavelength/lvm-pixwav-neon_hgne_argon_xenon_b2.txt b/python/lvmdrp/etc/wavelength/lvm-pixwav-neon_hgne_argon_xenon_b2.txt index 80d8e39a..7bd10083 100644 --- a/python/lvmdrp/etc/wavelength/lvm-pixwav-neon_hgne_argon_xenon_b2.txt +++ b/python/lvmdrp/etc/wavelength/lvm-pixwav-neon_hgne_argon_xenon_b2.txt @@ -1,5 +1,5 @@ 319 -74.71 3593.5684 1 +74.71 3593.5684 0 165.63 3650.1530 1 173.21 3654.8360 0 186.59 3663.2046 1 diff --git a/python/lvmdrp/functions/rssMethod.py b/python/lvmdrp/functions/rssMethod.py index 7a5f34aa..22e9ba9b 100644 --- a/python/lvmdrp/functions/rssMethod.py +++ b/python/lvmdrp/functions/rssMethod.py @@ -393,6 +393,10 @@ def determine_wavelength_solution(in_arcs: List[str], out_wave: str, out_lsf: st label="lines_fitting", ) + # numpy.savetxt("./pixels.txt", cent_wave) + # numpy.savetxt("./flux.txt", flux) + # numpy.savetxt("./fwhm.txt", fwhm) + if fiberflat != "": log.info("computing fiberflat from measured lines") norm_flux = numpy.zeros_like(ref_lines) @@ -480,6 +484,15 @@ def determine_wavelength_solution(in_arcs: List[str], out_wave: str, out_lsf: st wave_coeffs[i, :] = wave_poly.convert().coef wave_sol[i, :] = wave_poly(arc._pixels) wave_rms[i] = numpy.std(wave_poly(cent_wave[i, use_line]) - ref_lines[use_line]) + # if i in [565, 566, 567, 568]: + # fig, ax = plt.subplots(figsize=(15, 7)) + # # ax.plot(cent_wave[i], ref_lines+i - wave_poly.convert().coef[0] - wave_poly.convert().coef[1]*cent_wave[i], "ok") + # # ax.plot(arc._pixels, wave_poly(arc._pixels)+i - wave_poly.convert().coef[0] - wave_poly.convert().coef[1]*arc._pixels, "-r") + # residuals = wave_poly(cent_wave[i]) - ref_lines + # ax.plot(cent_wave[i], residuals, "o", color=("b" if i != 319 else "r")) + # 
fig.savefig("wave_poly.png") + # print(f"{i}: {cent_wave[i]}") + # print(wave_poly.convert().coef) log.info( "finished wavelength fitting with median " From 4d1ded23876a19462cc9e4d685a40e85e714e737 Mon Sep 17 00:00:00 2001 From: Alfredo Mejia-Narvaez Date: Wed, 15 May 2024 14:48:01 -0400 Subject: [PATCH 41/43] fixing flux calibration to use all three specs --- python/lvmdrp/core/fluxcal.py | 2 +- python/lvmdrp/functions/fluxCalMethod.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/python/lvmdrp/core/fluxcal.py b/python/lvmdrp/core/fluxcal.py index 87f64fad..c9a9de74 100644 --- a/python/lvmdrp/core/fluxcal.py +++ b/python/lvmdrp/core/fluxcal.py @@ -25,7 +25,7 @@ def retrieve_header_stars(rss): lat=-29.008999964 * u.deg, lon=-70.688663912 * u.deg, height=2800 * u.m ) h = rss._header - slitmap = rss._slitmap[rss._slitmap["spectrographid"] == int(h["SPEC"][-1])] + slitmap = rss._slitmap # retrieve the data for the 12 standards from the header stddata = [] for i in range(12): diff --git a/python/lvmdrp/functions/fluxCalMethod.py b/python/lvmdrp/functions/fluxCalMethod.py index 5923726d..c627f165 100644 --- a/python/lvmdrp/functions/fluxCalMethod.py +++ b/python/lvmdrp/functions/fluxCalMethod.py @@ -342,8 +342,8 @@ def fluxcal_Gaia(channel, in_rss, plot=True, GAIA_CACHE_DIR=None): ) if plot: - plt.plot(wgood, sgood, "r.", markersize=4) - plt.plot(w, res[f"STD{nn}SEN"], linewidth=0.5) + plt.plot(wgood, sgood, ".k", markersize=2, zorder=-999) + plt.plot(w, res[f"STD{nn}SEN"], linewidth=1) # plt.ylim(0,0.1e-11) rms = biweight_scale(res.to_pandas().values, axis=1, ignore_nan=True) From 2aaedf8926c73102b28d9f2e7e2744f942597b97 Mon Sep 17 00:00:00 2001 From: Alfredo Mejia-Narvaez Date: Thu, 16 May 2024 00:05:55 -0400 Subject: [PATCH 42/43] fixing gradient field correction --- python/lvmdrp/functions/run_calseq.py | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/python/lvmdrp/functions/run_calseq.py b/python/lvmdrp/functions/run_calseq.py index b0f013ca..6892c618 100644 --- a/python/lvmdrp/functions/run_calseq.py +++ b/python/lvmdrp/functions/run_calseq.py @@ -1256,13 +1256,12 @@ def create_fiberflats(mjd: int, use_fiducial_cals: bool = True, expnums: List[in # 2D reduction of twilight sequence reduce_2d(mjd=mjd, use_fiducial_cals=use_fiducial_cals, expnums=flats.expnum.unique(), reject_cr=False, skip_done=skip_done) + masters_mjd = get_master_mjd(mjd) + masters_path = os.path.join(MASTERS_DIR, f"{masters_mjd}") for flat in flats.to_dict("records"): # master calibration paths camera = flat["camera"] - mjd = flat["mjd"] - masters_mjd = get_master_mjd(mjd) - masters_path = os.path.join(MASTERS_DIR, f"{masters_mjd}") if use_fiducial_cals: master_cals = { "cent" : os.path.join(masters_path, f"lvm-mtrace-{camera}.fits"), @@ -1346,14 +1345,21 @@ def create_fiberflats(mjd: int, use_fiducial_cals: bool = True, expnums: List[in hflat_path = path.full("lvm_anc", drpver=drpver, kind="h", imagetype=flat["imagetyp"], tileid=flat["tileid"], mjd=flat["mjd"], camera=channel, expnum=expnum) rss_tasks.resample_wavelength(in_rss=wflat_path, out_rss=hflat_path, wave_disp=0.5, wave_range=SPEC_CHANNELS[channel]) - # fit gradient and remove it - gflat_path = path.full("lvm_anc", drpver=drpver, kind="g", imagetype=flat["imagetyp"], tileid=flat["tileid"], mjd=flat["mjd"], camera=channel, expnum=expnum) - remove_field_gradient(in_hflat=hflat_path, out_gflat=gflat_path, wrange=SPEC_CHANNELS[channel]) - # fit fiber throughput - fflat = 
fit_fiberflat(in_twilight=gflat_path, out_flat=fflat_path, out_rss=fflat_flatfielded_path, median_box=median_box, niter=niter, + fflat = fit_fiberflat(in_twilight=hflat_path, out_flat=fflat_path, out_rss=fflat_flatfielded_path, median_box=median_box, niter=niter, threshold=threshold, mask_bands=mask_bands.get(channel, []), display_plots=display_plots, nknots=nknots) + + # fit gradient and remove it + gflat_path = path.full("lvm_anc", drpver=drpver, kind="g", imagetype=flat["imagetyp"], tileid=flat["tileid"], mjd=flat["mjd"], camera=channel, expnum=expnum) + remove_field_gradient(in_hflat=fflat_flatfielded_path, out_gflat=gflat_path, wrange=SPEC_CHANNELS[channel]) + + fflat_flatfielded = RSS.from_file(fflat_flatfielded_path) + gflat = RSS.from_file(gflat_path) + gradient = copy(gflat) + gradient._data = fflat_flatfielded._data / gflat._data + fflat._data /= gradient._data + fflat.writeFitsData(fflat_path) fflat.set_wave_trace(mwave) fflat.set_lsf_trace(mlsf) fflat = fflat.to_native_wave(method="linear", interp_density=False, return_density=False) From e322b668bbe871df6aa4570d34a9686c0c27e6c1 Mon Sep 17 00:00:00 2001 From: Alfredo Mejia-Narvaez Date: Thu, 16 May 2024 10:57:42 -0400 Subject: [PATCH 43/43] fixing masking issues in fiberflats --- python/lvmdrp/functions/run_calseq.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/python/lvmdrp/functions/run_calseq.py b/python/lvmdrp/functions/run_calseq.py index 6892c618..d99c2910 100644 --- a/python/lvmdrp/functions/run_calseq.py +++ b/python/lvmdrp/functions/run_calseq.py @@ -1359,6 +1359,9 @@ def create_fiberflats(mjd: int, use_fiducial_cals: bool = True, expnums: List[in gradient = copy(gflat) gradient._data = fflat_flatfielded._data / gflat._data fflat._data /= gradient._data + fflat._mask |= np.isnan(fflat._data) + fflat = fflat.interpolate_data(axis="X") + fflat = fflat.interpolate_data(axis="Y") fflat.writeFitsData(fflat_path) fflat.set_wave_trace(mwave) fflat.set_lsf_trace(mlsf)
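
Note on the last two patches (42/43 and 43/43): together they change how create_fiberflats folds the field gradient back into the fiber throughput and then cleans up the NaNs that the division can introduce. The sketch below is only an illustration of that arithmetic on plain (nfiber, npixel) numpy arrays; the array names, shapes, and the NaN-filling helper are assumptions for the example and are not the DRP's RSS interface (which carries masks and does the interpolation via interpolate_data(axis="X"/"Y")).

    import numpy as np


    def interp_nans_1d(y):
        """Linearly interpolate over NaNs in a 1D array (no-op if all-NaN)."""
        bad = ~np.isfinite(y)
        if bad.all() or not bad.any():
            return y
        x = np.arange(y.size)
        y = y.copy()
        y[bad] = np.interp(x[bad], x[~bad], y[~bad])
        return y


    def fold_gradient_into_fiberflat(fflat, flatfielded, gradient_free):
        """Illustrative version of the PATCH 42/43 logic (not the DRP API).

        fflat         : fiber throughput estimate (the fiberflat)
        flatfielded   : twilight after dividing by fflat
        gradient_free : the same twilight with the field gradient removed
        """
        # PATCH 42: the field gradient is the ratio of the flat-fielded
        # twilight to its gradient-free version ...
        gradient = flatfielded / gradient_free
        # ... and is divided out of the fiberflat
        fflat = fflat / gradient

        # PATCH 43: the divisions can leave NaNs/infs behind; flag them ...
        fflat[~np.isfinite(fflat)] = np.nan
        # ... and fill them by interpolating along the dispersion axis (X),
        # then across fibers (Y)
        fflat = np.apply_along_axis(interp_nans_1d, 1, fflat)
        fflat = np.apply_along_axis(interp_nans_1d, 0, fflat)
        return fflat

In the pipeline these steps operate on RSS objects (RSS.from_file, interpolate_data, writeFitsData) with the bad-pixel mask propagated alongside the data, so the sketch above only captures the per-pixel arithmetic, not the masking bookkeeping.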