Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Long-term calibration sequence #87

Merged
merged 44 commits into from
May 16, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
44 commits
Select commit Hold shift + click to select a range
b23f5fd
implementing long-term calibration routine
ajmejia May 4, 2024
2700262
quick fix to frames metadata
ajmejia May 4, 2024
fd337d5
implementing QC shift reports & interactive mode to choose solve disc…
ajmejia May 5, 2024
c3a5e2c
implementing guess of reference frame
ajmejia May 5, 2024
82c1b6b
handling annoying extra lines
ajmejia May 5, 2024
714af30
cleaning print
ajmejia May 5, 2024
3af6c5d
better handling of conflicting shifts (fixes Implement Dmitry's pixel…
ajmejia May 5, 2024
0632954
refining selection of reference & fixing bugs
ajmejia May 5, 2024
218fd72
fixing finding of reference image
ajmejia May 5, 2024
be55bbd
Merge branch 'master' into ltcals
ajmejia May 6, 2024
6f31173
implementing CLI for long-term calibrations
ajmejia May 6, 2024
ba68c7b
fixing bug on missing references
ajmejia May 6, 2024
8520528
fixing hard-coded parameters in tracing
ajmejia May 6, 2024
cc638c2
improved handling of invalid errors & fixed writing of output traces
ajmejia May 6, 2024
576cc57
returning tck tuple
ajmejia May 7, 2024
b19daaf
fixing sequence selection
ajmejia May 7, 2024
0ffac12
fixing moving of long-term cals to sandbox
ajmejia May 7, 2024
1271ce7
fixing sequences order
ajmejia May 7, 2024
90414d9
fixing hard-coded values in fiber thermal shifts, improved stats & log
ajmejia May 8, 2024
af7dd3b
improving stray light subtraction for science frames
ajmejia May 8, 2024
7dc4643
implementing stray light subtraction for science reductions
ajmejia May 8, 2024
b14dfe9
fixing handling of new paths in fiducial calibrations
ajmejia May 9, 2024
661f465
fixing minor bug
ajmejia May 9, 2024
180c784
fixing bug in straylight subtraction
ajmejia May 9, 2024
6451f51
fixing fit_continuum output
ajmejia May 10, 2024
3942eec
making stray light subtraction consistent all across
ajmejia May 10, 2024
fc25177
removing replacement of masked values in stray light subtraction
ajmejia May 10, 2024
1dbd830
adding missing path
ajmejia May 10, 2024
bb9e4b8
improving error propagation and masked pixels handling in extraction
ajmejia May 10, 2024
a0ae36f
improving spline fitting for stray light modeling & fixing mask propa…
ajmejia May 11, 2024
3541dee
fixing writing of output fiber parameters
ajmejia May 11, 2024
55fb48b
implementing detection of exposed standard fiber for incomplete seque…
ajmejia May 12, 2024
9857441
fixing filtering of metadata throughout
ajmejia May 13, 2024
abaf262
fixing tests
ajmejia May 13, 2024
4c01a52
removing +1 pixel to QC reports
ajmejia May 13, 2024
d7010ac
using get_sequence_metadata to fix inconsistent metadata
ajmejia May 13, 2024
c771abc
improving custom shifts handling
ajmejia May 14, 2024
bcfbd01
improving skipping in traces creation
ajmejia May 14, 2024
4790235
cleaning duplicated filtering in arcs reduction
ajmejia May 14, 2024
58dd2c9
fitting a linear continuum background when measuring arc lines
ajmejia May 14, 2024
150b1f6
updating wavelength & LSF fitting
ajmejia May 15, 2024
4d1ded2
fixing flux calibration to use all three specs
ajmejia May 15, 2024
2aaedf8
fixing gradient field correction
ajmejia May 16, 2024
e322b66
fixing masking issues in fiberflats
ajmejia May 16, 2024
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
34 changes: 25 additions & 9 deletions bin/drp
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,12 @@ from lvmdrp.utils.metadata import get_frames_metadata, get_master_metadata
from lvmdrp.utils.cluster import run_cluster

from lvmdrp.functions.run_quickdrp import quick_science_reduction
from lvmdrp.functions.run_calseq import MASK_BANDS, create_fiberflats, reduce_nightly_sequence, fix_raw_pixel_shifts
from lvmdrp.functions.run_calseq import (
MASK_BANDS,
create_fiberflats,
reduce_nightly_sequence,
reduce_longterm_sequence,
fix_raw_pixel_shifts)

from sdss_access import Access

Expand Down Expand Up @@ -249,6 +254,22 @@ def nightly_calibrations(mjd, skip_cr, use_fiducial_cals, skip_done, keep_ancill
cli.add_command(nightly_calibrations)


# CLI entry point for running the long-term calibration sequence for a
# single MJD; delegates all work to
# lvmdrp.functions.run_calseq.reduce_longterm_sequence.
@cli.command('long-term-cals', short_help='Run the long-term calibration sequence')
@click.option('-m', '--mjd', type=int, required=True, help='the MJD to reduce')
@click.option('-r', '--skip-cr', is_flag=True, default=False, help='flag to skip cosmic rays rejection')
@click.option('-f', '--use-fiducial-cals', is_flag=True, default=False, help='flag to use fiducial calibration frames')
@click.option('-s', '--skip-done', is_flag=True, default=False, help='flag to skip reduction steps that have already been done')
@click.option('-a', '--keep-ancillary', is_flag=True, default=False, help='flag to keep ancillary files')
def long_term_calibrations(mjd, skip_cr, use_fiducial_cals, skip_done, keep_ancillary):
    """ Run the long-term calibration sequence """
    # NOTE: the CLI exposes a *skip*-CR flag while the underlying API takes
    # reject_cr, hence the negation when forwarding the option
    reduce_longterm_sequence(mjd=mjd, reject_cr=not skip_cr,
                             use_fiducial_cals=use_fiducial_cals,
                             skip_done=skip_done, keep_ancillary=keep_ancillary)

# register long-term calibration sequence
cli.add_command(long_term_calibrations)


@cli.command('fix-pixel-shifts', short_help='Fix the electronic pixel shifts in the raw data')
@click.option('-m', '--mjd', type=int, help='the MJD to reduce')
@click.option('-e', '--expnums', type=IntListType(), help='a list of exposure numbers to reduce')
Expand All @@ -257,18 +278,13 @@ cli.add_command(nightly_calibrations)
@click.option('-y', '--y-widths', type=int, default=20, help='the width of the cross-dispersion aperture in pixels')
@click.option('-f', '--flat-spikes', type=int, default=21, help='the window within which to filter spikes')
@click.option('-t', '--threshold-spikes', type=float, default=0.6, help='the threshold for the spikes filtering')
def fix_pixel_shifts(mjd, expnums, ref_expnums, wave_widths, y_widths, flat_spikes, threshold_spikes):
@click.option('-i', '--interactive', is_flag=True, default=False, help='flag to run in interactive mode when QC and DRP discrepancies are found')
def fix_pixel_shifts(mjd, expnums, ref_expnums, wave_widths, y_widths, flat_spikes, threshold_spikes, interactive):
""" Fix the electronic pixel shifts in the raw data """
# TODO: read Dmitry's reports on shifts
# TODO: calculate shifts and compare to Dmitry's
# TODO: if different, visually inspect the suspect frames
# TODO: manually correct shifts and pass as `shift_rows` parameter
# TODO: add shift corrections as header metadata

fix_raw_pixel_shifts(mjd=mjd, expnums=expnums, ref_expnums=ref_expnums,
wave_widths=wave_widths, y_widths=y_widths,
flat_spikes=flat_spikes, threshold_spikes=threshold_spikes,
skip_done=True)
interactive=interactive, skip_done=True)

# register fix pixel shifts command
cli.add_command(fix_pixel_shifts)
Expand Down
5 changes: 4 additions & 1 deletion python/lvmdrp/core/constants.py
Original file line number Diff line number Diff line change
Expand Up @@ -119,4 +119,7 @@
SPEC_CHANNELS = {"b": (3600, 5800), "r": (5775, 7570), "z": (7520, 9800)}

ARC_LAMPS = ["NEON", "HGNE", "ARGON", "XENON"]
CON_LAMPS = ["LDLS", "QUARTZ"]
CON_LAMPS = ["LDLS", "QUARTZ"]

LVM_NBLOCKS = 18
LVM_REFERENCE_COLUMN = 2000
11 changes: 11 additions & 0 deletions python/lvmdrp/core/fiberrows.py
Original file line number Diff line number Diff line change
Expand Up @@ -900,6 +900,17 @@ def measureArcLines(
for i in iterator:
spec = self.getSpec(i)

# if i in [562, 563, 564, 565]:
# ncols = 3
# nlines = len(cent_wave[0])
# nrows = int(numpy.ceil(nlines / ncols))
# fig, axs = plt.subplots(nrows=nrows, ncols=ncols, figsize=(6*ncols, 6*nrows))
# axs = axs.flatten()
# fig.suptitle("Gaussian fitting")
# fig.supylabel("counts (e-/pixel)")
# fit = spec.fitSepGauss(cent_wave[i - 1], aperture, init_back, axs=axs)
# fig.savefig(f"lines_fit_{i}.png", bbox_inches="tight")
# else:
fit = spec.fitSepGauss(cent_wave[i - 1], aperture, init_back, axs=None)
flux[i, :] = numpy.fabs(fit[:nlines])
cent_wave[i, :] = fit[nlines : 2 * nlines]
Expand Down
2 changes: 1 addition & 1 deletion python/lvmdrp/core/fluxcal.py
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,7 @@ def retrieve_header_stars(rss):
lat=-29.008999964 * u.deg, lon=-70.688663912 * u.deg, height=2800 * u.m
)
h = rss._header
slitmap = rss._slitmap[rss._slitmap["spectrographid"] == int(h["SPEC"][-1])]
slitmap = rss._slitmap
# retrieve the data for the 12 standards from the header
stddata = []
for i in range(12):
Expand Down
42 changes: 28 additions & 14 deletions python/lvmdrp/core/image.py
Original file line number Diff line number Diff line change
Expand Up @@ -737,7 +737,7 @@ def measure_fiber_shifts(self, ref_image, columns=[500, 1000, 1500, 2000, 2500,
for j,c in enumerate(columns):
s1 = numpy.nanmedian(ref_data[50:-50,c-column_width:c+column_width], axis=1)
s2 = numpy.nanmedian(self._data[50:-50,c-column_width:c+column_width], axis=1)
_, shifts[j], _ = _cross_match_float(s1, s2, numpy.array([1.0]), [-5, 5])
_, shifts[j], _ = _cross_match_float(s1, s2, numpy.array([1.0]), shift_range)

return shifts

Expand Down Expand Up @@ -1983,7 +1983,7 @@ def fitPoly(self, axis="y", order=4, plot=-1):
new_img.setData(data=fit_result, mask=new_mask)
return new_img

def fitSpline(self, axis="y", degree=3, smoothing=0, use_weights=False):
def fitSpline(self, axis="y", degree=3, smoothing=0, use_weights=False, clip=None, interpolate_missing=True):
"""Fits a spline to the image along a given axis

Parameters
Expand All @@ -1997,6 +1997,10 @@ def fitSpline(self, axis="y", degree=3, smoothing=0, use_weights=False):
smoothing factor for the spline fit, by default 0
use_weights : bool, optional
whether to use the inverse variance as weights for the spline fit or not, by default False
clip : tuple, optional
minimum and maximum values to clip the spline model, by default None
interpolate_missing : bool, optional
interpolate coefficients if spline fitting failed

Returns
-------
Expand All @@ -2010,14 +2014,13 @@ def fitSpline(self, axis="y", degree=3, smoothing=0, use_weights=False):
self.swapaxes()

pixels = numpy.arange(self._dim[0])
models = []
models = numpy.zeros(self._dim)
for i in range(self._dim[1]):
good_pix = ~self._mask[:,i] if self._mask is not None else ~numpy.isnan(self._data[:,i])

# skip column if all pixels are masked
if good_pix.sum() == 0:
warnings.warn(f"Skipping column {i} due to all pixels being masked", RuntimeWarning)
models.append(numpy.zeros(self._dim[0]))
continue

# define spline fitting parameters
Expand All @@ -2043,15 +2046,14 @@ def fitSpline(self, axis="y", degree=3, smoothing=0, use_weights=False):

if len(groups) <= degree+1:
warnings.warn(f"Skipping column {i} due to insufficient data for spline fit", RuntimeWarning)
models.append(numpy.zeros(self._dim[0]))
continue

# collapse groups into single pixel
new_masked_pixels, new_data, new_vars = [], [], []
for group in groups:
new_masked_pixels.append(numpy.mean(masked_pixels[group]))
new_data.append(numpy.median(data[group]))
new_vars.append(numpy.mean(vars[group]))
new_masked_pixels.append(numpy.nanmean(masked_pixels[group]))
new_data.append(numpy.nanmedian(data[group]))
new_vars.append(numpy.nanmean(vars[group]))
masked_pixels = numpy.asarray(new_masked_pixels)
data = numpy.asarray(new_data)
vars = numpy.asarray(new_vars)
Expand All @@ -2062,11 +2064,23 @@ def fitSpline(self, axis="y", degree=3, smoothing=0, use_weights=False):
spline_pars = interpolate.splrep(masked_pixels, data, w=weights, s=smoothing)
else:
spline_pars = interpolate.splrep(masked_pixels, data, s=smoothing)
model = interpolate.splev(pixels, spline_pars)
models.append(model)
models[:, i] = interpolate.splev(pixels, spline_pars)

# reconstruct the model image
models = numpy.asarray(models).T
# clip spline fit if required
if clip is not None:
models = numpy.clip(models, clip[0], clip[1])

# interpolate failed columns if requested
masked_columns = numpy.count_nonzero((models == 0)|numpy.isnan(models), axis=0) >= 0.1*self._dim[0]
if interpolate_missing and masked_columns.any():
log.info(f"interpolating spline fit in {masked_columns.sum()} columns")
x_pixels = numpy.arange(self._dim[1])
f = interpolate.interp1d(x_pixels[~masked_columns], models[:, ~masked_columns], axis=1, bounds_error=False, fill_value="extrapolate")
models[:, masked_columns] = f(x_pixels[masked_columns])

# clip spline fit if required
if clip is not None:
models = numpy.clip(models, clip[0], clip[1])

# match orientation of the output array
if axis == "y" or axis == "Y" or axis == 0:
Expand Down Expand Up @@ -2340,7 +2354,7 @@ def trace_fiber_widths(self, fiber_centroids, ref_column=2000, ncolumns=40, nblo
integral_mod = integral_mod if integral_mod != 0 else numpy.nan
# NOTE: this is a hack to avoid integrating the whole column when tracing a few blocks
integral_dat = numpy.trapz(img_slice._data * (mod_slice>0), img_slice._pixels)
residuals.append((integral_dat - integral_mod) / integral_dat * 100)
residuals.append((integral_mod - integral_dat) / integral_dat * 100)

# compute fitted model stats
chisq_red = bn.nansum((mod_slice - img_slice._data)[~img_slice._mask]**2 / img_slice._error[~img_slice._mask]**2) / (self._dim[0] - 1 - 3)
Expand Down Expand Up @@ -2454,7 +2468,7 @@ def extractSpecOptimal(self, cent_trace, trace_fwhm, plot_fig=False):
mask = numpy.zeros((cent_trace._fibers, self._dim[1]), dtype="bool")

self._data = numpy.nan_to_num(self._data)
self._error = numpy.nan_to_num(self._error, nan=1e10)
self._error = numpy.nan_to_num(self._error, nan=numpy.inf)

# convert FWHM trace to sigma
trace_sigma = trace_fwhm / 2.354
Expand Down
3 changes: 3 additions & 0 deletions python/lvmdrp/core/spectrum1d.py
Original file line number Diff line number Diff line change
Expand Up @@ -3282,10 +3282,13 @@ def obtainGaussFluxPeaks(self, pos, sigma, indices, replace_error=1e10, plot=Fal
).astype("int")
# defining bad pixels for each fiber if needed
if self._mask is not None:
# select: fibers in the boundary of the chip
bad_pix = numpy.zeros(fibers, dtype="bool")
select = bn.nansum(pixels >= self._mask.shape[0], 1)
nselect = numpy.logical_not(select)
bad_pix[select] = True

# masking fibers if all pixels are bad within aperture
bad_pix[nselect] = bn.nansum(self._mask[pixels[nselect, :]], 1) == aperture
else:
bad_pix = None
Expand Down
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
319
74.71 3593.5684 1
74.71 3593.5684 0
165.63 3650.1530 1
173.21 3654.8360 0
186.59 3663.2046 1
Expand Down
4 changes: 2 additions & 2 deletions python/lvmdrp/functions/fluxCalMethod.py
Original file line number Diff line number Diff line change
Expand Up @@ -342,8 +342,8 @@ def fluxcal_Gaia(channel, in_rss, plot=True, GAIA_CACHE_DIR=None):
)

if plot:
plt.plot(wgood, sgood, "r.", markersize=4)
plt.plot(w, res[f"STD{nn}SEN"], linewidth=0.5)
plt.plot(wgood, sgood, ".k", markersize=2, zorder=-999)
plt.plot(w, res[f"STD{nn}SEN"], linewidth=1)
# plt.ylim(0,0.1e-11)

rms = biweight_scale(res.to_pandas().values, axis=1, ignore_nan=True)
Expand Down
Loading
Loading