Merge branch 'master' into addbitmasks
ajmejia committed Dec 4, 2024
2 parents 56ed8e9 + 4d81c4f commit 1367e57
Showing 51 changed files with 5,090 additions and 7,157 deletions.
9 changes: 7 additions & 2 deletions .gitignore
@@ -32,7 +32,7 @@ distribute-*.tar.gz
*~

# Test directories and files
#data*
data*
lib
*.tar.gz*
test-*.fits
@@ -45,8 +45,11 @@ test_redux-*
tests/htmlcov
tests/*coverage*
*.fits
*.fits.gz
*.reg
*.csv

*.dylib
*.so
# Mac OSX
.DS_Store

@@ -60,3 +63,5 @@ de421.bsp

# notebooks
*checkpoint*

gaia_cache/
33 changes: 28 additions & 5 deletions CHANGELOG.rst
@@ -6,16 +6,39 @@ Change Log

This document records the main changes to the drp code.

1.1.0 (unreleased)
1.1.1dev (unreleased)
------------------
- Honor MJD exclusion list stored in LVMCORE
- Fix crash in add_astrometry if guider frame ra,dec not present (early SV data)
- Fix rare failure in fiber model parameter measurements
- Fix failure due to case change in Gaia query results
- Fix race condition due to non-unique filenames in astroquery (fixed upstream)
- Fix NaNs in SCISEN* header keywords
- Filter out QAFLAG BAD exposures and do not reduce
- Catch Error on very early guider frames with no RA, DEC keywords

1.1.0 (30-10-2024)
------------------

- Adds a lockfile to drpall write, with 5 sec timeout, to prevent collisions during parallel writes.
- Adds `OBJECT` and `OBSTIME` header keys to the drpall summary file
- Implemented flux calibration using IFU field stars
- Improved twilight flat reduction and treatment for much improved fiberflats
- Add heliocentric velocity to headers
- More accurate resampling/rebinning code
- Improved wavelength solution (especially in b channel) and more robust fits
- Allowing wavelength thermal shifts to vary along slit and improving wavelength/LSF fitting routines
- Fixing fiber thermal shifts
- Speed up tracing
- More robust and accurate fiber thermal shifts
- Add memory usage and execution time reporting
- Speed up pipeline across the board
- Adds a lockfile to drpall write, with 5 sec timeout, to prevent collisions during parallel writes.
- Adds `OBJECT` and `OBSTIME` header keys to the drpall summary file
- Added sky QA plots
- Improved QA plots across different routines
- Log exceptions to header COMMENT section
- Implement infrastructure for versioning for master calibration files
- Separate 2d reductions, extraction & 1d reduction, post-extraction stages; allow
  running each individually
- More flexible CLI for cluster job submission
- Many bugfixes & stability/robustness improvements

1.0.3 (29-05-2024)
------------------
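The 1.1.0 entry above notes that drpall writes are serialized through a lockfile with a 5-second timeout to avoid collisions during parallel writes. Below is a minimal sketch of that pattern using the `filelock` package; the helper name `safe_write_drpall` and the `write_summary` callable are hypothetical, not the actual lvmdrp implementation (which is not shown in this diff).

```python
from filelock import FileLock, Timeout


def safe_write_drpall(drpall_path, write_summary):
    """Serialize concurrent drpall writes with a lockfile (hypothetical helper)."""
    lock = FileLock(f"{drpall_path}.lock", timeout=5)  # 5 sec timeout, as described in the changelog
    try:
        with lock:
            # only one process at a time gets past this point
            write_summary(drpall_path)
    except Timeout:
        # another reduction still holds the lock; skip rather than risk a corrupted summary file
        print(f"could not lock {drpall_path} within 5 s; skipping drpall update")
```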
4 changes: 2 additions & 2 deletions README.md
@@ -63,10 +63,10 @@ To install the DRP along with its dependencies, you need to run the following st
cd lvmdrp
```


3. Install the DRP package in the current python environment (see [contributing](#contributing-to-lvm-drp-development) section below for a replacement of this step):
3. Install the DRP package in the current python environment (see [contributing](#contributing-to-lvm-drp-development) section below for a replacement of this step). Make sure you're back in the lvmdrp directory.

```bash
cd lvmdrp
pip install .
```

78 changes: 46 additions & 32 deletions bin/drp
@@ -11,7 +11,8 @@ import click
import cloup
from cloup.constraints import mutually_exclusive, RequireExactly, IsSet, If

from lvmdrp.main import run_drp, reduce_file, check_daily_mjd, parse_mjds, read_expfile
from lvmdrp.core.constants import CALIBRATION_MATCH
from lvmdrp.main import run_drp, reduce_file, check_daily_mjd, parse_mjds, read_expfile, create_drpall
from lvmdrp.functions.skyMethod import configureSkyModel_drp
from lvmdrp.utils.metadata import get_frames_metadata, get_master_metadata
from lvmdrp.utils.cluster import run_cluster
@@ -20,7 +21,8 @@ from lvmdrp.functions.run_calseq import (
COUNTS_THRESHOLDS,
reduce_nightly_sequence,
reduce_longterm_sequence,
fix_raw_pixel_shifts)
fix_raw_pixel_shifts,
copy_longterm_calibrations)

from sdss_access import Access

@@ -120,13 +122,15 @@ def cli():
@click.option('-2d', '--skip-2d', is_flag=True, default=False, help='Skip preprocessing and detrending')
@click.option('-1d', '--skip-1d', is_flag=True, default=False, help='Skip astrometry, straylight subtraction and extraction')
@click.option('-p1d', '--skip-post-1d', is_flag=True, default=False, help='Skip wavelength calibration, flatfielding, sky processing and flux calibration')
@click.option('-da', '--skip-drpall', is_flag=True, default=False, help='Skip create/update drpall summary file')
@click.option('-c', '--clean-ancillary', is_flag=True, default=False, help='Remove ancillary paths after run')
@click.option('-d', '--debug-mode', is_flag=True, default=False, help='Set debug mode on to run using aperture extraction and skip CR rejection')
@click.option('--force-run', is_flag=True, default=False, help='force reduction even if the exposure was flagged as BAD by the QC pipeline')
@cloup.constraint(mutually_exclusive, ['mjd', 'mjd_list', 'mjd_range'])
# @cloup.constraint(RequireExactly(1), ['mjd', 'mjd_list', 'mjd_range'])
@cloup.constraint(mutually_exclusive, ['expnum', 'exp_list', 'exp_range'])
def run(mjd, mjd_list, mjd_range, with_cals, no_sci, expnum, exp_list, exp_range, exp_file, fluxcal_method,
skip_2d, skip_1d, skip_post_1d, clean_ancillary, debug_mode):
skip_2d, skip_1d, skip_post_1d, skip_drpall, clean_ancillary, debug_mode, force_run):
""" Run the DRP reduction for a given MJD or range of MJDs
Run the DRP for an MJD or range of MJDs. Various flags and options are available
@@ -147,8 +151,10 @@ def run(mjd, mjd_list, mjd_range, with_cals, no_sci, expnum, exp_list, exp_range
skip_2d=skip_2d,
skip_1d=skip_1d,
skip_post_1d=skip_post_1d,
skip_drpall=skip_drpall,
clean_ancillary=clean_ancillary,
debug_mode=debug_mode)
debug_mode=debug_mode,
force_run=force_run)


# register full DRP command
@@ -229,52 +235,51 @@ cli.add_command(metacli)
@click.option('--qrtz-threshold', type=float, default=COUNTS_THRESHOLDS["quartz"], help='count thresholds for tracing with Quartz lamp exposures')
@click.option('--cent-ncolumns', type=int, default=140, help='number of columns to trace fiber centroids')
@click.option('--full-ncolumns', type=int, default=40, help='number of columns to full fiber tracing')
@click.option('-x', '--extract-md', is_flag=True, default=False, help='flag to extract metadata or use cached metadata if exist')
@click.option('-s', '--skip-done', is_flag=True, default=False, help='flag to skip reduction steps that have already been done')
@click.option('-c', '--clean-ancillary', is_flag=True, default=False, help='Remove ancillary paths after run')
@click.option('--fflats-from-to', type=int, default=[None, None], nargs=2, help='Move twilight fiberflats from one MJD to another MJD')
@click.option('-n', '--nightly', is_flag=True, default=False, help='flag to reduce nightly calibration sequence (defaults to long-term)')
@click.option('--fflats-from-to', type=int, default=((None, None),), nargs=2, multiple=True, help='Move twilight fiberflats from one MJD to another MJD')
@click.option('--nightly', is_flag=True, default=False, help='flag to reduce nightly calibration sequence (defaults to long-term)')
@click.option('--copy-to-sandbox', is_flag=True, default=False, help='copy long-term calibrations to sandbox calib folder')
@cloup.constraint(mutually_exclusive, ['mjd', 'mjd_list', 'mjd_range'])
@cloup.constraint(RequireExactly(1), ['mjd', 'mjd_list', 'mjd_range'])
def calibrations(mjd, mjd_list, mjd_range, reject_cr,
skip_bias, skip_trace, skip_wavelength,
skip_dome, skip_twilight,
ldls_threshold, qrtz_threshold,
cent_ncolumns, full_ncolumns,
skip_done, clean_ancillary,
fflats_from_to, nightly):
extract_md, skip_done, clean_ancillary,
fflats_from_to, nightly, copy_to_sandbox):
""" Run calibration sequence reduction """

only_cals = {flavor for flavor, skip in zip(["bias", "trace", "wave", "twilight"],
[skip_bias, skip_trace, skip_wavelength, skip_twilight]) if not skip}
only_cals = {flavor for flavor, skip in zip(["bias", "trace", "wave", "dome", "twilight"],
[skip_bias, skip_trace, skip_wavelength, skip_dome, skip_twilight]) if not skip}

# get MJDs for twilight fiberflats copies
fflats_from, fflats_to = fflats_from_to
if any(fflats_from_to) is None:
fflats_from = fflats_to = None
fflats_from, fflats_to = zip(*fflats_from_to)

# parse MJDs or
mjds = parse_mjds(mjd=mjd or mjd_list or mjd_range or None)
if not isinstance(mjds, (list, tuple)):
mjds = [mjds]

if nightly:
for mjd in mjds:
reduce_nightly_sequence(mjd=mjd, reject_cr=reject_cr,
use_longterm_cals=False,
only_cals=only_cals,
counts_thresholds={"ldls": ldls_threshold, "quartz": qrtz_threshold},
cent_guess_ncolumns=cent_ncolumns, trace_full_ncolumns=full_ncolumns,
skip_done=skip_done, keep_ancillary=not clean_ancillary,
fflats_from=fflats_from if fflats_to == mjd else None)
else:
for mjd in mjds:
reduce_longterm_sequence(mjd=mjd, reject_cr=reject_cr,
use_longterm_cals=True,
only_cals=only_cals,
counts_thresholds={"ldls": ldls_threshold, "quartz": qrtz_threshold},
cent_guess_ncolumns=cent_ncolumns, trace_full_ncolumns=full_ncolumns,
skip_done=skip_done, keep_ancillary=not clean_ancillary,
fflats_from=fflats_from if fflats_to == mjd else None)
# choose reduction routine
reduction_routine = reduce_nightly_sequence if nightly else reduce_longterm_sequence
# run reduction sequence
for mjd in mjds:
reduction_routine(mjd=mjd, reject_cr=reject_cr,
use_longterm_cals=not nightly,
only_cals=only_cals,
counts_thresholds={"ldls": ldls_threshold, "quartz": qrtz_threshold},
cent_guess_ncolumns=cent_ncolumns, trace_full_ncolumns=full_ncolumns,
extract_metadata=extract_md, skip_done=skip_done,
keep_ancillary=not clean_ancillary,
fflats_from=fflats_from[fflats_to.index(mjd)] if mjd in fflats_to else None)

if not nightly and copy_to_sandbox:
flavors = []
[flavors.extend(CALIBRATION_MATCH.get(calname, [calname])) for calname in only_cals]
copy_longterm_calibrations(mjd=mjd, flavors=flavors)

# register calibration sequence
cli.add_command(calibrations)
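In the `calibrations` command above, `--fflats-from-to` can now be given multiple times, each occurrence supplying a `(from_mjd, to_mjd)` pair; `zip(*fflats_from_to)` transposes those pairs into parallel `fflats_from`/`fflats_to` sequences, and each MJD being reduced looks up its source MJD by position. A small standalone illustration of that lookup (the MJD values here are made up):

```python
# each --fflats-from-to occurrence contributes one (from_mjd, to_mjd) pair
fflats_from_to = ((60250, 60255), (60260, 60265))

# transpose the pairs into parallel tuples of source and destination MJDs
fflats_from, fflats_to = zip(*fflats_from_to)
print(fflats_from, fflats_to)  # (60250, 60260) (60255, 60265)

# for the MJD currently being reduced, find the matching source MJD (or None)
mjd = 60265
fflats_source = fflats_from[fflats_to.index(mjd)] if mjd in fflats_to else None
print(fflats_source)  # 60260
```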
@@ -356,7 +361,7 @@ def get_calibs(kind, mjd, camera):
@click.option('-w', '--walltime', type=str, default="24:00:00", help='the time for which the job is allowed to run')
@click.option('-a', '--alloc', type=click.Choice(['sdss-np', 'sdss-kp']), default='sdss-np', help='which partition to use')
@click.option('-s', '--submit', is_flag=True, type=bool, default=True, help='flag to submit the job or not')
@click.option('--run-calibs', is_flag=True, type=bool, default=False, help="run long-term calibrations only")
@click.option('--run-calibs', is_flag=True, type=bool, default=False, help="run calibration sequences only")
@click.option('--drp-options', type=str, default="", help="pass options to drp run command")
def cluster(mjd_list, mjd_range, expnum, exp_list, exp_range, exp_file, nodes, ppn, walltime,
alloc, submit, run_calibs, drp_options):
@@ -373,5 +378,14 @@ def cluster(mjd_list, mjd_range, expnum, exp_list, exp_range, exp_file, nodes, p
run_calibs=run_calibs, drp_options=drp_options)


@cli.command('summary', short_help='Creates the drpall summary file for the given version of the pipeline')
@click.option('--drp-version', type=str, default=None, help='DRP version for which a drpall summary file will be created')
@click.option('-o', '--overwrite', is_flag=True, default=False, help='Overwrites any existing drpall file before creating a new one')
def summary(drp_version, overwrite):
""" Creates the DRP summary file for a given version of the DRP """

create_drpall(drp_version=drp_version, overwrite=overwrite)


if __name__ == "__main__":
cli()
8 changes: 7 additions & 1 deletion cextern/README.txt
@@ -1 +1,7 @@
C code to be compiled on setup goes here.
C++ code to generate fast_median extension goes here.

fast_median.cpp, fast_median.hpp: a very fast median filter for 1d and 2d ndarrays
with float or double data.
python/lvmdrp/external/fast_median.py: python interface for fast_median extension
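The note above describes `fast_median` as a fast 1-D/2-D median filter for float or double ndarrays, wrapped by `python/lvmdrp/external/fast_median.py`. The wrapper's function names are not shown in this diff, so the sketch below uses SciPy's generic `median_filter` only to illustrate the kind of operation the C++ extension is meant to accelerate; it is not the actual `fast_median` API.

```python
import numpy as np
from scipy.ndimage import median_filter

# a noisy 2-D float image standing in for a detector frame
image = np.random.normal(loc=100.0, scale=5.0, size=(256, 256)).astype(np.float32)

# 2-D running median with a 5x5 window -- the operation a fast C++ median filter speeds up
smoothed = median_filter(image, size=(5, 5))

# 1-D case, e.g. smoothing a single extracted spectrum
spectrum = image[100]
smoothed_spectrum = median_filter(spectrum, size=11)
```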

