From 56c3434ebb43c844f5e407e547fb3ffb3ca3ead9 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jos=C3=A9=20S=C3=A1nchez-Gallego?=
Date: Mon, 4 Sep 2023 14:04:04 -0700
Subject: [PATCH] Create co-added images (phase 2) (#6)

* Rename refine to reprocess
* Lots of refactor of camera WCS reprocessing
* Calculate ZP for each frame
* Add PA to frame data
* Sigma-clip, fit, and calculate PA drift
* Support processing spec frames
* Add initial plotting
* Add zero point plot
* Allow to force using time range to select frames
* Use UTC and getmtime
* Fix cases when the reference WCS is empty
* Move extract_sources out of Cameras and add MF pixels
* Add placeholder columns to sources during extraction
* Add CameraSolution and GuiderSolution classes
* Missing colon
* Add data model file
* Use model to update PROC and SOURCES extensions
* Major, major refactor
* Move some code around
* Remove GAIA_CACHE
* Add MF WCS (!) and some header keywords
* Allow to load FITS file into dataclass
* Bump version to 0.3.0a0
* Change some types in SOURCES data model
* Fix update_fits()
* Add DIRNAME
* Change header comment
* Typing
* post_init -> __post_init__
* Fix a few bugs
* Fix merge issues
* Update deps
* Deal with empty dataframe when converting to Table
* Some fixes
* Some fixes
* Remove table_from_dataframe
* Use MF pixels for global solution
* Use parquet for sources files
* Close HDU list
* Reset detections index to ensure Gaia match
* Fix call to update_fits
* Various fixes to guiding. Decrease guide tolerance
* Guide in PA
* Write PACORR to header
* Minimum version to load dataclasses -> 0.4.0a0
* Prevent multiple rotation corrections
* Disable rotation correction for now
* Comment unused lines
* Pretty complete rewrite of co-add functions using dataclasses
* Rename master frame to full frame
* Deal with spec telescope
* Add camera name to log message
* Typo in get_dark_subtrcted_data
* Support dark subtraction in old files without DIRNAME
* Support reprocessing old files. Fix some bugs.
* Plots for FWHM, ZP, and PA
* Add comments and remove redundant code
* Add guider offsets plots
* Various fixes to the datamodel and type casting
* Fix corner case in populating the ap. phot DF
* Fix dtypes that were not serializable
* Save PA offset to guider solution and FITS
* Improve logic to guide in PA
* Fix repetitive message during acquisition
* Set umask so that new files inherit the parent folder permissions
* Tweaks to plotting
---
 Dockerfile                                |    3 +
 docs/sphinx/conf.py                       |    3 +-
 src/lvmguider/__init__.py                 |    5 -
 src/lvmguider/actor/commands/guide.py     |    2 +-
 src/lvmguider/actor/commands/set_pixel.py |    4 +-
 src/lvmguider/astrometrynet.py            |   15 +-
 src/lvmguider/cameras.py                  |    2 +-
 src/lvmguider/coadd.py                    | 2049 +++++++++------------
 src/lvmguider/dataclasses.py              |  317 +++-
 src/lvmguider/etc/datamodel.yml           |  121 +-
 src/lvmguider/etc/lvmguider.yml           |   10 +
 src/lvmguider/extraction.py               |   21 +-
 src/lvmguider/guider.py                   |  121 +-
 src/lvmguider/plotting.py                 |  598 ++++++
 src/lvmguider/tools.py                    |  294 ++-
 src/lvmguider/transformations.py          |   43 +-
 16 files changed, 2293 insertions(+), 1315 deletions(-)
 create mode 100644 src/lvmguider/plotting.py

diff --git a/Dockerfile b/Dockerfile
index fa18fc0..15e9c3b 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -15,4 +15,7 @@ RUN pip3 install -U pip setuptools wheel
 RUN cd lvmguider && pip3 install .
 RUN rm -Rf lvmguider
 
+# Set umask so that new files inherit the parent folder permissions.
+RUN echo "umask 0002" >> /etc/bash.bashrc + ENTRYPOINT lvmguider actor start --debug diff --git a/docs/sphinx/conf.py b/docs/sphinx/conf.py index c4239a7..dd2135e 100644 --- a/docs/sphinx/conf.py +++ b/docs/sphinx/conf.py @@ -1,7 +1,8 @@ import os from pkg_resources import parse_version -from trurl import __version__ + +from lvmguider import __version__ # Are we building in RTD? diff --git a/src/lvmguider/__init__.py b/src/lvmguider/__init__.py index e740b40..8b2709a 100644 --- a/src/lvmguider/__init__.py +++ b/src/lvmguider/__init__.py @@ -3,14 +3,9 @@ import pathlib import warnings -from astropy.wcs import FITSFixedWarning - from sdsstools import Configuration, get_logger, get_package_version -warnings.simplefilter("ignore", category=FITSFixedWarning) - - NAME = "lvmguider" diff --git a/src/lvmguider/actor/commands/guide.py b/src/lvmguider/actor/commands/guide.py index 8216daa..59e4095 100644 --- a/src/lvmguider/actor/commands/guide.py +++ b/src/lvmguider/actor/commands/guide.py @@ -45,7 +45,7 @@ def is_stopping(command: GuiderCommand): @click.option( "--reference-pixel", type=click.Tuple((float, float)), - help="The pixel of the master frame to use as pointing reference.", + help="The pixel of the full frame to use as pointing reference.", ) @click.option( "--guide-tolerance", diff --git a/src/lvmguider/actor/commands/set_pixel.py b/src/lvmguider/actor/commands/set_pixel.py index 72f7b07..95593d0 100644 --- a/src/lvmguider/actor/commands/set_pixel.py +++ b/src/lvmguider/actor/commands/set_pixel.py @@ -26,7 +26,7 @@ @click.argument("PIXEL-X", type=float) @click.argument("PIXEL-Z", type=float) async def set_pixel(command: GuiderCommand, pixel_x: float, pixel_z: float): - """Sets the master frame pixel coordinates on which to guide. + """Sets the full frame pixel coordinates on which to guide. This command can be issued during active guiding to change the pointing of the telescope. @@ -43,7 +43,7 @@ async def set_pixel(command: GuiderCommand, pixel_x: float, pixel_z: float): @lvmguider_parser.command() async def reset_pixel(command: GuiderCommand): - """Resets the master frame pixel on which to guide to the central pixel.. + """Resets the full frame pixel on which to guide to the central pixel.. This command can be issued during active guiding to change the pointing of the telescope. 
diff --git a/src/lvmguider/astrometrynet.py b/src/lvmguider/astrometrynet.py index f6759d6..f742085 100644 --- a/src/lvmguider/astrometrynet.py +++ b/src/lvmguider/astrometrynet.py @@ -24,7 +24,8 @@ import pandas from astropy.io import fits from astropy.table import Table -from astropy.wcs import WCS +from astropy.units import UnitsWarning +from astropy.wcs import WCS, FITSFixedWarning PathLike = TypeVar("PathLike", pathlib.Path, str) @@ -478,16 +479,20 @@ def astrometrynet_quick( else: return solution - warnings.simplefilter("ignore") - wcs = WCS(open(wcs_output).read(), relax=True) + with warnings.catch_warnings(): + warnings.simplefilter("ignore", category=FITSFixedWarning) + wcs = WCS(open(wcs_output).read(), relax=True) solution.wcs = wcs solution.stdout = open(stdout).read() solution.stderr = open(stderr).read() if os.path.exists(corr_file): - stars = Table.read(corr_file).to_pandas() - solution.stars = stars + with warnings.catch_warnings(): + warnings.simplefilter("ignore", category=UnitsWarning) + + stars = Table.read(corr_file).to_pandas() + solution.stars = stars return solution diff --git a/src/lvmguider/cameras.py b/src/lvmguider/cameras.py index 3b8346d..7b6f19f 100644 --- a/src/lvmguider/cameras.py +++ b/src/lvmguider/cameras.py @@ -169,7 +169,7 @@ async def expose( if len(sources) > 0: all_sources = pandas.concat(sources) valid = all_sources.loc[all_sources.valid == 1] - fwhm = numpy.median(valid["fwhm"]) if len(valid) > 0 else None + fwhm = float(numpy.median(valid["fwhm"])) if len(valid) > 0 else None else: valid = [] fwhm = None diff --git a/src/lvmguider/coadd.py b/src/lvmguider/coadd.py index 32f00b7..b9b5cef 100644 --- a/src/lvmguider/coadd.py +++ b/src/lvmguider/coadd.py @@ -8,850 +8,337 @@ from __future__ import annotations -import logging +import multiprocessing +import os import os.path import pathlib -from collections import defaultdict -from datetime import datetime from functools import partial -from typing import Any, cast +from typing import TYPE_CHECKING, Literal, Sequence import numpy import pandas -import peewee -import sep -from astropy.coordinates import SkyCoord from astropy.io import fits from astropy.stats import sigma_clip -from astropy.table import Table from astropy.time import Time -from astropy.wcs import WCS -from astropy.wcs.utils import fit_wcs_from_points -from psycopg2 import OperationalError -from scipy.spatial import KDTree +from packaging.version import Version from sdsstools.time import get_sjd -from lvmguider import config -from lvmguider import log as llog -from lvmguider.dataclasses import FrameData -from lvmguider.extraction import extract_marginal -from lvmguider.tools import get_db_connection, get_frameno, get_proc_path -from lvmguider.transformations import ag_to_master_frame, get_crota2, solve_locs -from lvmguider.types import ARRAY_1D_F32, ARRAY_2D_F32, ARRAY_2D_U16 - - -COADD_CAMERA_DEFAULT_PATH = ( - "coadds/lvm.{telescope}.agcam.{camname}.coadd_{frameno0:08d}_{frameno1:08d}.fits" +from lvmguider import config, log +from lvmguider.dataclasses import ( + CameraSolution, + CoAdd_CameraSolution, + FrameData, + GlobalSolution, + GuiderSolution, ) -MASTER_COADD_DEFAULT_PATH = ( - "coadds/lvm.{telescope}.agcam.coadd_{frameno0:08d}_{frameno1:08d}.fits" +from lvmguider.extraction import extract_marginal, extract_sources +from lvmguider.plotting import plot_qa +from lvmguider.tools import ( + angle_difference, + estimate_zeropoint, + get_dark_subtracted_data, + get_frameno, + get_guider_files_from_spec, + header_from_model, + 
nan_or_none, + polyfit_with_sigclip, + sort_files_by_camera, ) -MASTER_COADD_SPEC_PATH = "coadds/lvm.{{telescope}}.agcam.coadd_s{specno:08d}.fits" - -DRIFT_WARN_THRESHOLD: float = 0.1 - -_GAIA_CACHE: tuple[SkyCoord, pandas.DataFrame] | None = None - -XZ_AG_FRAME = config["xz_ag_frame"] -XZ_FULL_FRAME = config["xz_full_frame"] - - -def create_coadded_frame_header( - frame_data: list[FrameData], - sources: pandas.DataFrame, - matched: bool = True, - use_sigmaclip: bool = False, - sigma: int | None = None, - wcs: WCS | None = None, - is_master: bool = False, -): - """Creates the header object for a co-added frame.""" - - # Create a list with only the stacked frames and sort by frame number. - frames = list(sorted(frame_data, key=lambda x: x.frameno)) - stacked = list( - sorted( - [fd for fd in frame_data if fd.stacked], - key=lambda x: x.frameno, - ) - ) - - frame0 = frames[0].frameno - framen = frames[-1].frameno - stack0 = stacked[0].frameno - stackn = stacked[-1].frameno - - sjd = get_sjd("LCO", frames[0].date_obs.to_datetime()) - - fwhm_medians = [ff.fwhm_median for ff in stacked if ff.fwhm_median is not None] - fwhm0 = fwhmn = fwhm_median = None - - if len(fwhm_medians) > 0: - fwhm0 = numpy.round(fwhm_medians[0], 2) - fwhmn = numpy.round(fwhm_medians[-1], 2) - fwhm_median = numpy.round(numpy.median(fwhm_medians), 2) - - cofwhm = cofwhmst = None - if "fwhm" in sources and len(sources.fwhm.dropna()) > 0: - cofwhm = numpy.round(sources.fwhm.dropna().median(), 2) - if len(sources.fwhm.dropna()) > 1: - cofwhmst = numpy.round(sources.fwhm.dropna().std(), 2) - - zp = round(sources.zp.dropna().median(), 3) if matched else None - - # Determine the PA drift due to k-mirror tracking. - frame_wcs = [ - frame.wcs - for frame in stacked - if frame.wcs is not None and frame.guide_mode == "guide" - ] - crota2 = numpy.array(list(map(get_crota2, frame_wcs))) - - # Do some sigma clipping to remove big outliers (usually due to WCS errors). 
- if len(crota2) > 0: - crota2_masked = sigma_clip(crota2, 5, masked=True) - crota2_min = round(numpy.ma.min(crota2_masked), 6) - crota2_max = round(numpy.ma.max(crota2_masked), 6) - crota2_drift = round(abs(crota2_min - crota2_max), 6) - drift_warning = crota2_drift > DRIFT_WARN_THRESHOLD - else: - crota2_min = crota2_max = crota2_drift = None - drift_warning = True - - wcs_header = wcs.to_header() if wcs is not None else [] - - header = fits.Header() - - # Basic info - header["TELESCOP"] = (frames[0].telescope, " Telescope that took the image") - - if not is_master: - header["CAMNAME"] = (frames[0].camera, "Camera name") - - header["INSTRUME"] = ("LVM", "SDSS-V Local Volume Mapper") - header["OBSERVAT"] = ("LCO", "Observatory") - header["MJD"] = (sjd, "SDSS MJD (MJD+0.4)") +from lvmguider.transformations import ( + ag_to_full_frame, + get_crota2, + match_with_gaia, + solve_camera_with_astrometrynet, + wcs_from_gaia, +) +from lvmguider.types import ARRAY_2D_F32 - if is_master: - header["EXPTIME"] = (1.0, "[s] Exposure time") - header["PIXSIZE"] = (9.0, "[um] Pixel size") - header["PIXSCALE"] = (1.009, "[arcsec/pix]Scaled of unbinned pixel") - header.insert("TELESCOP", ("", "/*** BASIC DATA ***/")) +if TYPE_CHECKING: + from lvmguider.astrometrynet import AstrometrySolution - # Frame info - header["FRAME0"] = (frame0, "First frame in guide sequence") - header["FRAMEN"] = (framen, "Last frame in guide sequence") - header["NFRAMES"] = (framen - frame0 + 1, "Number of frames in sequence") - - if not is_master: - header["STACK0"] = (stack0, "First stacked frame") - header["STACKN"] = (stackn, "Last stacked frame") - header["NSTACKED"] = (stackn - stack0 + 1, "Number of frames stacked") - - if not is_master: - header["COESTIM"] = ("median", "Estimator used to stack data") - header["SIGCLIP"] = (use_sigmaclip, "Was the stack sigma-clipped?") - header["SIGMA"] = (sigma, "Sigma used for sigma-clipping") - - header["OBSTIME0"] = (frames[0].date_obs.isot, "DATE-OBS of first frame") - header["OBSTIMEN"] = (frames[-1].date_obs.isot, "DATE-OBS of last frame") - header["FWHM0"] = (fwhm0, "[arcsec] FWHM of sources in first frame") - header["FWHMN"] = (fwhmn, "[arcsec] FWHM of sources in last frame") - header["FHHMMED"] = (fwhm_median, "[arcsec] Median of the FHWM of all frames") - header["COFWHM"] = (cofwhm, "[arcsec] Co-added median FWHM") - header["COFWHMST"] = (cofwhmst, "[arcsec] Co-added FWHM standard deviation") - - if not is_master: - header["PAMIN"] = (crota2_min, "[deg] Minimum PA from WCS") - header["PAMAX"] = (crota2_max, "[deg] Maximum PA from WCS") - header["PADRIFT"] = (crota2_drift, "[deg] PA drift in frame range") - - header["ZEROPT"] = (zp, "[mag] Instrumental zero-point") - header.insert("FRAME0", ("", "/*** CO-ADDED PARAMETERS ***/")) - # Warnings - header["WARNPADR"] = (drift_warning, "PA drift > 0.1 degrees") - header["WARNTRAN"] = (False, "Transparency N magnitudes above photometric") - header["WARNMATC"] = (not matched, "Co-added frame could not be matched with Gaia") - header.insert("WARNPADR", ("", "/*** WARNINGS ***/")) +AnyPath = str | os.PathLike - if wcs is not None: - header.extend(wcs_header) - if is_master: - header.insert("WCSAXES", ("", "/*** MASTER FRAME WCS ***/")) - else: - header.insert("WCSAXES", ("", "/*** CO-ADDED WCS ***/")) - return header +MULTIPROCESS_MODE: Literal["frames"] | Literal["cameras"] = "frames" +MULTIPROCESS_NCORES: dict[str, int] = {"frames": 4, "cameras": 2} -def refine_camera_wcs( - wcs: WCS, - sources: pandas.DataFrame, - db_connection_params: 
dict[str, Any] = {}, - log: logging.Logger = llog, -): - """Refines a WCS by matching sources to Gaia positions and recreating the WCS. +def process_all_spec_frames(path: AnyPath, **kwargs): + """Processes all the spectrograph frames in a directory. Parameters ---------- - wcs - A WCS associated with the image. Used to determine the centre of the - image and perform a radial query in the database. - sources - A data frame with extracted sources. The data frame is returned - after adding the aperture photometry and estimated zero-points. - If the database connection fails or it is otherwise not possible - to calculate zero-points, the original data frame is returned. - db_connection_params - A dictionary of DB connection parameters to pass to `.get_db_connection`. + path + The path to the directory to process. This is usually a + ``/data/spectro/`` directory. + kwargs + Keyword arguments to pass to `.coadd_from_spec_frame`. """ - # Get RA/Dec of centre of frame. - skyc = wcs.pixel_to_world(*XZ_AG_FRAME) - - if isinstance(skyc, list): - log.error("Invalid WCS; cannot determine field centre.") - return (False, wcs) - - # Check if we can use the cached Gaia sources. - do_query: bool = True - gaia_df: pandas.DataFrame | None = None - - if _GAIA_CACHE is not None: - gaia_temp_sky, gaia_temp_df = _GAIA_CACHE - - if gaia_temp_sky.separation(skyc).deg < 0.01: - gaia_df = gaia_temp_df - do_query = False - - if do_query: - gaia_df = get_gaia_sources( - wcs, - db_connection_params=db_connection_params, - include_lvm_mags=False, - ) - - if gaia_df is None: - return (False, wcs) + path = pathlib.Path(path) + spec_files = sorted(path.glob("sdR-*-b1-*.fits.gz")) - matches, nmatches = match_with_gaia(wcs, sources, gaia_df, max_separation=5) + for file in spec_files: + # Skip cals and frames without any guider information. - if nmatches < 5: - log.warning("Insufficient number of matches. Cannot refine WCS.") - return (False, wcs) + header = fits.getheader(str(file)) - # Concatenate frames. - matched_sources = pandas.concat([sources, matches], axis=1) - matched_sources = matched_sources.loc[:, ["ra_epoch", "dec_epoch", "x", "y"]] - matched_sources.dropna(inplace=True) + if header["IMAGETYP"] != "object": + continue - skycoords = SkyCoord( - ra=matched_sources.ra_epoch, - dec=matched_sources.dec_epoch, - unit="deg", - frame="icrs", - ) - refined_wcs: WCS = fit_wcs_from_points( - (matched_sources.x, matched_sources.y), - skycoords, - ) + if header["EXPTIME"] < 120: + log.warning(f"Spec frame {file.name!s} is too short. Skipping.") + continue - return (True, refined_wcs) + coadd_from_spec_frame(file, **kwargs) -def get_gaia_sources( - wcs: WCS, - db_connection_params: dict[str, Any] = {}, - include_lvm_mags: bool = True, - log: logging.Logger = llog, +def coadd_from_spec_frame( + file: AnyPath, + outpath: str | None = None, + telescopes: list[str] = ["sci", "spec", "skye", "skyw"], + use_time_range: bool | None = None, + **kwargs, ): - """Returns a data frame with Gaia source information from a WCS. + """Processes the guider frames associated with an spectrograph file. Parameters ---------- - wcs - A WCS associated with the image. Used to determine the centre of the - image and perform a radial query in the database. - sources - A data frame with extracted sources. The data frame is returned - after adding the aperture photometry and estimated zero-points. - If the database connection fails or it is otherwise not possible - to calculate zero-points, the original data frame is returned. 
- include_lvm_mags - If `True`, match to ``lvm_magnitude`` and return LVM AG passband - magnitudes and fluxes. - db_connection_params - A dictionary of DB connection parameters to pass to `.get_db_connection`. + file + The spectro file to process. Guider frames will be selected + from the header keywords in the file. + output + The path where to save the global co-added frame. If `None`, + uses the default path which includes the frame number of the + spectro file. + telescopes + The list of telescope guider frames to process. + use_time_range + If `True`, selects guider frames from their timestamps instead + of using the headers in the spectro file. + kwargs + Keyword arguments to pass to `.create_global_coadd`. """ - global _GAIA_CACHE - - CAM_FOV = numpy.max(XZ_AG_FRAME) / 3600 - - conn = get_db_connection(**db_connection_params) - - try: - conn.connect() - except OperationalError as err: - log.error(f"Failed connecting to DB: {err}") - return None - - # Get RA/Dec of centre of frame. - skyc = wcs.pixel_to_world(*XZ_AG_FRAME) - ra: float = skyc.ra.deg - dec: float = skyc.dec.deg - - # Query lvm_magnitude and gaia_dr3_source in a radial query around RA/Dec. + outpath = outpath or config["coadds"]["paths"]["coadd_spec_path"] - gdr3_sch, gdr3_table = config["database"]["gaia_dr3_source_table"].split(".") - lmag_sch, lmag_table = config["database"]["lvm_magnitude_table"].split(".") - GDR3 = peewee.Table(gdr3_table, schema=gdr3_sch).bind(conn) - LMAG = peewee.Table(lmag_table, schema=lmag_sch).bind(conn) + file = pathlib.Path(file) + if not file.exists(): + raise FileExistsError(f"File {file!s} not found.") - if include_lvm_mags: - cte = ( - LMAG.select( - LMAG.c.source_id, - LMAG.c.lmag_ab, - LMAG.c.lflux, - ) - .where(peewee.fn.q3c_radial_query(LMAG.c.ra, LMAG.c.dec, ra, dec, CAM_FOV)) - .cte("cte", materialized=True) - ) + specno = int(file.name.split("-")[-1].split(".")[0]) + outpath = outpath.format(specno=specno) - query = ( - cte.select( - cte.star, - GDR3.c.ra, - GDR3.c.dec, - GDR3.c.pmra, - GDR3.c.pmdec, - GDR3.c.phot_g_mean_mag, - ) - .join(GDR3, on=(cte.c.source_id == GDR3.c.source_id)) - .with_cte(cte) - .dicts() + for telescope in telescopes: + log.info( + f"Generating co-added frame for {telescope!r} for " + f"spectrograph file {file!s}" ) - else: - query = ( - GDR3.select( - GDR3.c.ra, - GDR3.c.dec, - GDR3.c.pmra, - GDR3.c.pmdec, - GDR3.c.phot_g_mean_mag, + try: + log.debug("Identifying guider frames.") + frames = get_guider_files_from_spec( + file, + telescope=telescope, + use_time_range=use_time_range, ) - .where(peewee.fn.q3c_radial_query(GDR3.c.ra, GDR3.c.dec, ra, dec, CAM_FOV)) - .dicts() - ) - - with conn.atomic(): - conn.execute_sql("SET LOCAL work_mem='2GB'") - conn.execute_sql("SET LOCAL enable_seqscan=false") - data = query.execute(conn) + except Exception as err: + log.error(f"Cannot process {telescope!r} for {file!s}: {err}") + continue - df = pandas.DataFrame.from_records(data) + if len(frames) < 4: + log.warning(f"No guider frames found for {telescope!r} in {file!s}") + continue - _GAIA_CACHE = (skyc, df.copy()) + try: + create_global_coadd(frames, telescope=telescope, outpath=outpath, **kwargs) + except Exception as err: + log.critical( + f"Failed generating co-added frames for {file!s} on " + f"telescope {telescope!r}: {err}" + ) - return df +def create_global_coadd( + files: Sequence[AnyPath], + telescope: str, + outpath: str | None = "default", + save_camera_coadded: bool = False, + generate_qa: bool = True, + **coadd_camera_kwargs, +): + """Produces a global 
co-added frame. -def match_with_gaia( - wcs: WCS, - sources: pandas.DataFrame, - gaia_sources: pandas.DataFrame, - max_separation: float = 2, -) -> tuple[pandas.DataFrame, int]: - """Match detections to Gaia sources using nearest neighbours. + Calls `.coadd_camera` with the frames for each camera and produces + a single co-added file with the extensions from the camera co-added frames + and an extension ``COADD`` with the global astrometric solution of the + full frame. Parameters ---------- - wcs - The WCS associated with the detections. Used to determine pixels on the - image frame. - sources - A data frame with extracted sources. - gaia_sources - A data frame with Gaia sources to be matched. - max_separation - Maximum separation between detections and matched Gaia sources, in arcsec. + files + The list of files to co-add. Must include frames from all the cameras for + a given guider sequence. + telescope + The telescope associated with the images. + outpath + The path of the global co-added frame. If a relative path, it is written + relative to the path of the first file in the list. If `False`, the file + is not written to disk but the ``HDUList` is returned. With ``"default"``, + the default output path is used. + save_camera_coadded + Whether to write to disk the individual co-added images for each camera. + coadd_camera_kwargs + Arguments to pass to `.coadd_camera` for each set of camera frames. Returns ------- - matches - A tuple with the matched data frame and the number of matches. + global_solution + The `.GlobalSolution` instance with the information about the global + frame and solution. """ - # Epoch difference with Gaia DR3. - GAIA_EPOCH = 2016.0 - epoch = Time.now().jyear - epoch_diff = epoch - GAIA_EPOCH - - # Match detections with Gaia sources. Take into account proper motions. - ra_gaia: ARRAY_1D_F32 = gaia_sources.ra.to_numpy(numpy.float32) - dec_gaia: ARRAY_1D_F32 = gaia_sources.dec.to_numpy(numpy.float32) - pmra: ARRAY_1D_F32 = gaia_sources.pmra.to_numpy(numpy.float32) - pmdec: ARRAY_1D_F32 = gaia_sources.pmdec.to_numpy(numpy.float32) - - pmra_gaia = numpy.nan_to_num(pmra) / 1000 / 3600 # deg/yr - pmdec_gaia = numpy.nan_to_num(pmdec) / 1000 / 3600 - - ra_epoch = ra_gaia + pmra_gaia / numpy.cos(numpy.radians(dec_gaia)) * epoch_diff - dec_epoch = dec_gaia + pmdec_gaia * epoch_diff - - gaia_sources["ra_epoch"] = ra_epoch - gaia_sources["dec_epoch"] = dec_epoch - - # Calculate x/y pixels of the Gaia detections. We use origin 0 but the - # sep/SExtractor x/y in sources assume that the centre of the lower left - # pixel is (1,1) so we adjust the returned pixel values. - xpix, ypix = wcs.wcs_world2pix(ra_epoch, dec_epoch, 0) - gaia_sources["xpix"] = xpix + 0.5 - gaia_sources["ypix"] = ypix + 0.5 - - tree = KDTree(gaia_sources.loc[:, ["xpix", "ypix"]].to_numpy()) - dd, ii = tree.query(sources.loc[:, ["x", "y"]].to_numpy()) - valid = dd < max_separation - - # Get Gaia rows for the valid matches. Change their indices to those - # of their matching sources (which are 0..len(sources)-1 since we reindexed). - matches = gaia_sources.iloc[ii[valid]] - matches.index = numpy.arange(len(ii))[valid] - - return matches, valid.sum() - - -def estimate_zeropoint( - coadd_image: ARRAY_2D_F32, - sources: pandas.DataFrame, - wcs: WCS, - gain: float = 5, - max_separation: float = 2, - ap_radius: float = 6, - ap_bkgann: tuple[float, float] = (6, 10), - log: logging.Logger | None = None, - db_connection_params: dict[str, Any] = {}, -): - """Determines the ``lmag`` zeropoint for each source. 
- - Parameters - ---------- - sources - A data frame with extracted sources. The data frame is returned - after adding the aperture photometry and estimated zero-points. - If the database connection fails or it is otherwise not possible - to calculate zero-points, the original data frame is returned. - wcs - A WCS associated with the image. Used to determine the centre of the - image and perform a radial query in the database. - gain - The gain of the detector in e-/ADU. - max_separation - Maximum separation between detections and matched Gaia sources, in arcsec. - ap_radius - The radius to use for aperture photometry, in pixels. - ap_bkgann - The inner and outer radii of the annulus used to determine the background - around each source. - log - An instance of a logger to be used to output messages. If not provided - defaults to the package log. - db_connection_params - A dictionary of DB connection parameters to pass to `.get_db_connection`. + if outpath == "default": + outpath = config["coadds"]["paths"]["coadd_path"] - """ + coadd_camera_kwargs = coadd_camera_kwargs.copy() + if save_camera_coadded is False: + coadd_camera_kwargs["outpath"] = None - # Camera FOV in degrees. Approximate, just for initial radial query. - PIXSCALE = 1.009 # arcsec/pix + camera_to_files = sort_files_by_camera(files) + cameras = [camera for camera in camera_to_files if len(camera_to_files[camera]) > 0] + camera_files = [camera_to_files[camera] for camera in cameras] - log = log or llog + frame_nos = set([get_frameno(file) for file in files]) - gaia_df = get_gaia_sources(wcs, db_connection_params=db_connection_params, log=log) - if gaia_df is None: - log.error("Cannot match sources with Gaia DR3.") - return (False, sources) + # Co-add each camera independently. + coadd_camera_partial = partial(coadd_camera, **coadd_camera_kwargs) - if len(gaia_df) == 0: - log.warning("No Gaia sources found. Cannot estimate zero point.") - return (False, sources) + if MULTIPROCESS_MODE == "cameras": + with multiprocessing.Pool(MULTIPROCESS_NCORES["cameras"]) as pool: + coadd_solutions = list(pool.map(coadd_camera_partial, camera_files)) + else: + coadd_solutions = list(map(coadd_camera_partial, camera_files)) - # Reset index of sources. - sources = sources.reset_index(drop=True) + # Now create a global solution. + root = pathlib.Path(files[0]).parent - matches, nmatches = match_with_gaia( - wcs, - sources, - gaia_df, - max_separation=max_separation, - ) + get_guider_solutions_p = partial(get_guider_solutions, root, telescope) + if MULTIPROCESS_MODE == "cameras": + with multiprocessing.Pool(MULTIPROCESS_NCORES["cameras"]) as pool: + guider_solutions = list(pool.map(get_guider_solutions_p, list(frame_nos))) + else: + guider_solutions = map(get_guider_solutions_p, list(frame_nos)) - if nmatches < 5: - log.error("Insufficient number of matches. Cannot produce ZPs.") - return (False, sources) - - # Concatenate frames. - sources = pandas.concat([sources, matches], axis=1) - - # Calculate the separation between matched sources. Drop xpix/ypix. - dx = sources.xpix - sources.x - dy = sources.ypix - sources.y - sources["match_sep"] = numpy.hypot(dx, dy) * PIXSCALE - sources.drop(columns=["xpix", "ypix"], inplace=True) - - # Add master frame pixels. - xy = sources.loc[:, ["x", "y"]].to_numpy() - camera = sources.iloc[0]["camera"] - telescope = sources.iloc[0]["telescope"] - mf_locs, _ = ag_to_master_frame(f"{telescope}-{camera[0]}", xy) - sources.loc[:, ["x_mf", "y_mf"]] = mf_locs - - # Do aperture photometry around the detections. 
- flux_adu, fluxerr_adu, _ = sep.sum_circle( - coadd_image, - sources.x, - sources.y, - ap_radius, - bkgann=ap_bkgann, - gain=gain, + gs = GlobalSolution( + coadd_solutions=coadd_solutions, + guider_solutions=[gs for gs in guider_solutions if gs is not None], + telescope=telescope, ) - # Calculate zero point. By definition this is the magnitude - # of an object that produces 1 count per second on the detector. - # For an arbitrary object producing DT counts per second then - # m = -2.5 x log10(DN) - ZP - zp = -2.5 * numpy.log10(flux_adu * gain) - sources.lmag_ab - - sources["ap_flux"] = flux_adu - sources["ap_fluxerr"] = fluxerr_adu - sources["zp"] = zp - - return (True, sources) - + # Fit a new WCS from the individual co-added solutions using the full frame. + sources = gs.sources + if telescope != "spec": + if len(sources.ra.dropna()) > 5: + gs.wcs = wcs_from_gaia(sources, xy_cols=["x_ff", "y_ff"]) + else: + log.warning("Unable to fit global WCS. Not enough matched sources.") -def get_guide_dataframe(frames: list[FrameData]): - """Creates a data frame with guide information from ``proc-`` files.""" + if outpath is not None: + # Create the path for the output file. + date_obs = fits.getval(files[0], "DATE-OBS", "RAW") + sjd = get_sjd("LCO", Time(date_obs, format="isot").to_datetime()) - guide_records: list[dict] = [] - _processed_proc_files: list[pathlib.Path] = [] + frameno0 = min(frame_nos) + frameno1 = max(frame_nos) + outpath = outpath.format( + telescope=telescope, + frameno0=frameno0, + frameno1=frameno1, + mjd=sjd, + ) - for frame in frames: - proc_file = frame.proc_file + if pathlib.Path(outpath).is_absolute(): + path = pathlib.Path(outpath) + else: + path = root / outpath - if proc_file is None or not proc_file.exists(): - continue + if path.parent.exists() is False: + path.parent.mkdir(parents=True, exist_ok=True) - if proc_file in _processed_proc_files: - continue + gs.path = path - hdul = fits.open(proc_file) - if "ASTROMETRY" not in hdul or "SOURCES" not in hdul: - continue + log.info(f"Writing co-added frame to {path.absolute()!s}") - astrom = hdul["ASTROMETRY"].header - - sources = pandas.DataFrame(hdul["SOURCES"].data) - - wcs = WCS(astrom) - - record = dict( - frameno=get_frameno(astrom["FILE0"]), - date=astrom["DATE"] if "DATE" in astrom else numpy.nan, - n_sources=len(sources), - fwhm=frame.fwhm_median or numpy.nan, - telescope=frame.telescope, - ra=astrom["RAMEAS"], - dec=astrom["DECMEAS"], - ra_offset=astrom["OFFRAMEA"], - dec_offset=astrom["OFFDEMEA"], - separation=numpy.hypot(astrom["OFFRAMEA"], astrom["OFFDEMEA"]), - ra_corr=astrom["RACORR"], - dec_corr=astrom["DECORR"], - ax0_corr=astrom["AX0CORR"], - ax1_corr=astrom["AX1CORR"], - mode="acquisition" if astrom["ACQUISIT"] else "guide", - pa=get_crota2(wcs), - wcs_mode=frame.wcs_mode or "", + # Write global header and co-added frames. 
+ hdul = fits.HDUList( + [ + fits.PrimaryHDU(), + fits.ImageHDU( + data=None, + header=create_global_header(gs), + name="GLOBAL", + ), + ] ) - guide_records.append(record) - - df = pandas.DataFrame.from_records(guide_records) - df.sort_values("frameno", inplace=True) - - return df - - -def framedata_to_dataframe(frame_data: list[FrameData]): - """Converts a list of frame data to a data frame (!).""" - - records: list[dict] = [] - for fd in frame_data: - records.append( - dict( - frameno=fd.frameno, - date_obs=fd.date_obs.isot, - camera=fd.camera, - telescope=fd.telescope, - exptime=fd.exptime, - kmirror_drot=fd.kmirror_drot, - focusdt=fd.focusdt, - guide_error=fd.guide_error, - fwhm_median=fd.fwhm_median, - fwhm_std=fd.fwhm_std, - guide_mode=fd.guide_mode, - stacked=int(fd.stacked), + for coadd in gs.coadd_solutions: + hdul.append( + fits.CompImageHDU( + data=coadd.coadd_image, + header=create_coadd_header(coadd), + ) ) - ) - - df = pandas.DataFrame.from_records(records) - df = df.sort_values(["frameno", "camera"]) - - return df - -def get_frame_range( - path: str | pathlib.Path, - telescope: str, - frameno0: int, - frameno1: int, - camera: str | None = None, -) -> list[pathlib.Path]: - """Returns a list of AG frames in a frame number range. - - Parameters - ---------- - path - The directory where the files are written to. - telescope - The telescope for which to select frames. - frameno0 - The initial frame number. - frameno1 - The final frame number. - camera - The camera, east or west, for which to select frames. If not provided - selects both cameras. + hdul.writeto(str(path), overwrite=True) - Returns - ------- - frames - A sorted list of frames in the desired range. + # Write sources to file. + if gs.sources is not None: + sources_path = path.with_name(path.stem + "_sources.parquet") + log.debug(f"Writing co-added sources to {sources_path!s}") + gs.sources.to_parquet(sources_path) - """ + # Write guider data. + guider_data = gs.guider_data() + frames_path = path.with_name(path.stem + "_guiderdata.parquet") + log.debug(f"Writing guide data to {frames_path!s}") + guider_data.to_parquet(frames_path) - if camera is None: - files = pathlib.Path(path).glob(f"lvm.{telescope}.agcam.*.fits") - else: - files = pathlib.Path(path).glob(f"lvm.{telescope}.agcam.{camera}_*.fits") + # Write frame data. + frame_data = gs.frame_data() + frames_path = path.with_name(path.stem + "_frames.parquet") + log.debug(f"Writing frame data to {frames_path!s}") + frame_data.to_parquet(frames_path) - selected: list[pathlib.Path] = [] - for file in files: - frameno = get_frameno(file) - if frameno >= frameno0 and frameno <= frameno1: - selected.append(file) + if generate_qa: + log.info(f"Generating QA plots for {path.absolute()!s}") + plot_qa(gs) - return list(sorted(selected)) + return gs -def get_framedata( - camname: str, - telescope: str, - file: pathlib.Path, - log: logging.Logger = llog, - db_connection_params: dict = {}, -): - """Collects information from a file into a `.FrameData` object.""" - - with fits.open(str(file)) as hdul: - # RAW header - raw_header = hdul["RAW"].header - - frameno = get_frameno(file) - log_h = f"Frame {telescope}-{camname}-{frameno}:" - - # Do some sanity checks. - if raw_header["CAMNAME"] != camname: - raise ValueError(f"{log_h} multiple cameras found in list of frames.") - - if raw_header["TELESCOP"] != telescope: - raise ValueError(f"{log_h} multiple telescopes found in list of frames.") - - # PROC header of the raw AG frame. 
- if "PROC" not in hdul: - log.warning(f"{log_h} PROC extension not found.") - proc_header = None - else: - proc_header = hdul["PROC"].header - - # Determine the median FWHM and FWHM deviation for this frame. - if "SOURCES" not in hdul: - log.warning(f"{log_h} SOURCES extension not found.") - sources = None - fwhm_median = None - fwhm_std = None - else: - sources = pandas.DataFrame(hdul["SOURCES"].data) - valid = sources.loc[(sources.xfitvalid == 1) & (sources.yfitvalid == 1)] - - fwhm = 0.5 * (valid.xstd + valid.ystd) - fwhm_median = float(numpy.median(fwhm)) - fwhm_std = float(numpy.std(fwhm)) - - # Get the proc- file. Just used to determine - # if we were acquiring or guiding. - proc_file = get_proc_path(file) - if not proc_file.exists(): - log.warning(f"{log_h} missing associated proc- file.") - proc_astrometry = None - guide_error = None - proc_file = None - else: - proc_astrometry = fits.getheader(str(proc_file), "ASTROMETRY") - guide_error = numpy.hypot( - proc_astrometry["OFFRAMEA"], - proc_astrometry["OFFDEMEA"], - ) - - wcs_mode = None - wcs = None - if proc_header is not None and "WCSMODE" in proc_header: - wcs_mode = proc_header["WCSMODE"] - - wcs = WCS(proc_header) - - if proc_header["WCSMODE"] == "astrometrynet": - pass - else: - if "GUIDERV" in proc_header and proc_header["GUIDERV"] > "0.4.0": - pass - elif sources is not None and proc_astrometry is not None: - # This is relevant for early PROC extensions during guiding - # in which the WCS was generated by translating the reference - # frame WCS. - - # There was a bug that in some cases caused the WCS CRVAL - # to drift as more offsets were accumulated. So let's get - # the reference image WCS and use that. - ref_file: str | None = None - for key in ["REFFILE0", "REFFILE1"]: - if camname in proc_astrometry[key]: - ref_file = proc_astrometry[key] - break - - if ref_file is None: - log.error( - f"{log_h} cannot find reference file for {file!s}. " - "Cannot refine WCS." - ) - else: - ref_header = fits.getheader(ref_file, "PROC") - ref_wcs = WCS(ref_header) - - log.debug(f"Refining WCS for frame {camname}-{frameno}.") - (refine_ok, refine_wcs) = refine_camera_wcs( - ref_wcs, - sources, - db_connection_params=db_connection_params, - log=log, - ) - if refine_ok: - wcs = refine_wcs - else: - log.warning(f"{log_h} failed refining WCS.") - wcs = None - - # If we have not yet loaded the dark frame, get it and get the - # normalised dark. - dark: ARRAY_2D_F32 | None = None - if proc_header and proc_header.get("DARKFILE", None): - hdul_dark = fits.open(str(proc_header["DARKFILE"])) - - dark_data: ARRAY_2D_U16 = hdul_dark["RAW"].data.astype(numpy.float32) - dark_exptime: float = hdul_dark["RAW"].header["EXPTIME"] - - dark = dark_data / dark_exptime - - hdul_dark.close() - - # Get the frame data and exposure time. Calculate the counts per second. - exptime = float(hdul["RAW"].header["EXPTIME"]) - data: ARRAY_2D_F32 = hdul["RAW"].data.astype(numpy.float32) / exptime - - # If we have a dark frame, subtract it now. If not fit a - # background model and subtract that. - if dark is not None: - data -= dark - else: - log.debug(f"{log_h} dark frame not found. Fitting background.") - back = sep.Background(data) - data -= back.back() - - # Decide whether this frame should be stacked. 
- if proc_astrometry: - guide_mode = "acquisition" if proc_astrometry["ACQUISIT"] else "guide" - if guide_mode == "acquisition": - stacked = False - else: - stacked = True - else: - guide_mode = "none" - stacked = False - - obs_time = Time(raw_header["DATE-OBS"], format="isot") - - # Add information as a FrameData. We do not include the data itself - # because it's in data_stack and we don't need it beyond that. - return FrameData( - file=pathlib.Path(file), - frameno=frameno, - raw_header=raw_header, - proc_header=proc_header, - proc_file=proc_file, - camera=raw_header["CAMNAME"], - telescope=raw_header["TELESCOP"], - date_obs=obs_time, - sjd=get_sjd("LCO", obs_time.to_datetime()), - exptime=exptime, - image_type=raw_header["IMAGETYP"], - kmirror_drot=raw_header["KMIRDROT"], - focusdt=raw_header["FOCUSDT"], - fwhm_median=fwhm_median, - fwhm_std=fwhm_std, - guide_error=guide_error, - guide_mode=guide_mode, - stacked=stacked, - wcs=wcs, - wcs_mode=wcs_mode, - data=data, - ) - - -def coadd_camera_frames( - files: list[str | pathlib.Path], - outpath: str | None = COADD_CAMERA_DEFAULT_PATH, +def coadd_camera( + files: Sequence[AnyPath], + outpath: str | None = "default", use_sigmaclip: bool = False, sigma: float = 3.0, - skip_acquisition_after_guiding: bool = False, database_profile: str = "default", database_params: dict = {}, - log: logging.Logger | None = None, - verbose: bool = False, - quiet: bool = False, + overwrite_reprocess: bool = False, ): """Co-adds a series of AG camera frames. This routine does the following: - - Selects all the frames from the end of the initial acquisition. + - Receives a range of guider frames that usually correspond to a full guider + sequence. - For each frame in the range to co-add, substracts the dark current frame and derives counts per second. - Co-adds the frames using a sigma-clipped median. @@ -876,59 +363,41 @@ def coadd_camera_frames( outpath The path of the co-added frame. If a relative path, it is written relative to the path of the first file to be co-added. If `None`, the file is not - written to disk but the ``HDUList` is returned. + written to disk but the ``HDUList` is returned. If the value is + ``"default"``, a default path is used. use_sigmaclip Whether to use sigma clipping when combining the stack of data. Disabled by default as it uses significant CPU and memory. sigma The sigma value for co-addition sigma clipping. - skip_acquisition_after_guiding - Images are co-added starting on the first frame marked as ``'guide'`` - and until the last frame. If ``skip_acquisition_after_guiding=True``, - ``'acquisition'`` images found after the initial acquisition are - discarded. database_profile Profile name to use to connect to the database and query Gaia. database_params Additional database parameters used to override the profile. - log - An instance of a logger to be used to output messages. If not provided - defaults to the package log. - verbose - Increase verbosity of the logging. - quiet - Do not output log messages. + overwrite_reprocess + Whether to overwrite reprocessed files or use already existing ones. Returns ------- - hdul,frame_data - A tuple with the ``HDUList`` object with the co-added frame, and - the frame data. + camera_solution + The `.CoAdd_CameraSolution` object holding the information about + the co-added frame. 
""" db_connection_params = {"profile": database_profile, **database_params} - # Create log and set verbosity - log = log or llog - for handler in log.handlers: - if verbose: - handler.setLevel(logging.DEBUG) - elif quiet: - handler.setLevel(logging.CRITICAL) + if outpath == "default": + outpath = config["coadds"]["paths"]["coadd_camera_path"] # Get the list of frame numbers from the files. - files = list(sorted(files)) - paths = [pathlib.Path(file) for file in files] + paths = sorted([pathlib.Path(file) for file in files]) frame_nos = sorted([get_frameno(path) for path in paths]) # Use the first file to get some common data (or at least it should be common!) sample_raw_header = fits.getheader(paths[0], "RAW") - gain: float = sample_raw_header["GAIN"] - ra: float = sample_raw_header["RA"] - dec: float = sample_raw_header["DEC"] camname: str = sample_raw_header["CAMNAME"] telescope: str = sample_raw_header["TELESCOP"] dateobs0: str = sample_raw_header["DATE-OBS"] @@ -941,115 +410,73 @@ def coadd_camera_frames( # Loop over each file, add the dark-subtracked data to the stack, and collect # frame metadata. - get_framedata_partial = partial( - get_framedata, - camname, - telescope, + create_framedata_partial = partial( + create_framedata, db_connection_params=db_connection_params, - log=log, + overwrite_reprocess=overwrite_reprocess, ) - frame_data = list(map(get_framedata_partial, paths)) + if MULTIPROCESS_MODE == "frames": + with multiprocessing.Pool(MULTIPROCESS_NCORES["frames"]) as pool: + _frames = list(pool.map(create_framedata_partial, paths)) + else: + _frames = list(map(create_framedata_partial, paths)) + + frames = [frame for frame in _frames if frame is not None] data_stack: list[ARRAY_2D_F32] = [] - for fd in frame_data: - data = fd.data + for fd in frames: + if fd.data is not None and fd.stacked is True: + data_stack.append(fd.data) fd.data = None - if data is None or fd.stacked is False: - del data - continue + coadd_image: ARRAY_2D_F32 | None = None + coadd_solution: CoAdd_CameraSolution - guide_mode = fd.guide_mode - skip_acquisition = len(data_stack) == 0 or skip_acquisition_after_guiding - if guide_mode == "acquisition" and skip_acquisition: - fd.stacked = False + if len(data_stack) == 0 or telescope == "spec": + if telescope == "spec": + log.debug(f"Not stacking data for telecope {telescope!r}.") else: - data_stack.append(data) - - del data - - if len(data_stack) == 0: - log.error(f"No stack data for {telescope!r}.") - return None, frame_data - - # Combine the stack of data frames using the median of each pixel. - # Optionally sigma-clip each pixel (this is computationally intensive). - if use_sigmaclip: - log.debug(f"Sigma-clipping stack with sigma={sigma}") - stack_masked = sigma_clip( - numpy.array(data_stack), - sigma=int(sigma), - maxiters=3, - masked=True, - axis=0, - copy=False, - ) + log.error(f"No data to stack for {telescope!r}.") - log.debug("Creating median-combined co-added frame.") - coadd: ARRAY_2D_F32 = numpy.ma.median(stack_masked, axis=0).data + coadd_solution = CoAdd_CameraSolution( + frameno=-1, + camera=camname, + telescope=telescope, + ) else: - log.debug("Creating median-combined co-added frame.") - coadd: ARRAY_2D_F32 = numpy.median(numpy.array(data_stack), axis=0) + log.debug(f"Creating median-combined co-added frame for {camname!r}.") + + # Combine the stack of data frames using the median of each pixel. + # Optionally sigma-clip each pixel (this is computationally intensive). 
+ if use_sigmaclip: + log.debug(f"Sigma-clipping stack with sigma={sigma}") + stack_masked = sigma_clip( + numpy.array(data_stack), + sigma=int(sigma), + maxiters=3, + masked=True, + axis=0, + copy=False, + ) + coadd_image = numpy.ma.median(stack_masked, axis=0).data - del data_stack + else: + coadd_image = numpy.median(numpy.array(data_stack), axis=0) - # Extract sources in the co-added frame. - coadd_sources = extract_marginal( - coadd, - box_size=31, - threshold=3.0, - max_detections=50, - sextractor_quick_options={"minarea": 5}, - ) - coadd_sources["telescope"] = telescope - coadd_sources["camera"] = camname - - # Get astrometry.net solution. - camera_solution = solve_locs( - coadd_sources.loc[:, ["x", "y", "flux"]], - ra, - dec, - full_frame=False, - raise_on_unsolved=False, - ) + del data_stack + assert coadd_image is not None - if camera_solution.solved is False: - log.warning("Cannot determine astrometric solution for co-added frame.") - wcs = None - matched = False - # TODO: if this fails we could still use kd-tree and Gaia but we need - # a better estimate of the RA/Dec of the centre of the camera. - else: - wcs = camera_solution.wcs - matched, coadd_sources = estimate_zeropoint( - coadd, - coadd_sources, - wcs, - gain=gain, - log=log, + coadd_solution = process_camera_coadd( + coadd_image, + frames, db_connection_params=db_connection_params, ) - # Construct the header. - header = create_coadded_frame_header( - list(frame_data), - coadd_sources, - matched=matched, - use_sigmaclip=use_sigmaclip, - sigma=int(sigma) if use_sigmaclip else None, - wcs=wcs, - ) - - # Get frame data table. - frame_data_df = framedata_to_dataframe(list(frame_data)) - - # Create the co-added HDU list. - hdul = fits.HDUList([fits.PrimaryHDU()]) - hdul.append(fits.CompImageHDU(data=coadd, header=header, name="COADD")) - hdul.append(fits.BinTableHDU(Table.from_pandas(coadd_sources), name="SOURCES")) - hdul.append(fits.BinTableHDU(Table.from_pandas(frame_data_df), name="FRAMEDATA")) + coadd_solution.frames = frames + coadd_solution.sigmaclip = use_sigmaclip + coadd_solution.sigmaclip_sigma = sigma if use_sigmaclip else None if outpath is not None: # Create the path for the output file. @@ -1064,355 +491,707 @@ def coadd_camera_frames( ) if pathlib.Path(outpath).is_absolute(): - outpath_full = pathlib.Path(outpath) + path = pathlib.Path(outpath) else: - outpath_full = pathlib.Path(paths[0]).parent / outpath + path = pathlib.Path(paths[0]).parent / outpath - if outpath_full.parent.exists() is False: - outpath_full.parent.mkdir(parents=True, exist_ok=True) + if path.parent.exists() is False: + path.parent.mkdir(parents=True, exist_ok=True) - log.info(f"Writing co-added frame to {outpath_full.absolute()!s}") - hdul.writeto(str(outpath_full), overwrite=True) + coadd_solution.path = path - hdul.close() + log.info(f"Writing co-added frame to {path.absolute()!s}") + + # Write co-addded image and header. + hdul = fits.HDUList( + [ + fits.PrimaryHDU(), + fits.CompImageHDU( + data=coadd_image, + header=create_coadd_header(coadd_solution), + name="COADD", + ), + ] + ) + hdul.writeto(str(path), overwrite=True) + + # Write sources to file. + if coadd_solution.sources is not None: + sources_path = path.with_name(path.stem + "_sources.parquet") + log.debug(f"Writing co-added sources to {sources_path!s}") + coadd_solution.sources.to_parquet(sources_path) - return hdul, frame_data + # Write frame data. 
+ frame_data = coadd_solution.frame_data() + frames_path = path.with_name(path.stem + "_frames.parquet") + log.debug(f"Writing frame data to {frames_path!s}") + frame_data.to_parquet(frames_path) + return coadd_solution -def create_master_coadd( - files: list[str] | list[pathlib.Path], - outpath: str | None = MASTER_COADD_DEFAULT_PATH, - save_camera_coadded_frames: bool = False, - log: logging.Logger | None = None, - verbose: bool = False, - quiet: bool = False, - **coadd_camera_frames_kwargs, + +def create_framedata( + path: pathlib.Path, + db_connection_params: dict = {}, + overwrite_reprocess: bool = False, ): - """Produces a master co-added frame. + """Collects information from a guider frame into a `.FrameData` object.""" - Calls `.coadd_camera_frames` with the frames for each camera and produces - a single co-added file with the extensions from the camera co-added frames - and an extension ``MASTER`` with the global astrometric solution of the - master frame. + # Copy the original path. + orig_path: pathlib.Path = path + reprocessed: bool = False - Parameters - ---------- - files - The list of files to co-add. Must include frames from all the cameras for - a given guide sequences. - outpath - The path of the master co-added frame. If a relative path, it is written - relative to the path of the first file in the list. If `None`, the file - is not written to disk but the ``HDUList` is returned. - save_camera_coadded_frames - Whether to write to disk the individual co-added images for each camera. - log - An instance of a logger to be used to output messages. If not provided - defaults to the package log. - verbose - Increase verbosity of the logging. - quiet - Do not output log messages. - coadd_camera_frames_kwargs - Arguments to pass to `.coadd_camera_frames` for each set of camera frames. + if not path.exists(): + log.error(f"Cannot find frame {path!s}") + return None - Returns - ------- - hdul - An ``HDUList`` object with the maste co-added frame. + try: + guiderv = fits.getval(str(path), "GUIDERV", "PROC") + except Exception: + guiderv = "0.0.0" - """ + if Version(guiderv) < Version("0.4.0a0"): + new_path = reprocess_agcam( + path, + db_connection_params=db_connection_params, + overwrite=overwrite_reprocess, + ) + if new_path is not None: + reprocessed = True + path = new_path - # Create log and set verbosity - log = log or llog - for handler in log.handlers: - if verbose: - handler.setLevel(logging.DEBUG) - elif quiet: - handler.setLevel(logging.CRITICAL) - - coadd_camera_frames_kwargs = coadd_camera_frames_kwargs.copy() - if save_camera_coadded_frames is False: - coadd_camera_frames_kwargs["outpath"] = None - - coadd_camera_frames_kwargs["log"] = log - coadd_camera_frames_kwargs["quiet"] = False - coadd_camera_frames_kwargs["verbose"] = False - - camera_to_files: defaultdict[str, list[pathlib.Path]] = defaultdict(list) - for file in files: - for camera in ["east", "west"]: - if camera in str(file): - camera_to_files[camera].append(pathlib.Path(file)) - break + with fits.open(str(path)) as hdul: + # RAW header + raw_header = hdul["RAW"].header - cameras = [camera for camera in camera_to_files if len(camera_to_files[camera]) > 0] - camera_files = [camera_to_files[camera] for camera in cameras] + frameno = get_frameno(path) + telescope = raw_header["TELESCOP"] + camname = raw_header["CAMNAME"] + date_obs = Time(raw_header["DATE-OBS"], format="isot") - # TODO: this could be async but I'm getting some errors in astropy when - # trying it, so for now let's do sync. 
- ccf_partial = partial(coadd_camera_frames, **coadd_camera_frames_kwargs) - hduls = map(ccf_partial, camera_files) - - # Create master HDU list and concatenate sources. Create MASTER - # extension but leave empty for now. - master_hdu = fits.HDUList([fits.PrimaryHDU()]) - source_dfs: list[pandas.DataFrame] = [] - frame_data_all: list[FrameData] = [] - for ii, (hdul, frame_data) in enumerate(hduls): - if hdul is None: - continue + log_h = f"Frame {telescope}-{camname}-{frameno}:" - assert isinstance(hdul, fits.HDUList) + # PROC header of the raw AG frame. + if "PROC" not in hdul: + log.error(f"{log_h} cannot find PROC extension.") + return None - source_dfs.append(pandas.DataFrame(hdul["SOURCES"].data)) - coadd_hdu = hdul["COADD"] - coadd_hdu.name = f"COADD_{cameras[ii].upper()}" - master_hdu.append(coadd_hdu) + proc_header = hdul["PROC"].header - frame_data_all += frame_data + # We use the original path here always because reprocessed + # files do not include image data. + data, dark_sub = get_dark_subtracted_data(orig_path) + if dark_sub is False: + log.debug(f"{log_h} missing dark frame. Fitting and removing background.") - if len(source_dfs) == 0: - log.error("No source data to concatenate. Cannot create master co-added frame.") - return None + wcs_mode = proc_header["WCSMODE"] + stacked = (wcs_mode == "gaia") or reprocessed - sources_coadd = pandas.concat(source_dfs) - master_hdu.append( - fits.BinTableHDU( - data=Table.from_pandas(sources_coadd), - name="SOURCES", - ) + try: + solution = CameraSolution.open(path) + except ValueError as err: + log.error(f"{log_h} failed creating CameraSolution ({err})") + return None + + if solution.sources is None: + log.warning(f"{log_h} no sources found.") + + # Add information as a FrameData. + return FrameData( + frameno=frameno, + path=path, + solution=solution, + raw_header=dict(raw_header), + camera=camname, + telescope=telescope, + date_obs=date_obs, + sjd=get_sjd("LCO", date_obs.to_datetime()), + exptime=raw_header["EXPTIME"], + image_type=raw_header["IMAGETYP"], + kmirror_drot=raw_header["KMIRDROT"], + focusdt=raw_header["FOCUSDT"], + stacked=stacked, + data=data, + reprocessed=reprocessed, ) - wcs_mf: WCS | None = None - if "x_mf" in sources_coadd: - # Now let's create the master frame WCS. It's easy since we already matched - # with Gaia. - wcs_data = sources_coadd.loc[:, ["x_mf", "y_mf", "ra_epoch", "dec_epoch"]] - wcs_data = wcs_data.dropna() - - skycoords = SkyCoord( - ra=wcs_data.ra_epoch, - dec=wcs_data.dec_epoch, - unit="deg", - frame="icrs", - ) - wcs_mf = fit_wcs_from_points((wcs_data.x_mf, wcs_data.y_mf), skycoords) - - # Initial MF HDU - mf_header = create_coadded_frame_header( - frame_data_all, - sources_coadd, - matched=wcs_mf is not None, - wcs=wcs_mf, - is_master=True, - ) - mf_hdu = fits.ImageHDU(name="MASTER", header=mf_header) - master_hdu.insert(1, mf_hdu) - - # Add PA derived from MF WCS. 
- pawcs = round(get_crota2(wcs_mf), 3) if wcs_mf else None - mf_hdu.header.insert( - "ZEROPT", - ("PAWCS", pawcs, "[deg] PA of the IFU from master frame co-added WCS"), - ) - guide_data = get_guide_dataframe(frame_data_all) - frame_data_t = Table.from_pandas(framedata_to_dataframe(frame_data_all)) +def process_camera_coadd( + data: ARRAY_2D_F32, + framedata: list[FrameData], + db_connection_params={}, +): + """Processes co-added data, extracts sources, and determines the WCS.""" - master_hdu.append(fits.BinTableHDU(Table.from_pandas(guide_data), name="GUIDEDATA")) - master_hdu.append(fits.BinTableHDU(frame_data_t, name="FRAMEDATA")) + frame: FrameData | None = None + for fd in framedata[::-1]: # In reverse order. Last file is likely to be guiding. + if fd.solution.solved and (fd.solution.wcs_mode == "gaia" or fd.reprocessed): + frame = fd + break - # Calculate PA drift. Do some sigma clipping to remove big outliers. - pa_values: ARRAY_1D_F32 = guide_data.pa.dropna().to_numpy(numpy.float32) - pa_values = cast(ARRAY_1D_F32, sigma_clip(pa_values, 10, masked=True)) - pa_min = round(pa_values.min(), 6) - pa_max = round(pa_values.max(), 6) - pa_drift = abs(pa_max - pa_min) + if frame is None or frame.solution.wcs is None: + raise RuntimeError("Cannot find WCS solutions for coadded frame.") - mf_hdu.header.insert( - "ZEROPT", - ("PAMIN", pa_min, "[deg] Min PA from individual master frames"), + # Extract sources in the co-added frame. + coadd_sources = extract_marginal( + data, + box_size=31, + threshold=3.0, + max_detections=50, + sextractor_quick_options={"minarea": 5}, ) - mf_hdu.header.insert( - "ZEROPT", - ("PAMAX", pa_max, "[deg] Max PA from individual master frames"), + coadd_sources["telescope"] = frame.telescope + coadd_sources["camera"] = frame.camera + + # Add global frame pixels. + xy = coadd_sources.loc[:, ["x", "y"]].to_numpy() + ff_locs, _ = ag_to_full_frame(f"{frame.telescope}-{frame.camera[0]}", xy) + coadd_sources.loc[:, ["x_ff", "y_ff"]] = ff_locs + + # Match with Gaia sources. + coadd_sources, n_matches = match_with_gaia( + frame.solution.wcs, + coadd_sources, + max_separation=3, + concat=True, + db_connection_params=db_connection_params, ) - mf_hdu.header.insert( - "ZEROPT", - ("PADRIFT", pa_drift, "[deg] PA drift in sequence"), + + wcs = None + if n_matches < 5: + log.warning("Insufficient number of Gaia matches. Cannot generate WCS.") + else: + # Get WCS solution from Gaia. + wcs = wcs_from_gaia(coadd_sources) + + zp = estimate_zeropoint(data, coadd_sources) + coadd_sources.update(zp) + + return CoAdd_CameraSolution( + frameno=-1, + coadd_image=data, + telescope=frame.telescope, + camera=frame.camera, + sources=coadd_sources, + wcs=wcs, + wcs_mode="gaia", + matched=wcs is not None, ) - mf_hdu.header["WARNPADR"] = pa_drift > DRIFT_WARN_THRESHOLD - if outpath is not None: - # Create the path for the output file. 
- sample_header = master_hdu[f"COADD_{cameras[0].upper()}"].header - frameno0 = sample_header["FRAME0"] - frameno1 = sample_header["FRAMEN"] - telescope = sample_header["TELESCOP"] +def get_guider_solutions(root: pathlib.Path, telescope: str, frameno: int): + """Collects guider solutions for a frame.""" - outpath = outpath.format( - telescope=telescope, - frameno0=frameno0, - frameno1=frameno1, - mjd=frame_data_all[0].sjd, + filename = f"lvm.{telescope}.guider_{frameno:08d}.fits" + + log_h = f"Frame {telescope}-{frameno}:" + + path = root / filename + if not path.exists(): + try: + path = reprocess_legacy_guider_frame(root, frameno, telescope) + except Exception: + log.error(f"{log_h} failed retrieving guider solution.") + return None + + guider_data = fits.getheader(path, "GUIDERDATA") + + solution = GuiderSolution.open(path) + solution.guide_mode = guider_data["GUIDMODE"] + solution.ra_field = guider_data["RAFIELD"] + solution.dec_field = guider_data["DECFIELD"] + solution.pa_field = guider_data.get("PAFIELD", numpy.nan) + + return solution + + +def create_coadd_header(solution: CoAdd_CameraSolution): + """Creates the header object for a co-added frame.""" + + assert solution.frames is not None + assert solution.sources is not None + + telescope = solution.telescope + + frame_data = solution.frame_data() + stacked = frame_data.loc[frame_data.stacked == 1, :] + + if len(frame_data) == 0 or len(stacked) == 0: + raise ValueError("No stacked data found.") + + frame0 = frame_data.iloc[0].frameno + framen = frame_data.iloc[-1].frameno + stack0 = stacked.iloc[0].frameno + stackn = stacked.iloc[-1].frameno + + isot = frame_data.iloc[0].date_obs + sjd = get_sjd("LCO", Time(isot, format="isot").to_datetime()) + + # Gets the range of FWHM in the sequence + fwhm0: float | None = None + fwhmn: float | None = None + fwhm_median: float | None = None + fwhm_medians = stacked.fwhm.dropna() + if len(fwhm_medians) > 0: + fwhm0 = fwhm_medians.iloc[0] + fwhmn = fwhm_medians.iloc[-1] + fwhm_median = float(numpy.median(fwhm_medians)) + + # Gets the FWHM of the sources extracted from the stacked image. + cofwhm = solution.fwhm + cofwhmst = solution.sources.loc[solution.sources.valid == 1].fwhm.dropna().median() + + # Determine the PA drift due to k-mirror tracking. 
+ frame_pa = stacked.loc[:, ["frameno", "pa"]].dropna() + pa_min: float = numpy.nan + pa_max: float = numpy.nan + pa_drift: float = numpy.nan + + pa_coeffs = numpy.array([numpy.nan, numpy.nan]) + drift_warn: bool = True + + if telescope == "spec": + pa_min = frame_pa.pa.min() + pa_max = frame_pa.pa.max() + pa_drift = numpy.abs(pa_min - pa_max) + drift_warn = False + + elif len(frame_pa) >= 2: + pa_coeffs = polyfit_with_sigclip( + frame_pa.frameno.to_numpy(numpy.float32), + frame_pa.pa.to_numpy(numpy.float32), + sigma=3, + deg=1, ) - if pathlib.Path(outpath).is_absolute(): - outpath_full = pathlib.Path(outpath) - else: - outpath_full = pathlib.Path(files[0]).parent / outpath + pa_vals = numpy.polyval(pa_coeffs, frame_pa.frameno) + pa_min = numpy.round(pa_vals.min(), 4) + pa_max = numpy.round(pa_vals.max(), 4) + pa_drift = numpy.round(numpy.abs(pa_min - pa_max), 4) - if outpath_full.parent.exists() is False: - outpath_full.parent.mkdir(parents=True, exist_ok=True) + drift_warn = pa_drift > config["coadds"]["warnings"]["pa_drift"] - log.info(f"Writing co-added frame to {outpath_full.absolute()!s}") - master_hdu.writeto(str(outpath_full), overwrite=True) + wcs_header = solution.wcs.to_header() if solution.wcs is not None else [] - return master_hdu + header = header_from_model("CAMERA_COADD") + # Basic info + header["TELESCOP"] = frame_data.iloc[0].telescope + header["CAMNAME"] = frame_data.iloc[0].camera + header["MJD"] = sjd + header.insert("TELESCOP", ("", "/*** BASIC DATA ***/")) -def process_spec_frame( - file: pathlib.Path | str, - outpath: str = MASTER_COADD_SPEC_PATH, - log: logging.Logger = llog, - fail_silent: bool = False, - **kwargs, -): - """Processes the guider frames associated with an spectrograph file.""" + # Frame info + header["FRAME0"] = frame0 + header["FRAMEN"] = framen + header["NFRAMES"] = framen - frame0 + 1 + + if telescope != "spec": + header["STACK0"] = stack0 + header["STACKN"] = stackn + header["NSTACKED"] = stackn - stack0 + 1 + header["COESTIM"] = "median" + header["SIGCLIP"] = solution.sigmaclip + header["SIGMA"] = solution.sigmaclip_sigma + + header["OBSTIME0"] = frame_data.iloc[0].date_obs + header["OBSTIMEN"] = frame_data.iloc[-1].date_obs + header["FWHM0"] = nan_or_none(fwhm0, 3) + header["FWHMN"] = nan_or_none(fwhmn, 3) + header["FHHMMED"] = nan_or_none(fwhm_median, 3) + + if telescope != "spec": + header["COFWHM"] = nan_or_none(cofwhm, 3) + header["COFWHMST"] = nan_or_none(cofwhmst, 3) + + header["PACOEFFA"] = nan_or_none(pa_coeffs[0]) + header["PACOEFFB"] = nan_or_none(pa_coeffs[1]) + + header["PAMIN"] = nan_or_none(pa_min, 4) + header["PAMAX"] = nan_or_none(pa_max, 4) + header["PADRIFT"] = nan_or_none(pa_drift, 4) + + header["ZEROPT"] = nan_or_none(solution.zero_point, 3) + + header["SOLVED"] = solution.solved + header.insert("FRAME0", ("", "/*** CO-ADDED PARAMETERS ***/")) - file = pathlib.Path(file) - if not file.exists(): - raise FileExistsError(f"File {file!s} not found.") + # Warnings + header["WARNPADR"] = drift_warn + header["WARNTRAN"] = False + header["WARNMATC"] = not solution.matched if telescope != "spec" else False + header.insert("WARNPADR", ("", "/*** WARNINGS ***/")) - specno = int(file.name.split("-")[-1].split(".")[0]) - outpath = outpath.format(specno=specno) + if solution.wcs is not None: + header.extend(wcs_header) + header.insert("WCSAXES", ("", "/*** CO-ADDED WCS ***/")) - for telescope in ["sci", "skye", "skyw"]: - try: - frames = get_guider_files_from_spec(file, telescope=telescope) - except Exception as err: - if fail_silent is 
False: - log.error(f"Cannot process {telescope} for {file!s}: {err}") - continue + return header - if len(frames) == 0: - if fail_silent is False: - log.warning(f"No guider frames found for {telescope!r} in {file!s}") - continue - log.info( - f"Generating master co-added frame for {telescope!r} for " - f"spectrograph file {file!s}" +def create_global_header(solution: GlobalSolution): + """Creates the header object for a global frame. + + This function is very similar to `.create_coadd_header` but there are + enough differences that it's worth keeping them separate and duplicate + a bit of code. + + """ + + assert solution.sources is not None + + telescope = solution.telescope + + frame_data = solution.frame_data() + guider_data = solution.guider_data() + + if len(frame_data) == 0 or len(guider_data) == 0: + raise RuntimeError("No frame or guider data to combine.") + + guiding = guider_data.loc[guider_data.guide_mode == "guide"] + + frame0 = frame_data.frameno.min() + framen = frame_data.frameno.max() + + isot = frame_data.iloc[0].date_obs + sjd = get_sjd("LCO", Time(isot, format="isot").to_datetime()) + + # Gets the range of FWHM in the sequence + fwhm0: float | None = None + fwhmn: float | None = None + fwhm_median: float | None = None + fwhm_medians = guiding.fwhm.dropna() + if len(fwhm_medians) > 0: + fwhm0 = fwhm_medians.iloc[0] + fwhmn = fwhm_medians.iloc[-1] + fwhm_median = float(numpy.median(fwhm_medians)) + + # Determine the PA drift due to k-mirror tracking. + frame_pa = guiding.loc[:, ["frameno", "pa"]].dropna() + pa_min: float = numpy.nan + pa_max: float = numpy.nan + pa_drift: float = numpy.nan + + pa_coeffs = numpy.array([numpy.nan, numpy.nan]) + drift_warn: bool = True + + if telescope == "spec": + pa_min = frame_pa.pa.min() + pa_max = frame_pa.pa.max() + pa_drift = numpy.abs(pa_min - pa_max) + drift_warn = False + + elif len(frame_pa) >= 2: + pa_coeffs = polyfit_with_sigclip( + frame_pa.frameno.to_numpy(numpy.float32), + frame_pa.pa.to_numpy(numpy.float32), + sigma=3, + deg=1, ) - create_master_coadd(frames, outpath=outpath, log=log, **kwargs) + pa_vals = numpy.polyval(pa_coeffs, frame_pa.frameno) + pa_min = numpy.round(pa_vals.min(), 4) + pa_max = numpy.round(pa_vals.max(), 4) + pa_drift = numpy.round(numpy.abs(pa_min - pa_max), 4) + drift_warn = pa_drift > config["coadds"]["warnings"]["pa_drift"] -def process_all_spec_frames(path: pathlib.Path | str, **kwargs): - """Processes all the spectrograph frames in a directory.""" + wcs_header = [] + pa_warn: bool = False + if solution.wcs is not None: + wcs_header = solution.wcs.to_header() - path = pathlib.Path(path) - spec_files = sorted(path.glob("sdR-*-b1-*.fits.gz")) + pa_field = guider_data.pa_field.iloc[0] + pa = solution.pa + pa_warn_threshold = config["coadds"]["warnings"]["pa_error"] - for file in spec_files: - # Skip cals and frames without any guider information. 
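Both create_coadd_header() and create_global_header() estimate the PA drift by fitting a sigma-clipped straight line of PA versus frame number through polyfit_with_sigclip(), a helper (presumably in tools.py) that this patch does not show. A rough sketch of what such a helper typically does, assuming x and y are float numpy arrays as passed above (the actual implementation may clip differently):

    import numpy
    from astropy.stats import sigma_clip

    def polyfit_with_sigclip_sketch(x, y, sigma=3.0, deg=1):
        """Illustrative only: reject outliers in y, then fit a polynomial."""
        clipped = sigma_clip(y, sigma=sigma, masked=True)
        good = ~clipped.mask                        # keep unclipped points
        return numpy.polyfit(x[good], y[good], deg)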
-        header = fits.getheader(str(file))
+    if pa_field is not None:
+        pa_warn = abs(angle_difference(pa, pa_field)) > pa_warn_threshold

-        if header["IMAGETYP"] != "object":
-            continue
+    header = header_from_model("GLOBAL_COADD")

-        process_spec_frame(file, fail_silent=True, **kwargs)
+    # Basic info
+    header["TELESCOP"] = frame_data.iloc[0].telescope
+    header["CAMNAME"] = frame_data.iloc[0].camera
+    header["MJD"] = sjd
+    header.insert("TELESCOP", ("", "/*** BASIC DATA ***/"))

+    # Frame info
+    header["FRAME0"] = frame0
+    header["FRAMEN"] = framen
+    header["NFRAMES"] = framen - frame0 + 1

-def get_guider_files_from_spec(
-    spec_file: str | pathlib.Path,
-    telescope: str | None = None,
-    agcam_path: str | pathlib.Path = "/data/agcam",
-    camera: str | None = None,
-):
-    """Returns the AG files taken during a spectrograph exposure.
+    header["OBSTIME0"] = frame_data.iloc[0].date_obs
+    header["OBSTIMEN"] = frame_data.iloc[-1].date_obs

-    A convenience function that reads the header of the spectrograph frame,
-    extracts the guide frames range and returns a list of those frames
-    filenames (missing frames are skipped).
+    header["FWHM0"] = nan_or_none(fwhm0, 3)
+    header["FWHMN"] = nan_or_none(fwhmn, 3)
+    header["FHHMMED"] = nan_or_none(fwhm_median, 3)

-    Parameters
+    if telescope != "spec":
+        header["PACOEFFA"] = nan_or_none(pa_coeffs[0])
+        header["PACOEFFB"] = nan_or_none(pa_coeffs[1])
+
+    header["PAMIN"] = nan_or_none(pa_min, 4)
+    header["PAMAX"] = nan_or_none(pa_max, 4)
+    header["PADRIFT"] = nan_or_none(pa_drift, 4)
+
+    header["ZEROPT"] = nan_or_none(solution.zero_point, 3)
+
+    header["SOLVED"] = solution.solved
+    header.insert("FRAME0", ("", "/*** GLOBAL FRAME PARAMETERS ***/"))
+
+    # Pointing
+    if telescope != "spec":
+        header["XFFPIX"] = guider_data.x_ff_pixel.iloc[0]
+        header["ZFFPIX"] = guider_data.z_ff_pixel.iloc[0]
+        header["RAFIELD"] = nan_or_none(guider_data.ra_field.iloc[0], 6)
+        header["DECFIELD"] = nan_or_none(guider_data.dec_field.iloc[0], 6)
+        header["PAFIELD"] = nan_or_none(guider_data.pa_field.iloc[0], 4)
+        header["RAMEAS"] = nan_or_none(guider_data.ra.iloc[0], 6)
+        header["DECMEAS"] = nan_or_none(guider_data.dec.iloc[0], 6)
+        header["PAMEAS"] = nan_or_none(guider_data.pa.iloc[0], 4)
+
+    # Warnings
+    header["WARNPA"] = pa_warn
+    header["WARNPADR"] = drift_warn
+    header["WARNTRAN"] = False
+    header.insert("WARNPA", ("", "/*** WARNINGS ***/"))
+
+    if solution.wcs is not None:
+        header.extend(wcs_header)
+        header.insert("WCSAXES", ("", "/*** CO-ADDED WCS ***/"))
+
+    return header
+
+
+def reprocess_agcam(file: AnyPath, overwrite: bool = False, db_connection_params={}):
+    """Reprocesses an old file and converts it to the ``>=0.4.0`` format.
+
+    .. warning::
+        This function reprocesses a file on a best-effort basis. The astrometric
+        solution is calculated using astrometry.net, which may be different
+        from the original solution.
+
+    Parameters
    ----------
-    spec_file
-        The path to the spectrograph frame to use to determine the range.
-    telescope
-        The telescope for which to extract the range of guide/AG frames.
-        Defaults to the value in the configuration file.
-    agcam_path
-        The ``agcam`` path where AG frames are ordered by SJD.
-    camera
-        The camera, east or west, for which to select frames. If not provided
-        selects both cameras.
+    file
+        The file to reprocess.
+    overwrite
+        If `False`, an existing reprocessed file is returned without regenerating it.
+    db_connection_params
+        Database connection details to query Gaia.
Returns ------- - frames - A list of AG file names that match the range of guide frames - taken during the spectrograph exposure. Missing frames in the - range are not included. + processed + The path to the processed file. The processed file is written relative + to the parent of the input file as ``reprocessed/``. Note that + the reprocessed file does not include the raw original data to + save space since that would not have changed. Reprocessed files are + assigned ``GUIDERV=0.99.0``. """ - spec_file = pathlib.Path(spec_file) - if not spec_file.exists(): - raise FileExistsError(f"Cannot find file {spec_file!s}") + file = pathlib.Path(file) + reproc_path = file.parent / "reprocessed" / file.name + sources_path = reproc_path.with_suffix(".parquet") - header = fits.getheader(str(spec_file)) + if reproc_path.exists() and not overwrite: + log.debug(f"Found reprocessed file {reproc_path!s}") + return reproc_path - if telescope is None: - telescope = config["telescope"] + hdul_orig = fits.open(file) - assert isinstance(telescope, str) + if "PROC" not in hdul_orig: + # Just return, there's an informative error later. + return - sjd = get_sjd("LCO", date=Time(header["OBSTIME"], format="isot").to_datetime()) + log.warning(f"File {file!s} is too old and will be reprocessed.") - agcam_path = pathlib.Path(agcam_path) / str(sjd) - if not agcam_path.exists(): - raise FileExistsError(f"Cannot find agcam path {agcam_path!s}") + if "RAW" in hdul_orig: + raw = hdul_orig["RAW"].header + else: + raw = hdul_orig[0].header - frame0 = header.get(f"G{telescope.upper()}FR0", None) - frame1 = header.get(f"G{telescope.upper()}FRN", None) + proc = hdul_orig["PROC"].header if "PROC" in hdul_orig else {} - if frame0 is None or frame1 is None: - time0 = Time(header["INTSTART"]) - time1 = Time(header["INTEND"]) - files = get_files_in_time_range( - agcam_path, - time0, - time1, - pattern=f"lvm.{telescope}.*.fits", + sources = extract_sources(file) + + solution: AstrometrySolution = solve_camera_with_astrometrynet( + sources, + ra=raw["RA"], + dec=raw["DEC"], + ) + wcs = solution.wcs + + # Now match with Gaia. 
+    if solution.wcs is not None:
+        sources, _ = match_with_gaia(
+            solution.wcs,
+            sources,
+            concat=True,
+            db_connection_params=db_connection_params,
         )

-        if len(files) == 0:
-            raise ValueError(f"Cannot determine the guider frames for {spec_file!s}")
+    darkfile = proc.get("DARKFILE", None)
+    if darkfile:
+        darkfile = os.path.basename(darkfile)
+
+    new_proc = header_from_model("PROC")
+    new_proc["TELESCOP"] = raw["TELESCOP"]
+    new_proc["CAMNAME"] = raw["CAMNAME"]
+    new_proc["DARKFILE"] = darkfile
+    new_proc["DIRNAME"] = str(file.parent)
+    new_proc["GUIDERV"] = "0.99.0"
+    new_proc["SOURCESF"] = sources_path.name
+    new_proc["PA"] = nan_or_none(get_crota2(wcs), 4) if wcs else None
+    new_proc["ZEROPT"] = nan_or_none(sources.zp.dropna().median())
+    new_proc["WCSMODE"] = "none" if wcs is None else "astrometrynet"
+    new_proc["ORIGFILE"] = (file.name, "Original file name")
+    new_proc["REPROC"] = (True, "Has this file been reprocessed?")
+
+    if wcs is not None:
+        new_proc.extend(wcs.to_header())
+
+    reproc_path.parent.mkdir(parents=True, exist_ok=True)
+
+    hdul = fits.HDUList(
+        [
+            fits.PrimaryHDU(),
+            fits.ImageHDU(header=raw, name="RAW"),
+            fits.ImageHDU(header=new_proc, name="PROC"),
+        ]
+    )
+    hdul.writeto(str(reproc_path), overwrite=True)
+    hdul.close()

-        return sorted(files)
+    sources.to_parquet(sources_path)

-    return get_frame_range(agcam_path, telescope, frame0, frame1, camera=camera)
+    return reproc_path


-def get_files_in_time_range(
-    path: pathlib.Path,
-    time0: Time,
-    time1: Time,
-    pattern: str = "*",
+def reprocess_legacy_guider_frame(
+    root: pathlib.Path,
+    frameno: int,
+    telescope: str,
+    overwrite: bool = False,
 ):
-    """Returns all files in a directory with creation time in the time range."""
+    """Reprocesses a legacy ``proc-`` file.
+
+    .. warning::
+        This function reprocesses a file on a best-effort basis. Usually
+        the individual frames used to generate the solution have also been
+        reprocessed and the results may differ from those actually observed
+        on sky.
+
+    Parameters
+    ----------
+    root
+        The root path (usually the ``agcam`` directory for the given MJD).
+    frameno
+        The frame number.
+    telescope
+        The telescope that took the images.
+    overwrite
+        If `False` and a reprocessed ``lvm.guider`` file has already been
+        generated, that file is used.
+
+    Returns
+    -------
+    processed
+        The path to the processed file. The processed file is written relative
+        to the parent of the input file as
+        ``reprocessed/lvm.{telescope}.guider_{frameno:08d}.fits``. Reprocessed
+        files are assigned ``GUIDERV=0.99.0``. The sources ``.parquet`` table
+        file is also generated.
+ + + """ + + log_h = f"{telescope}-{frameno}:" + + proc_file = root / f"proc-lvm.{telescope}.agcam_{frameno:08d}.fits" + guider_file = root / "reprocessed" / f"lvm.{telescope}.guider_{frameno:08d}.fits" + guider_sources_file = guider_file.with_suffix(".parquet") - files = path.glob(pattern) + if not proc_file.exists(): + raise FileNotFoundError(f"Cannot find file {proc_file!s}") + + if overwrite is False and guider_file.exists(): + log.debug(f"{log_h} found reprocessed lvm.guider file {guider_file!s}.") + return guider_file + + log.warning(f"{log_h} using legacy proc- file {proc_file!s}.") + + proc = dict(fits.getheader(proc_file, "ASTROMETRY")) + + gdata_header = header_from_model("GUIDERDATA_HEADER") + for key in gdata_header: + if key in proc: + gdata_header[key] = proc[key] + + gdata_header["GUIDERV"] = "0.99.0" + gdata_header["DIRNAME"] = str(guider_file.parent) + gdata_header["SOURCESF"] = guider_sources_file.name + + gdata_header["XFFPIX"] = config["xz_full_frame"][0] + gdata_header["ZFFPIX"] = config["xz_full_frame"][1] + + gdata_header["GUIDMODE"] = "acquisition" if proc["ACQUISIT"] else "guide" + + # Get the sources. Try first the location of the new-style sources file. + # Otherwise check if there's a reprocessed file. + sources: list[pandas.DataFrame] = [] + for key in ["FILE0", "FILE1"]: + filex = proc.get(key, None) + if filex is None: + continue + + filex = pathlib.Path(filex).name + + found: bool = False + if (root / filex).with_suffix(".parquet").exists(): + sources_file = (root / filex).with_suffix(".parquet") + sources.append(pandas.read_parquet(sources_file)) + found = True + + elif (root / "reprocessed" / filex).with_suffix(".parquet").exists(): + sources_file = (root / "reprocessed" / filex).with_suffix(".parquet") + sources.append(pandas.read_parquet(sources_file)) + found = True + + if found: + if "east" in filex: + gdata_header["FILEEAST"] = filex + else: + gdata_header["FILEWEST"] = filex + + if len(sources) == 0: + raise RuntimeError(f"No sources found for guider frame {proc_file!s}") + + sources_concat = pandas.concat(sources) + if len(sources_concat.ra.dropna()) > 5: + wcs = wcs_from_gaia(sources_concat, xy_cols=["x_ff", "y_ff"]) + else: + log.warning(f"Insufficient Gaia matches for {proc_file!s}. 
Cannot fit WCS.") + wcs = None + + if wcs is not None: + gdata_header.extend(wcs.to_header()) + + gdata_header["PAMEAS"] = nan_or_none(get_crota2(wcs), 4) + gdata_header["ZEROPT"] = nan_or_none(sources_concat.zp.dropna().median(), 3) + gdata_header["SOLVED"] = True + + hdul = fits.HDUList( + [ + fits.PrimaryHDU(), + fits.ImageHDU(header=gdata_header, name="GUIDERDATA"), + ] + ) + hdul.writeto(str(guider_file), overwrite=True) - dt0 = datetime.timestamp(time0.to_datetime()) - dt1 = datetime.timestamp(time1.to_datetime()) + sources_concat.to_parquet(guider_sources_file) - return [ - file - for file in files - if os.path.getctime(file) > dt0 and os.path.getctime(file) < dt1 - ] + return guider_file diff --git a/src/lvmguider/dataclasses.py b/src/lvmguider/dataclasses.py index 8a9f556..216cc08 100644 --- a/src/lvmguider/dataclasses.py +++ b/src/lvmguider/dataclasses.py @@ -9,19 +9,21 @@ from __future__ import annotations import pathlib +import warnings from dataclasses import dataclass, field +from typing import Literal + import nptyping as npt import numpy import pandas from astropy.io import fits -from astropy.table import Table from astropy.time import Time -from astropy.wcs import WCS +from astropy.wcs import WCS, FITSFixedWarning from packaging.version import Version -from lvmguider import config -from lvmguider.tools import get_frameno +from lvmguider import config, log +from lvmguider.tools import dataframe_from_model, get_frameno from lvmguider.transformations import get_crota2 from lvmguider.types import ARRAY_2D_F32 @@ -29,24 +31,20 @@ __all__ = ["CameraSolution", "GuiderSolution", "FrameData"] -@dataclass -class CameraSolution: - """A camera solution, including the determined WCS.""" +WCS_MODE_T = Literal["none"] | Literal["astrometrynet"] | Literal["gaia"] - frameno: int - camera: str - path: pathlib.Path - sources: pandas.DataFrame | None = None + +@dataclass(kw_only=True) +class BaseSolution: + """A base class for an object with an astrometric solution.""" + + telescope: str wcs: WCS | None = None matched: bool = False - zero_point: float = numpy.nan - pa: float = numpy.nan - ref_frame: pathlib.Path | None = None - wcs_mode: str = "none" def __post_init__(self): - if numpy.isnan(self.pa) and self.wcs is not None: - self.pa = get_crota2(self.wcs) + if self.sources is None: + self.sources = dataframe_from_model("SOURCES") @property def solved(self): @@ -54,6 +52,55 @@ def solved(self): return self.wcs is not None + @property + def pa(self): + """Returns the position angle from the WCS.""" + + if self.wcs is not None: + return get_crota2(self.wcs) + + return numpy.nan + + @property + def zero_point(self): + """Returns the median zero point.""" + + if self.sources is not None: + return float(self.sources.zp.dropna().median()) + + return numpy.nan + + @property + def fwhm(self): + """Returns the FWHM from the extracted sources.""" + + if self.sources is not None: + return self.sources.loc[self.sources.valid == 1].fwhm.dropna().median() + + return numpy.nan + + @property + def pointing(self) -> npt.NDArray[npt.Shape["2"], npt.Float64]: + """Returns the camera pointing at its central pixel.""" + + if not self.wcs: + return numpy.array([numpy.nan, numpy.nan]) + + skyc = self.wcs.pixel_to_world(*config["xz_ag_frame"]) + return numpy.array([skyc.ra.deg, skyc.dec.deg]) + + +@dataclass(kw_only=True) +class CameraSolution(BaseSolution): + """A camera solution, including the determined WCS.""" + + frameno: int + camera: str + path: pathlib.Path + sources: pandas.DataFrame | None = None + 
ref_frame: pathlib.Path | None = None + wcs_mode: WCS_MODE_T = "none" + @classmethod def open(cls, file: str | pathlib.Path): """Creates an instance from an ``lvm.agcam`` file.""" @@ -64,9 +111,6 @@ def open(cls, file: str | pathlib.Path): if "PROC" not in hdul: raise ValueError("HDU list does not have a PROC extension.") - if "SOURCES" not in hdul: - raise ValueError("HDU list does not have a SOURCES extension.") - raw = hdul["RAW"].header proc = hdul["PROC"].header @@ -79,30 +123,34 @@ def open(cls, file: str | pathlib.Path): "The file was generated with an unsupported version of lvmguider." ) - sources = Table(hdul["SOURCES"].data).to_pandas() + sources: pandas.DataFrame | None = None + if proc["SOURCESF"] != "": + dirname = file.parent + sources = pandas.read_parquet(dirname / proc["SOURCESF"]) - wcs_mode = proc["WCSMODE"] - wcs = WCS(proc) if wcs_mode != "none" else None + with warnings.catch_warnings(): + warnings.simplefilter("ignore", category=FITSFixedWarning) + wcs_mode = proc["WCSMODE"] + wcs = WCS(proc) if wcs_mode != "none" else None ref_file = proc["REFFILE"] ref_frame = pathlib.Path(ref_file) if ref_file is not None else None return CameraSolution( - get_frameno(file), - raw["CAMNAME"], - file.absolute(), - sources, + frameno=get_frameno(file), + telescope=raw["TELESCOP"], + camera=raw["CAMNAME"], + path=file.absolute(), + sources=sources, wcs=wcs, wcs_mode=wcs_mode, - matched=len(sources.ra.dropna()) > 0, - pa=proc["PA"] or numpy.nan, - zero_point=proc["ZEROPT"] or numpy.nan, + matched=len(sources.ra.dropna()) > 0 if sources is not None else False, ref_frame=ref_frame, ) -@dataclass -class GuiderSolution: +@dataclass(kw_only=True) +class GuiderSolution(BaseSolution): """A class to hold an astrometric solution determined by the guider. 
This class abstracts an astrometric solution regardless of whether it was @@ -111,30 +159,30 @@ class GuiderSolution: """ frameno: int + telescope: str solutions: list[CameraSolution] guide_pixel: npt.NDArray[npt.Shape["2"], npt.Float32] - mf_wcs: WCS | None = None - pa: float = numpy.nan + ra_field: float = numpy.nan + dec_field: float = numpy.nan + pa_field: float = numpy.nan ra_off: float = numpy.nan dec_off: float = numpy.nan + pa_off: float = numpy.nan axis0_off: float = numpy.nan axis1_off: float = numpy.nan - pa_off: float = numpy.nan correction_applied: bool = False correction: list[float] = field(default_factory=lambda: [0.0, 0.0, 0.0]) + guide_mode: str = "guide" - def __post_init__(self): - self.sources = pandas.concat( + @property + def sources(self): + """Concatenates sources from the co-added solutions.""" + + return pandas.concat( [cs.sources for cs in self.solutions if cs.sources is not None], axis=0, ) - zps = self.sources.loc[:, "zp"].copy().dropna() - self.zero_point = zps.median() - - if numpy.isnan(self.pa) and self.mf_wcs is not None: - self.pa = get_crota2(self.mf_wcs) - def __getitem__(self, key: str): """Returns the associated camera solution.""" @@ -146,29 +194,29 @@ def __getitem__(self, key: str): @property def pointing(self) -> npt.NDArray[npt.Shape["2"], npt.Float64]: - """Returns the telescope pointing at boresight.""" + """Returns the telescope pointing at boresight from the WCS.""" - if not self.mf_wcs: + if not self.wcs: return numpy.array([numpy.nan, numpy.nan]) - skyc = self.mf_wcs.pixel_to_world(*config["xz_full_frame"]) + skyc = self.wcs.pixel_to_world(*config["xz_full_frame"]) return numpy.array([skyc.ra.deg, skyc.dec.deg]) @property def pixel_pointing(self) -> npt.NDArray[npt.Shape["2"], npt.Float64]: - """Returns the telescope pointing at the master frame pixel.""" + """Returns the telescope pointing at the full frame pixel from the WCS.""" - if not self.mf_wcs: + if not self.wcs: return numpy.array([numpy.nan, numpy.nan]) - skyc = self.mf_wcs.pixel_to_world(*self.guide_pixel) + skyc = self.wcs.pixel_to_world(*self.guide_pixel) return numpy.array([skyc.ra.deg, skyc.dec.deg]) @property def solved(self): """Was the frame solved?""" - return self.mf_wcs is not None + return self.wcs is not None @property def cameras(self): @@ -211,20 +259,31 @@ def open(cls, file: str | pathlib.Path, dirname: str | pathlib.Path | None = Non "The file was generated with an unsupported version of lvmguider." ) - mf_wcs = WCS(guider_data) if guider_data["SOLVED"] else None + with warnings.catch_warnings(): + warnings.simplefilter("ignore", category=FITSFixedWarning) + wcs = WCS(guider_data) if guider_data["SOLVED"] else None solutions: list[CameraSolution] = [] for key in ["FILEEAST", "FILEWEST"]: if guider_data[key] is not None: - dirname = pathlib.Path(dirname or guider_data["DIRNAME"]) + dirname = pathlib.Path(dirname or guider_data["DIRNAME"] or file.parent) solutions.append(CameraSolution.open(dirname / guider_data[key])) + if "XFFPIX" in guider_data: + guide_pixel = numpy.array([guider_data["XFFPIX"], guider_data["ZFFPIX"]]) + elif "XMFPIX" in guider_data: + # From when we called it master frame. + guide_pixel = numpy.array([guider_data["XMFPIX"], guider_data["ZMFPIX"]]) + else: + log.warning("Missing pixel information. 
Default to central pixel.") + guide_pixel = config["xz_full_frame"] + return GuiderSolution( - get_frameno(file), - solutions, - numpy.array([guider_data["XMFPIX"], guider_data["ZMFPIX"]]), - mf_wcs=mf_wcs, - pa=guider_data, + frameno=get_frameno(file), + telescope=guider_data["TELESCOP"], + solutions=solutions, + guide_pixel=guide_pixel, + wcs=wcs, ra_off=guider_data["OFFRAMEA"] or numpy.nan, dec_off=guider_data["OFFDEMEA"] or numpy.nan, pa_off=guider_data["OFFPAMEA"] or numpy.nan, @@ -239,13 +298,13 @@ def open(cls, file: str | pathlib.Path, dirname: str | pathlib.Path | None = Non ) -@dataclass +@dataclass(kw_only=True) class FrameData: """Data associated with a frame.""" - file: pathlib.Path - raw_header: fits.Header frameno: int + path: pathlib.Path + raw_header: dict date_obs: Time sjd: int camera: str @@ -254,13 +313,135 @@ class FrameData: image_type: str kmirror_drot: float focusdt: float - proc_header: fits.Header | None = None - proc_file: pathlib.Path | None = None - guide_error: float | None = None - fwhm_median: float | None = None - fwhm_std: float | None = None - guide_mode: str = "guide" + solution: CameraSolution stacked: bool = False - wcs: WCS | None = None - wcs_mode: str | None = None + reprocessed: bool = False data: ARRAY_2D_F32 | None = None + + @property + def sources(self): + """Accessor to the camera solution sources.""" + + return self.solution.sources + + +@dataclass(kw_only=True) +class CoAdd_CameraSolution(CameraSolution): + """A camera solution for a co-added frame.""" + + path: pathlib.Path = pathlib.Path("") + coadd_image: ARRAY_2D_F32 | None = None + frames: list[FrameData] | None = None + sources: pandas.DataFrame | None = None + sigmaclip: bool = False + sigmaclip_sigma: float | None = None + + def frame_data(self): + """Returns a Pandas data frame from a list of `.FrameData`.""" + + df = dataframe_from_model("FRAMEDATA") + + if self.frames is None: + return df + + for ii, fd in enumerate(self.frames): + new_row = dict( + frameno=fd.frameno, + date_obs=fd.date_obs.isot, + camera=fd.camera, + telescope=fd.telescope, + exptime=numpy.float32(fd.exptime), + kmirror_drot=numpy.float32(fd.kmirror_drot), + focusdt=numpy.float32(fd.focusdt), + fwhm=numpy.float32(fd.solution.fwhm), + pa=numpy.float32(fd.solution.pa), + zero_point=numpy.float32(fd.solution.zero_point), + stacked=int(fd.stacked), + solved=int(fd.solution.solved), + wcs_mode=fd.solution.wcs_mode, + ) + df.loc[ii, list(new_row)] = list(new_row.values()) + + df = df.sort_values(["frameno", "camera"]) + + df.reset_index(drop=True, inplace=True) + + return df + + +@dataclass(kw_only=True) +class GlobalSolution(BaseSolution): + """A global solution from co-added frames.""" + + coadd_solutions: list[CoAdd_CameraSolution] + guider_solutions: list[GuiderSolution] + path: pathlib.Path = pathlib.Path("") + matched: bool = True + + @property + def sources(self): + """Concatenates sources from the co-added solutions.""" + + return pandas.concat( + [cs.sources for cs in self.coadd_solutions if cs.sources is not None], + axis=0, + ) + + @property + def pointing(self) -> npt.NDArray[npt.Shape["2"], npt.Float64]: + """Returns the camera pointing at its central pixel.""" + + if not self.wcs: + return numpy.array([numpy.nan, numpy.nan]) + + skyc = self.wcs.pixel_to_world(*config["xz_full_frame"]) + return numpy.array([skyc.ra.deg, skyc.dec.deg]) + + def frame_data(self): + """Concatenates the frame data and returns a Pandas data frame.""" + + frame_data = pandas.concat([cs.frame_data() for cs in 
self.coadd_solutions]) + frame_data.sort_values(["frameno", "camera"], inplace=True) + + return frame_data + + def guider_data(self): + """Constructs a data frame with guider data.""" + + df = dataframe_from_model("GUIDERDATA_FRAME") + + for ii, gs in enumerate(self.guider_solutions): + pa = get_crota2(gs.wcs) if gs.wcs is not None else numpy.nan + pointing = gs.pointing + + new_row = dict( + frameno=gs.frameno, + telescope=gs.telescope, + fwhm=numpy.float32(gs.fwhm), + pa=numpy.float32(pa), + zero_point=numpy.float32(gs.zero_point), + solved=int(gs.solved), + guide_mode=gs.guide_mode, + x_ff_pixel=numpy.float32(gs.guide_pixel[0]), + z_ff_pixel=numpy.float32(gs.guide_pixel[1]), + ra=numpy.float64(pointing[0]), + dec=numpy.float64(pointing[1]), + ra_field=numpy.float64(gs.ra_field), + dec_field=numpy.float64(gs.dec_field), + pa_field=numpy.float32(gs.pa_field), + ra_off=numpy.float32(gs.ra_off), + dec_off=numpy.float32(gs.dec_off), + pa_off=numpy.float32(gs.pa_off), + axis0_off=numpy.float32(gs.axis0_off), + axis1_off=numpy.float32(gs.axis1_off), + applied=int(gs.correction_applied), + ax0_applied=numpy.float32(gs.correction[0]), + ax1_applied=numpy.float32(gs.correction[1]), + rot_applied=numpy.float32(gs.correction[2]), + ) + df.loc[ii, list(new_row)] = list(new_row.values()) + + df = df.sort_values(["frameno"]) + df.reset_index(drop=True, inplace=True) + + return df diff --git a/src/lvmguider/etc/datamodel.yml b/src/lvmguider/etc/datamodel.yml index 2f1adf8..f20c8b0 100644 --- a/src/lvmguider/etc/datamodel.yml +++ b/src/lvmguider/etc/datamodel.yml @@ -7,6 +7,8 @@ PROC: SOURCESF: [null, 'Sources file'] PA: [null, '[deg] Position angle from WCS'] ZEROPT: [null, '[mag] Zero point of the LVM magnitude'] + ORIGFILE: [null, 'Original file name'] + REPROC: [false, 'Has this file been reprocessed?'] REFFILE: [null, 'Reference file for astrometric solution'] WCSMODE: ['none', 'Source of astrometric solution'] @@ -51,11 +53,11 @@ SOURCES: yrms: 'f8' yfitvalid: 'Int8' valid: 'Int8' - fwhm: 'f8' + fwhm: 'f4' telescope: 'U4' camera: 'U4' - x_mf: 'f8' - y_mf: 'f8' + x_ff: 'f8' + y_ff: 'f8' source_id: 'Int64' ra: 'f8' dec: 'f8' @@ -71,7 +73,7 @@ SOURCES: ap_fluxerr: 'f4' zp: 'f4' -GUIDERDATA: +GUIDERDATA_HEADER: GUIDERV: [null, 'Version of lvmguider'] TELESCOP: [null, 'Telescope that took the images'] GUIDMODE: ['acquition', 'Acquisition or guiding?'] @@ -82,14 +84,14 @@ GUIDERDATA: DIRNAME: [null, 'Original file directory'] SOURCESF: [null, 'Sources file'] SOLVED: [False, 'Astrometric solution found?'] - XMFPIX: [null, 'The x pixel of the master frame'] - ZMFPIX: [null, 'The z pixel of the master frame'] + XFFPIX: [null, 'The x pixel of the full frame'] + ZFFPIX: [null, 'The z pixel of the full frame'] RAFIELD: [null, '[deg] Field RA'] DECFIELD: [null, '[deg] Field Dec'] PAFIELD: [null, '[deg] Field PA'] RAMEAS: [null, '[deg] Measured RA position (boresight)'] DECMEAS: [null, '[deg] Measured Dec position (boresight)'] - PAMEAS: [null, '[deg] Position angle from master frame WCS'] + PAMEAS: [null, '[deg] Position angle from full frame WCS'] OFFRAMEA: [null, '[arcsec] RA measured offset'] OFFDEMEA: [null, '[arcsec] Dec measured offset'] OFFA0MEA: [null, '[arcsec] Motor axis 0 measured offset'] @@ -108,3 +110,108 @@ GUIDERDATA: AX1KI: [null, 'Axis 1 PID I term'] AX1KD: [null, 'Axis 1 PID D term'] ZEROPT: [null, '[mag] Zero point of the LVM magnitude'] + +FRAMEDATA: + frameno: 'Int16' + telescope: 'S4' + camera: 'S4' + date_obs: 'S25' + exptime: 'f4' + kmirror_drot: 'f4' + focusdt: 'f4' + fwhm: 'f4' + pa: 
'f4' + zero_point: 'f4' + stacked: 'Int8' + solved: 'Int8' + wcs_mode: 'S15' + +GUIDERDATA_FRAME: + frameno: 'Int16' + telescope: 'S4' + solved: 'Int8' + guide_mode: 'S12' + fwhm: 'f4' + zero_point: 'f4' + x_ff_pixel: 'f4' + z_ff_pixel: 'f4' + ra: 'f8' + dec: 'f8' + pa: 'f4' + ra_field: 'f8' + dec_field: 'f8' + pa_field: 'f4' + ra_off: 'f4' + dec_off: 'f4' + pa_off: 'f4' + axis0_off: 'f4' + axis1_off: 'f4' + applied: 'Int8' + ax0_applied: 'f4' + ax1_applied: 'f4' + rot_applied: 'f4' + +CAMERA_COADD: + TELESCOP: [null, 'Telescope that took the data'] + CAMNAME: [null, 'Camera name'] + INSTRUME: ['LVM', 'SDSS-V Local Volume Mapper'] + OBSERVAT: ['LCO', 'Observatory'] + MJD: [null, 'SDSS MJD (MJD+0.4)'] + FRAME0: [null, 'First frame in guide sequence'] + FRAMEN: [null, 'Last frame in guide sequence'] + NFRAMES: [null - frame0 + 1, 'Number of frames in sequence'] + STACK0: [null, 'First stacked frame'] + STACKN: [null, 'Last stacked frame'] + NSTACKED: [null, 'Number of frames stacked'] + COESTIM: ['none', 'Estimator used to stack data'] + SIGCLIP: [false, 'Was the stack sigma-clipped?'] + SIGMA: [null, 'Sigma used for sigma-clipping'] + OBSTIME0: [null, 'DATE-OBS of first frame'] + OBSTIMEN: [null, 'DATE-OBS of last frame'] + FWHM0: [null, '[arcsec] FWHM of sources in first frame'] + FWHMN: [null, '[arcsec] FWHM of sources in last frame'] + FHHMMED: [null, '[arcsec] Median of the FHWM of all frames'] + COFWHM: [null, '[arcsec] Co-added median FWHM'] + COFWHMST: [null, '[arcsec] Co-added FWHM standard deviation'] + PACOEFFA: [null, 'Slope of PA fit'] + PACOEFFB: [null, 'Intercept of PA fit'] + PAMIN: [null, '[deg] Minimum PA from WCS'] + PAMAX: [null, '[deg] Maximum PA from WCS'] + PADRIFT: [null, '[deg] PA drift in frame range'] + ZEROPT: [null, '[mag] Instrumental zero-point'] + SOLVED: [false, 'Was the co-added frame astrometrically solved?'] + WARNPADR: [null, 'PA drift > 0.1 degrees'] + WARNTRAN: [null, 'Transparency N magnitudes above photometric'] + WARNMATC: [null, 'Co-added frame could not be matched with Gaia'] + +GLOBAL_COADD: + TELESCOP: [null, 'Telescope that took the data'] + INSTRUME: ['LVM', 'SDSS-V Local Volume Mapper'] + OBSERVAT: ['LCO', 'Observatory'] + MJD: [null, 'SDSS MJD (MJD+0.4)'] + FRAME0: [null, 'First frame in guide sequence'] + FRAMEN: [null, 'Last frame in guide sequence'] + NFRAMES: [null - frame0 + 1, 'Number of frames in sequence'] + OBSTIME0: [null, 'DATE-OBS of first frame'] + OBSTIMEN: [null, 'DATE-OBS of last frame'] + FWHM0: [null, '[arcsec] FWHM of sources in first frame'] + FWHMN: [null, '[arcsec] FWHM of sources in last frame'] + FHHMMED: [null, '[arcsec] Median of the FHWM of all frames'] + PACOEFFA: [null, 'Slope of PA fit'] + PACOEFFB: [null, 'Intercept of PA fit'] + PAMIN: [null, '[deg] Minimum PA from WCS'] + PAMAX: [null, '[deg] Maximum PA from WCS'] + PADRIFT: [null, '[deg] PA drift in frame range'] + ZEROPT: [null, '[mag] Instrumental zero-point'] + SOLVED: [false, 'Was the global frame astrometrically solved?'] + XFFPIX: [null, 'The x pixel of the full frame'] + ZFFPIX: [null, 'The z pixel of the full frame'] + RAFIELD: [null, '[deg] Field RA'] + DECFIELD: [null, '[deg] Field Dec'] + PAFIELD: [null, '[deg] Field PA'] + RAMEAS: [null, '[deg] Measured RA position (boresight)'] + DECMEAS: [null, '[deg] Measured Dec position (boresight)'] + PAMEAS: [null, '[deg] Position angle from full frame WCS'] + WARNPA: [null, 'PA pointing error > 0.7 arcmin'] + WARNPADR: [null, 'PA drift > 0.7 arcmin'] + WARNTRAN: [null, 'Transparency N magnitudes above 
photometric'] diff --git a/src/lvmguider/etc/lvmguider.yml b/src/lvmguider/etc/lvmguider.yml index a438301..ea9ba93 100644 --- a/src/lvmguider/etc/lvmguider.yml +++ b/src/lvmguider/etc/lvmguider.yml @@ -20,6 +20,7 @@ min_rot_correction: 0.01 max_rot_correction: 3 guide_tolerance: 1 +pa_tolerance: 0.012 revert_to_acquistion_threshold: 2 xz_full_frame: [2500.0, 1000.0] @@ -31,6 +32,15 @@ offset: acquisition: 10.0 guide: 5.0 +coadds: + paths: + coadd_camera_path: coadds/lvm.{telescope}.coadd.{camname}_{frameno0:08d}_{frameno1:08d}.fits + coadd_path: coadds/lvm.{telescope}.coadd_{frameno0:08d}_{frameno1:08d}.fits + coadd_spec_path: coadds/lvm.{{telescope}}.coadd_s{specno:08d}.fits + warnings: + pa_error: 0.012 + pa_drift: 0.012 + site: lon: -70.70166667 lat: -29.00333333 diff --git a/src/lvmguider/extraction.py b/src/lvmguider/extraction.py index e2f13a0..d99613b 100644 --- a/src/lvmguider/extraction.py +++ b/src/lvmguider/extraction.py @@ -297,7 +297,10 @@ def extract_marginal( assert isinstance(sex_detections, pandas.DataFrame) assert isinstance(back, numpy.ndarray) - detections[sex_detections.columns] = sex_detections + for column in sex_detections.columns: + dtype = detections.dtypes[column] + detections[column] = sex_detections[column].astype(dtype) + detections.loc[:, "valid"] = 0 if exclude_border: @@ -340,13 +343,15 @@ def extract_marginal( axis=1, ) - detections.loc[:, fit_df.columns] = fit_df + for column in fit_df.columns: + dtype = detections.dtypes[column] + detections[column] = fit_df[column].astype(dtype) valid = (detections.xfitvalid == 1) & (detections.yfitvalid == 1) - detections.loc[:, "valid"] = valid + detections.loc[:, "valid"] = valid.astype(numpy.int8) # Calculate FWHM as average of xstd and ystd. - detections.loc[:, "fwhm"] = 0.5 * (detections.xstd + detections.ystd) + detections.loc[:, "fwhm"] = 0.5 * (detections.xstd + detections.ystd).astype("f4") assert sub is not None @@ -488,13 +493,13 @@ def extract_sources(filename: str | pathlib.Path, subtract_dark: bool = True): Returns ------- sources - A data frame with the extracted sources along with their master frame + A data frame with the extracted sources along with their full frame coordinates. Note that in SExtractor fashion, all the pixel coordinates assume that the centre of the lower left pixel is ``(1, 1)``. """ - from lvmguider.transformations import ag_to_master_frame + from lvmguider.transformations import ag_to_full_frame hdus = fits.open(filename) @@ -524,7 +529,7 @@ def extract_sources(filename: str | pathlib.Path, subtract_dark: bool = True): sources["telescope"] = telescope xy = sources.loc[:, ["x", "y"]].to_numpy() - mf_locs, _ = ag_to_master_frame(f"{telescope}-{camname[0]}", xy) - sources.loc[:, ["x_mf", "y_mf"]] = mf_locs + ff_locs, _ = ag_to_full_frame(f"{telescope}-{camname[0]}", xy) + sources.loc[:, ["x_ff", "y_ff"]] = ff_locs return sources diff --git a/src/lvmguider/guider.py b/src/lvmguider/guider.py index 0d55000..0029daa 100644 --- a/src/lvmguider/guider.py +++ b/src/lvmguider/guider.py @@ -26,16 +26,16 @@ from lvmguider.tools import ( elapsed_time, estimate_zeropoint, - get_dark_subtrcted_data, + get_dark_subtracted_data, get_frameno, get_guider_path, header_from_model, + nan_or_none, run_in_executor, update_fits, ) from lvmguider.transformations import ( delta_radec2mot_axis, - get_crota2, match_with_gaia, solve_camera_with_astrometrynet, wcs_from_gaia, @@ -68,7 +68,7 @@ class Guider: field_centre The field centre on which we want to guider (RA, Dec, PA). 
pixel - The ``(x,y)`` pixel of the master frame to use to determine the pointing. + The ``(x,y)`` pixel of the full frame to use to determine the pointing. Default to the central pixel. """ @@ -85,6 +85,7 @@ def __init__( self.config = command.actor.config self.guide_tolerance = self.config["guide_tolerance"] + self.pa_tolerance = self.config["pa_tolerance"] self.field_centre = field_centre self.pixel = pixel or self.config["xz_full_frame"] @@ -124,7 +125,7 @@ def last(self): return self.solutions[max(self.solutions)] def set_pixel(self, pixel_x: float | None = None, pixel_z: float | None = None): - """Sets the master frame pixel coordinates ``(x, z)`` on which to guide.""" + """Sets the full frame pixel coordinates ``(x, z)`` on which to guide.""" if pixel_x is None or pixel_z is None: new_pixel = self.config["xz_full_frame"] @@ -140,12 +141,13 @@ def set_pixel(self, pixel_x: float | None = None, pixel_z: float | None = None): def is_guiding(self): """Are we guiding?""" - return self.command.actor.status & GuiderStatus.GUIDING + return bool(self.command.actor.status & GuiderStatus.GUIDING) def revert_to_acquisition(self): """Reset the flags for acquisition mode.""" - self.command.warning("Reverting to acquisition.") + if self.is_guiding(): + self.command.warning("Reverting to acquisition.") self.command.actor._status &= ~GuiderStatus.GUIDING self.command.actor._status |= GuiderStatus.ACQUIRING @@ -223,9 +225,14 @@ async def guide_one( # Initial guider solution. guider_solution = GuiderSolution( - frameno, - camera_solutions, + frameno=frameno, + solutions=camera_solutions, + telescope=self.telescope, guide_pixel=numpy.array(self.pixel), + ra_field=self.field_centre[0], + dec_field=self.field_centre[1], + pa_field=self.field_centre[2], + guide_mode="guide" if self.is_guiding() else "acquisition", ) if guider_solution.n_cameras_solved == 0: @@ -233,17 +240,18 @@ async def guide_one( else: try: - mf_wcs = wcs_from_gaia(guider_solution.sources, ["x_mf", "y_mf"]) - guider_solution.mf_wcs = mf_wcs - guider_solution.pa = get_crota2(mf_wcs) + assert guider_solution.sources is not None + ff_wcs = wcs_from_gaia(guider_solution.sources, ["x_ff", "y_ff"]) + guider_solution.wcs = ff_wcs except RuntimeError as err: - self.command.error(f"Failed generating master frame WCS: {err}") + self.command.error(f"Failed generating full frame WCS: {err}") offset_radec, offset_motax, offset_pa = self.calculate_offset(guider_solution) guider_solution.ra_off = offset_radec[0] guider_solution.dec_off = offset_radec[1] + guider_solution.pa_off = offset_pa guider_solution.axis0_off = offset_motax[0] guider_solution.axis1_off = offset_motax[1] @@ -311,6 +319,8 @@ async def guide_one( finally: self.command.actor.status &= ~GuiderStatus.CORRECTING + guider_solution.correction = [*applied_motax.tolist(), applied_rot] + self.command.info( correction_applied={ "frameno": frameno, @@ -319,12 +329,21 @@ async def guide_one( } ) - guider_solution.correction = [*applied_motax.tolist(), corr_rot] + # Are we close enough to the field centre to guide using Gaia? + sep_reached = guider_solution.separation < self.guide_tolerance + + # Have we converged in PA? Note that we can only move in positive + # offsets in the k-mirror, so if the offset_pa is < 0 we've gone + # too far and we cannot easily fix it. That's why here we don't + # take abs(offset_pa) since self.pa_tolerance is positive. 
+ pa_reached = numpy.isnan(offset_pa) or offset_pa < self.pa_tolerance - reached = guider_solution.separation < self.guide_tolerance + # If the separation is > certain threshold (which usually is larger + # than the threshold for considering we are guiding in the first place) + # then we revert to acquisition. revert = guider_solution.separation > revert_to_acquistion_threshold - if reached and not self.is_guiding(): + if sep_reached and pa_reached and not self.is_guiding(): self.command.info("Guide tolerance reached. Starting to guide.") self.command.actor._status &= ~GuiderStatus.ACQUIRING self.command.actor._status &= ~GuiderStatus.DRIFTING @@ -347,7 +366,7 @@ async def solve_camera( frameno = get_frameno(file) hdul = fits.open(str(file)) - data, dark_sub = get_dark_subtrcted_data(file) + data, dark_sub = get_dark_subtracted_data(file) if not dark_sub: self.command.debug(f"No dark frame found for {file!s}. Fitting background.") @@ -356,7 +375,12 @@ async def solve_camera( sources_file = file.parent / file.with_suffix(".parquet") if not sources_file.exists(): self.command.warning(f"Cannot find sources file for camera {camname!r}.") - return CameraSolution(frameno, camname, file) + return CameraSolution( + frameno=frameno, + camera=camname, + path=file, + telescope=self.telescope, + ) sources = pandas.read_parquet(sources_file) matched_sources = sources.copy() @@ -441,15 +465,15 @@ async def solve_camera( sources.update(zp) camera_solution = CameraSolution( - frameno, - camname, - file, + frameno=frameno, + camera=camname, + path=file, sources=sources, wcs_mode=wcs_mode, wcs=wcs, matched=matched, ref_frame=ref_frame, - zero_point=zp.zp.median(), + telescope=self.telescope, ) if camera_solution.solved is False: @@ -499,7 +523,7 @@ def calculate_offset( offset_pa = numpy.nan else: pointing_pa = (solution.pa - 180) % 360 - offset_pa = field_pa - pointing_pa + offset_pa = (field_pa - pointing_pa) % 360 return ((ra_arcsec, dec_arcsec), (saz_diff_d, sel_diff_d), offset_pa) @@ -545,8 +569,8 @@ async def offset_telescope( max_ax_correction = self.config.get("max_ax_correction", 3600) - # min_rot_correction = self.config.get("min_rot_correction", 0.01) - # max_rot_correction = self.config.get("max_rot_correction", 3) + min_rot_correction = self.config.get("min_rot_correction", 0.01) + max_rot_correction = self.config.get("max_rot_correction", 3) if numpy.any(numpy.abs([off0, off1]) > max_ax_correction): self.command.error("Requested correction is too big. 
Not applying it.") @@ -576,18 +600,18 @@ async def offset_telescope( if cmd.status.did_fail: raise RuntimeError(f"Failed offsetting telescope {telescope}.") - # if self.config.get("has_kmirror", True) and self.is_rot_offset_allowed(): - # if off_rot > max_rot_correction: - # self.command.warning("Requested rotator correction is too big.") - # elif off_rot > min_rot_correction: - # km = f"lvm.{telescope}.km" - # cmd_km_str = f"slewAdjust --offset_angle {off_rot:.6f}" - # cmd_km = await self.command.send_command(km, cmd_km_str) + if self.config.get("has_kmirror", True) and self.is_rot_offset_allowed(): + if off_rot > max_rot_correction: + self.command.warning("Requested rotator correction is too big.") + elif off_rot > min_rot_correction: + km = f"lvm.{telescope}.km" + cmd_km_str = f"slewAdjust --offset_angle {off_rot:.6f}" + cmd_km = await self.command.send_command(km, cmd_km_str) - # if cmd_km.status.did_fail: - # self.command.error(f"Failed offsetting k-mirror {telescope}.") - # else: - # applied_rot = off_rot + if cmd_km.status.did_fail: + self.command.error(f"Failed offsetting k-mirror {telescope}.") + else: + applied_rot = off_rot return applied_radec, applied_motax, applied_rot @@ -600,17 +624,21 @@ def is_rot_offset_allowed(self): """ + if self.is_guiding(): + return False + ROT_MIN_ITERATIONS: int = 3 - if len(self.solutions) == 0: + if len(self.solutions) <= 1: return True n_last_rot_correction: int = 0 for frameno in list(self.solutions)[::-1]: + n_last_rot_correction += 1 + if abs(self.solutions[frameno].correction[-1]) > 0: return n_last_rot_correction >= ROT_MIN_ITERATIONS - n_last_rot_correction += 1 if n_last_rot_correction >= ROT_MIN_ITERATIONS: return True @@ -619,15 +647,10 @@ def is_rot_offset_allowed(self): async def update_fits(self, guider_solution: GuiderSolution): """Updates the ``lvm.agcam`` files and creates the ``lvm.guider`` file.""" - def nan_or_none(value: float, precision: int | None = None): - if numpy.isnan(value): - return None - if precision is not None: - return numpy.round(value, precision) - return value - coros = [] + assert guider_solution.sources is not None + # Update lvm.agcam PROC extension. 
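The PA bookkeeping above relies on wrapped angle arithmetic: calculate_offset() keeps offset_pa in [0, 360) because the k-mirror can only be driven with positive offsets, while create_global_header() compares the measured and field PAs through angle_difference(), a helper imported from elsewhere in lvmguider and not shown in this patch. A generic signed difference wrapped to (-180, 180] is sketched below purely to illustrate what such a comparison needs; it is not the repository's implementation:

    def angle_difference_sketch(a: float, b: float) -> float:
        """Illustrative only: signed a - b in degrees, wrapped to (-180, 180]."""
        diff = (a - b) % 360.0
        return diff - 360.0 if diff > 180.0 else diff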
for solution in guider_solution.solutions: file = solution.path.absolute() @@ -659,7 +682,7 @@ def nan_or_none(value: float, precision: int | None = None): guider_path = get_guider_path(guider_solution.solutions[0].path) sources_path = guider_path.with_suffix(".parquet") - gheader = header_from_model("GUIDERDATA") + gheader = header_from_model("GUIDERDATA_HEADER") gheader["GUIDERV"] = __version__ gheader["TELESCOP"] = self.telescope @@ -679,8 +702,8 @@ def nan_or_none(value: float, precision: int | None = None): gheader["RAFIELD"] = numpy.round(self.field_centre[0], 6) gheader["DECFIELD"] = numpy.round(self.field_centre[1], 6) gheader["PAFIELD"] = numpy.round(self.field_centre[2], 4) - gheader["XMFPIX"] = guider_solution.guide_pixel[0] - gheader["ZMFPIX"] = guider_solution.guide_pixel[1] + gheader["XFFPIX"] = guider_solution.guide_pixel[0] + gheader["ZFFPIX"] = guider_solution.guide_pixel[1] gheader["SOLVED"] = guider_solution.solved gheader["RAMEAS"] = nan_or_none(guider_solution.pointing[0], 6) gheader["DECMEAS"] = nan_or_none(guider_solution.pointing[1], 6) @@ -704,8 +727,8 @@ def nan_or_none(value: float, precision: int | None = None): gheader["AX1KD"] = self.pid_ax1.Kd gheader["ZEROPT"] = nan_or_none(guider_solution.zero_point, 3) - if guider_solution.mf_wcs: - gheader.extend(guider_solution.mf_wcs.to_header()) + if guider_solution.wcs: + gheader.extend(guider_solution.wcs.to_header()) guider_hdul = fits.HDUList([fits.PrimaryHDU()]) guider_hdul.append(fits.ImageHDU(data=None, header=gheader, name="GUIDERDATA")) diff --git a/src/lvmguider/plotting.py b/src/lvmguider/plotting.py new file mode 100644 index 0000000..9038e2f --- /dev/null +++ b/src/lvmguider/plotting.py @@ -0,0 +1,598 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# @Author: José Sánchez-Gallego (gallegoj@uw.edu) +# @Date: 2023-09-03 +# @Filename: plotting.py +# @License: BSD 3-clause (http://www.opensource.org/licenses/BSD-3-Clause) + +from __future__ import annotations + +import os +import pathlib + +from typing import TYPE_CHECKING, Callable, Sequence + +import matplotlib +import matplotlib.pyplot as plt +import matplotlib.text +import matplotlib.transforms as mtransforms +import numpy +import seaborn +from matplotlib.axes import Axes +from matplotlib.transforms import Bbox + +from lvmguider import config + + +if TYPE_CHECKING: + import pandas + from matplotlib.figure import Figure + + from lvmguider.dataclasses import GlobalSolution + + +matplotlib.use("agg") + + +AnyPath = str | os.PathLike + + +def plot_qa( + solution: GlobalSolution, + root: AnyPath | None = None, + save_subplots: bool = True, +): + """Produces guider QA plots from a global solution object.""" + + root = pathlib.Path(root or (solution.path.parent / "qa")) + root.mkdir(parents=True, exist_ok=True) + + stem = solution.path.stem + + # Set up seaborn and matplotlib. 
+ seaborn.set_theme( + style="darkgrid", + palette="deep", + font="serif", + font_scale=1.2, # type: ignore + ) + + with plt.ioff(): + # Plot PA + outpath_pa = root / (stem + "_pa.pdf") + plot_position_angle(outpath_pa, solution, save_subplots=save_subplots) + + # Plot ZP + outpath_zp = root / (stem + "_zp.pdf") + plot_zero_point_or_fwhm( + outpath_zp, + solution, + save_subplots=save_subplots, + column="zero_point", + ) + + # Plot FWHM + outpath_fwhm = root / (stem + "_fwhm.pdf") + plot_zero_point_or_fwhm( + outpath_fwhm, + solution, + save_subplots=save_subplots, + column="fwhm", + ) + + # Plot guider offsets + outpath_fwhm = root / (stem + "_guide_offsets.pdf") + plot_guider_offsets( + outpath_fwhm, + solution, + save_subplots=save_subplots, + ) + + seaborn.reset_orig() + + +def get_subplot_path( + orig_path: AnyPath, + suffix: str = "", + extension="png", + subdir: str = "subplots", +): + """Creates a path for a subplot.""" + + orig_path = pathlib.Path(orig_path) + + orig_parent = orig_path.parent + stem = orig_path.stem + + path = orig_parent + + if subdir: + path /= subdir + + return path / (stem + suffix + f".{extension}") + + +def create_subplot(func: Callable, path: AnyPath, *args, dpi: int = 100, **kwargs): + """Creates a subplot by calling a plotting function.""" + + pathlib.Path(path).parent.mkdir(parents=True, exist_ok=True) + + fig, ax = plt.subplots(figsize=(11, 8)) + func(ax, *args, **kwargs) + fig.savefig(str(path), dpi=dpi) + + +def save_subplot( + fig: Figure, + axes: Axes | list[Axes], + path: AnyPath, + pad: float | Sequence[float] = (0.01, 0.0), + bbox_range: Sequence[float] | None = None, +): + """Saves a subplot, including axes labels, tick labels, and titles.""" + + # https://stackoverflow.com/questions/4325733/save-a-subplot-in-matplotlib + + if isinstance(axes, Axes): + axes = [axes] + + pathlib.Path(path).parent.mkdir(parents=True, exist_ok=True) + + if bbox_range is not None: + fig.savefig( + str(path), + bbox_inches=mtransforms.Bbox( + # This is in "figure fraction" for the bottom half + # input in [[xmin, ymin], [xmax, ymax]] + numpy.array( + [ + [bbox_range[0], bbox_range[1]], + [bbox_range[2], bbox_range[3]], + ] + ) + ).transformed( + (fig.transFigure - fig.dpi_scale_trans) # type: ignore + ), + ) + return + + items = [] + # For text objects, we need to draw the figure first, otherwise + # the extents are undefined. 
+ for ax in axes: + ax.figure.canvas.draw() + + items += ax.get_xticklabels() + items += ax.get_yticklabels() + items += [ax.get_xaxis().get_label(), ax.get_yaxis().get_label()] + items += [ax, ax.title] # type: ignore + + bbox = Bbox.union([item.get_window_extent() for item in items]) + + if isinstance(pad, Sequence): + bbox = bbox.expanded(1.0 + pad[0], 1.0 + pad[1]) + else: + bbox = bbox.expanded(1.0 + pad, 1.0 + pad) + + extent = bbox.transformed(fig.dpi_scale_trans.inverted()) # type: ignore + + fig.savefig(str(path), bbox_inches=extent) + + +def get_camera_figure( + left: float = 0.1, + right: float = 0.9, + bottom: float = 0.075, + top: float = 0.91, + wspace: float = 0.1, + hspace: float = 0.3, +): + """Returns a customised figure and axes.""" + + fig, axd = plt.subplot_mosaic( + [["east", "west"], ["global", "global"]], + figsize=(11, 8), + ) + fig.subplots_adjust( + left=left, + right=right, + bottom=bottom, + top=top, + wspace=wspace, + hspace=hspace, + ) + + for name, ax in axd.items(): + ax.axis("off") + + ax.set_xlabel("Frame number") + + if name == "global": + ax.set_title("Full frame") + else: + ax.set_title(f"Camera {str(name).capitalize()}") + + ax.ticklabel_format(useOffset=False) + ax.xaxis.get_major_locator().set_params(integer=True) + + return fig, axd + + +def plot_position_angle( + outpath: pathlib.Path, + solution: GlobalSolution, + save_subplots: bool = False, +): + """Plots the position angle.""" + + fig, axd = get_camera_figure(wspace=0.4) + + # Plot top panels with the East and West camera PA and PA error. + for camera_solution in solution.coadd_solutions: + if camera_solution.frames is None or len(camera_solution.frames) == 0: + continue + + camera = camera_solution.camera + frame_data = camera_solution.frame_data() + + if len(frame_data.pa.dropna()) < 2: + continue + + # Unhide plot. + ax = axd[camera] # type: ignore + ax.axis("on") + + # Plot PA. + axes = _plot_pa_axes( + ax, + frame_data, + pa_error_mode="mean", + legend=False, + ) + + # Remove the inside labels since they are identical left and right. + if camera == "east": + axes[1].set_ylabel("") + else: + axes[0].set_ylabel("") + + if save_subplots: + create_subplot( + _plot_pa_axes, + get_subplot_path(outpath, f"_{camera}"), + frame_data, + title=f"Camera {camera.capitalize()}", + pa_error_mode="mean", + legend=True, + ) + + # Now use the PAs from the guider data (i.e., full frame). + guider_data = solution.guider_data() + guider_data = guider_data.loc[guider_data.guide_mode == "guide"] + if len(guider_data.pa.dropna()) >= 2: + ax = axd["global"] # type: ignore + ax.axis("on") + + _plot_pa_axes(ax, guider_data, pa_error_mode="first", legend=True) + + if save_subplots: + create_subplot( + _plot_pa_axes, + get_subplot_path(outpath), + guider_data, + title="Full frame", + pa_error_mode="first", + legend=True, + ) + + fig.suptitle(f"Position angle for {solution.path.name}") + fig.savefig(str(outpath)) + + plt.close("all") + + +def _plot_pa_axes( + ax: Axes, + data: pandas.DataFrame, + title: str = "", + pa_error_mode: str = "first", + legend: bool = True, +): + """Plot PA axes.""" + + ax.ticklabel_format(useOffset=True) + + pa_threshold = config["coadds"]["warnings"]["pa_error"] + + # Create right axis for the PA error. + right_ax = ax.twinx() + right_ax.ticklabel_format(useOffset=True) + + # Plot absolute position angle. + (pa_plot,) = ax.plot( + data.frameno, + data.pa, + "b-", + zorder=20, + label="Position angle", + ) + + # For the cameras we calculate the error wrt the mean PA. 
+ # For the full frame PAs we use the initial PA and calculate the "drift". + if pa_error_mode == "first": + ref_pa = data.pa.dropna().iloc[0] + else: + ref_pa = data.pa.dropna().mean() + + # Plot PA error. + (pa_error_plot,) = right_ax.plot( + data.frameno, + ref_pa - data.pa, + "g-", + zorder=20, + label="PA error", + ) + + # If the PA error goes over the limit, show a band with the threshold. + abs_error = (ref_pa - data.pa).abs().max() + if abs_error > pa_threshold: + right_ax.axhspan( + ymin=-pa_threshold, + ymax=pa_threshold, + facecolor="red", + edgecolor="None", + linestyle="None", + alpha=0.2, + zorder=5, + ) + + if legend: + ax.legend( + handles=[pa_plot, pa_error_plot], + loc="upper left", + ).set_zorder(100) + ax.margins(0.05, 0.2) + + # The left and right y-axis are different z-stacks so it's not possible to mix + # and match. This moves the right y-axis to the background. + right_ax.set_zorder(-1) + ax.patch.set_visible(False) # type: ignore + ax.grid(False) + right_ax.patch.set_visible(True) # type: ignore + + ax.set_xlabel("Frame number") + ax.set_ylabel("Position angle [deg]", labelpad=10) + right_ax.set_ylabel("Position angle error [deg]", labelpad=10) + + if title: + ax.set_title(title) + + return [ax, right_ax] + + +def plot_zero_point_or_fwhm( + outpath: pathlib.Path, + solution: GlobalSolution, + save_subplots: bool = False, + column="zero_point", +): + """Plots the zero point or FWHM of an exposure.""" + + fig, axd = get_camera_figure() + + # Plot top panels with the East and West camera zero point or FWHM. + for camera_solution in solution.coadd_solutions: + camera = camera_solution.camera + + # Get all the frames and the ZP/FWHM value of the co-added camera image. + frame_data = camera_solution.frame_data() + coadd_value = getattr(camera_solution, column) + + if len(frame_data) == 0 and numpy.isnan(coadd_value): + continue + + # Un-hide the axes. + ax = axd[camera] # type: ignore + ax.axis("on") + + # Plot data. + ax = _plot_zero_point_or_fwhm_axes( + ax, + frame_data, + coadd_value, + column=column, + legend=False, + ) + + # Move the West camera y-axis to the right to unclutter the central section. + if camera == "west": + ax.yaxis.tick_right() + ax.yaxis.set_label_position("right") + + if save_subplots: + create_subplot( + _plot_zero_point_or_fwhm_axes, + get_subplot_path(outpath, f"_{camera}"), + frame_data, + coadd_value, + column=column, + title=f"Camera {camera.capitalize()}", + legend=True, + ) + + # Now do the full frame. Almost identical to above. 
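+    # The "global" panel uses the full-frame guider data, keeping only frames
+    # taken while actively guiding (guide_mode == "guide").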
+ guider_data = solution.guider_data() + guider_data = guider_data.loc[guider_data.guide_mode == "guide"] + global_value = getattr(solution, column) + + if len(guider_data.zero_point.dropna()) >= 2 or not numpy.isnan(global_value): + ax = axd["global"] # type: ignore + ax.axis("on") + + _plot_zero_point_or_fwhm_axes( + ax, + guider_data, + global_value, + column=column, + legend=True, + ) + + if save_subplots: + create_subplot( + _plot_zero_point_or_fwhm_axes, + get_subplot_path(outpath), + guider_data, + global_value, + column=column, + title="Full frame", + legend=True, + ) + + if column == "zero_point": + fig.suptitle(f"Zero point for {solution.path.name}") + else: + fig.suptitle(f"FWHM for {solution.path.name}") + + fig.savefig(str(outpath)) + + plt.close("all") + + +def _plot_zero_point_or_fwhm_axes( + ax: Axes, + data: pandas.DataFrame, + coadd: float, + column="zero_point", + title: str | None = None, + legend: bool = True, +): + """Plot zero point or FWHM axes.""" + + handles = [] + + # Plot the data for each frame. + (plot,) = ax.plot( + data.frameno, + data[column], + "b-", + label="Frames", + ) + handles.append(plot) + + # If the co-added value exists, plot it as a horizontal dashed line. + if not numpy.isnan(coadd): + median_line = ax.axhline( + y=coadd, + color="k", + linestyle="dashed", + label="Co-add", + alpha=0.5, + ) + handles.append(median_line) + + if legend: + loc = "lower right" if column == "zero_point" else "upper left" + ax.legend(handles=handles, loc=loc).set_zorder(100) + ax.margins(0.05, 0.25) + + ax.set_xlabel("Frame number") + + if column == "zero_point": + ax.set_ylabel("Zero Point [mag]", labelpad=10) + else: + ax.set_ylabel("FWHM [arcsec]", labelpad=10) + + if title: + ax.set_title(title) + + return ax + + +def plot_guider_offsets( + outpath: pathlib.Path, + solution: GlobalSolution, + save_subplots: bool = False, +): + """Plots guider data (RA/Dec/PA offsets and applied corrections).""" + + fig, axd = plt.subplot_mosaic( + [["sep", "sep"], ["meas", "applied"]], + figsize=(11, 8), + ) + + fig.subplots_adjust( + left=0.1, + right=0.9, + bottom=0.075, + top=0.93, + wspace=0.1, + hspace=0.3, + ) + + gdata = solution.guider_data() + gdata = gdata.loc[gdata.guide_mode == "guide"] + + # Top panel. On the left y axis plot the separation to the field + # centre. In the right y axis plot the PA error (field - measured). + gdata["separation"] = numpy.hypot(gdata.ra_off, gdata.dec_off) + sep_data = gdata.loc[:, ["frameno", "separation"]] + + sep_ax = axd["sep"] # type: ignore + (sep_plot,) = sep_ax.plot(sep_data.frameno, sep_data.separation, "b-") + sep_ax.set_xlabel("Frame number") + sep_ax.set_ylabel("Separation [arcsec]", labelpad=10) + + sep_ax_r = sep_ax.twinx() + sep_ax_r.grid(False) + (pa_off_plot,) = sep_ax_r.plot(sep_data.frameno, gdata.pa_off, "g-") + sep_ax_r.set_ylabel("Position angle error [deg]", labelpad=10) + sep_ax_r.legend( + handles=[sep_plot, pa_off_plot], + labels=["Separation", "PA error"], + loc="upper right", + ) + + # Bottom left panel. Plot the measured offsets in RA, Dec.. + meas_ax = axd["meas"] # type: ignore + meas_ax.plot(gdata.frameno, gdata.ra_off, "b-", label="RA", linewidth=0.5) + meas_ax.plot(gdata.frameno, gdata.dec_off, "r-", label="Dec", linewidth=0.5) + meas_ax.legend(loc="lower left") + meas_ax.set_xlabel("Frame number") + meas_ax.set_ylabel("Measured error [arcsec]", labelpad=10) + + # Bottom right panel. Plot the applied corrections in RA, Dec. 
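+    # ``ax0_applied``/``ax1_applied`` hold the offsets actually sent to the
+    # telescope on each axis, in arcsec.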
+ # No point in plotting PA corrections since they are not applied + # during guiding and we have excluded acquisition. + appl_ax = axd["applied"] # type: ignore + appl_ax.plot(gdata.frameno, gdata.ax0_applied, "b-", linewidth=0.5) + appl_ax.plot(gdata.frameno, gdata.ax1_applied, "r-", linewidth=0.5) + appl_ax.set_xlabel("Frame number") + appl_ax.set_ylabel("Applied offset [arcsec]") + appl_ax.yaxis.tick_right() + appl_ax.yaxis.set_label_position("right") + + if save_subplots: + save_subplot( + fig, + [sep_ax, sep_ax_r], + get_subplot_path(outpath, ""), + bbox_range=[0.0, 0.47, 1.0, 0.98], + ) + + save_subplot( + fig, + [meas_ax], + get_subplot_path(outpath, "_measured"), + bbox_range=[0.0, 0.0, 0.505, 0.48], + ) + + save_subplot( + fig, + [appl_ax], + get_subplot_path(outpath, "_applied"), + bbox_range=[0.505, 0.0, 1.0, 0.48], + ) + + fig.suptitle(f"Guider data for {solution.path.name}") + fig.savefig(str(outpath)) diff --git a/src/lvmguider/tools.py b/src/lvmguider/tools.py index ef989f8..b761bf3 100644 --- a/src/lvmguider/tools.py +++ b/src/lvmguider/tools.py @@ -10,14 +10,16 @@ import asyncio import concurrent.futures +import os.path import pathlib import re import warnings +from collections import defaultdict from contextlib import contextmanager from functools import partial from time import time -from typing import TYPE_CHECKING, Any +from typing import TYPE_CHECKING, Any, Sequence import nptyping import numpy @@ -26,13 +28,16 @@ import pgpasslib import sep from astropy.io import fits +from astropy.stats import sigma_clip from astropy.table import Table +from astropy.time import Time, TimezoneInfo from psycopg2 import OperationalError from sdsstools import read_yaml_file +from sdsstools.time import get_sjd from lvmguider import config, log -from lvmguider.types import ARRAY_2D_F32 +from lvmguider.types import ARRAY_1D_F32, ARRAY_2D_F32 if TYPE_CHECKING: @@ -188,7 +193,7 @@ def elapsed_time(command: GuiderCommand, task_name: str = "unnamed"): command.actor.log.debug(f"Elapsed time for task {task_name!r}: {time()-t0:.3f} s") -def get_frameno(file: pathlib.Path | str) -> int: +def get_frameno(file: os.PathLike | str) -> int: """Returns the frame number for a frame.""" match = re.match(r"^.+?([0-9]+)\.fits$", str(file)) @@ -410,6 +415,10 @@ def get_gaia_sources( df = pandas.DataFrame.from_records(data) + # Cast to correct types + for column in ["pmra", "pmdec", "phot_g_mean_mag", "lmag_ab", "lflux"]: + df[column] = df[column].astype(numpy.float32) + return df @@ -460,35 +469,47 @@ def estimate_zeropoint( # of an object that produces 1 count per second on the detector. # For an arbitrary object producing DT counts per second then # m = -2.5 x log10(DN) - ZP - zp = -2.5 * numpy.log10(flux_adu * gain) - sources.lmag_ab + valid = flux_adu > 0 + zp = -2.5 * numpy.log10(flux_adu[valid] * gain) - sources.lmag_ab[valid] + + df = pandas.DataFrame(columns=["ap_flux", "ap_fluxerr", "zp"], dtype=numpy.float32) - df = pandas.DataFrame() - df["ap_flux"] = flux_adu - df["ap_fluxerr"] = fluxerr_adu - df["zp"] = zp + # There is a weirdness in Pandas that if flux_adu is a single value then the + # .loc[:, "ap_flux"] would not set any values. So we replace the entire column + # but endure the dtypes. 
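+    # Note that ``zp`` was computed only for sources with positive flux, so it
+    # is assigned with ``.loc[valid, ...]`` below and the remaining rows keep NaN.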
+    df["ap_flux"] = flux_adu.astype(numpy.float32)
+    df["ap_fluxerr"] = fluxerr_adu.astype(numpy.float32)
+    df.loc[valid, "zp"] = zp.astype(numpy.float32)
 
     df.index = sources.index
 
     return df
 
 
-def get_dark_subtrcted_data(file: pathlib.Path | str) -> tuple[ARRAY_2D_F32, bool]:
+def get_dark_subtracted_data(file: pathlib.Path | str) -> tuple[ARRAY_2D_F32, bool]:
     """Returns a background or dark subtracted image."""
 
-    hdul = fits.open(str(file))
+    path = pathlib.Path(file)
+
+    hdul = fits.open(str(path))
 
     exptime = hdul["RAW"].header["EXPTIME"]
 
     # Data in counts per second.
     data: ARRAY_2D_F32 = hdul["RAW"].data.copy().astype("f4") / exptime
 
-    # Get data and subtract dark or fit background.
-    dirname = hdul["PROC"].header["DIRNAME"]
-    dark_file = hdul["PROC"].header["DARKFILE"]
+    if "PROC" in hdul:
+        # Get data and subtract dark or fit background.
+        dirname = hdul["PROC"].header.get("DIRNAME", path.parent)
+        dark_file = hdul["PROC"].header.get("DARKFILE", "")
 
-    dark_path = pathlib.Path(dirname) / dark_file
+        dark_path = pathlib.Path(dirname) / dark_file
 
-    if dark_file != "" and dark_path.exists():
+    else:
+        dark_file = ""
+        dark_path = None
+
+    if dark_file != "" and dark_path is not None and dark_path.exists():
         dark: ARRAY_2D_F32 = fits.getdata(str(dark_path), "RAW").astype("f4")
         dark_exptime: float = fits.getval(str(dark_path), "EXPTIME", "RAW")
         data = data - dark / dark_exptime
@@ -500,3 +521,246 @@ def get_dark_subtrcted_data(file: pathlib.Path | str) -> tuple[ARRAY_2D_F32, boo
         dark_sub = False
 
     return data, dark_sub
+
+
+def get_files_in_time_range(
+    path: pathlib.Path,
+    time0: Time,
+    time1: Time,
+    pattern: str = "*",
+):
+    """Returns all files in a directory with modification time in the time range.
+
+    Parameters
+    ----------
+    path
+        The path on which to search for files.
+    time0,time1
+        The range of times, as astropy ``Time`` objects.
+    pattern
+        A pattern to use to subset the files in the directory. Defaults to
+        searching all files.
+
+    Returns
+    -------
+    files
+        A list of paths to files in the directory that were last modified in the
+        selected time range.
+
+    """
+
+    files = path.glob(pattern)
+
+    tz = TimezoneInfo()
+    dt0 = time0.to_datetime(tz).timestamp()
+    dt1 = time1.to_datetime(tz).timestamp()
+
+    return [
+        file
+        for file in files
+        if os.path.getmtime(file) > dt0 and os.path.getmtime(file) < dt1
+    ]
+
+
+def get_agcam_in_time_range(
+    path: pathlib.Path,
+    time0: Time,
+    time1: Time,
+    pattern: str = "*.fits",
+):
+    """Similar to `.get_files_in_time_range` but uses the keyword ``DATE-OBS``."""
+
+    files = path.glob(pattern)
+
+    matched: list[pathlib.Path] = []
+    for file in files:
+        try:
+            hdul = fits.open(file)
+        except Exception:
+            continue
+
+        if "RAW" in hdul:
+            header = hdul["RAW"].header
+        else:
+            header = hdul[0].header
+
+        if "DATE-OBS" not in header:
+            continue
+
+        time = Time(header["DATE-OBS"], format="isot")
+        if time > time0 and time < time1:
+            matched.append(file)
+
+    return matched
+
+
+def get_guider_files_from_spec(
+    spec_file: str | pathlib.Path,
+    telescope: str | None = None,
+    agcam_path: str | pathlib.Path = "/data/agcam",
+    camera: str | None = None,
+    use_time_range: bool | None = None,
+):
+    """Returns the AG files taken during a spectrograph exposure.
+
+    A convenience function that reads the header of the spectrograph frame,
+    extracts the range of guide frames, and returns the filenames of those
+    frames (missing frames are skipped).
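+
+    If the spectrograph header does not record the guider frame range, the AG
+    files are instead selected by time (see ``use_time_range``).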
+
+    Parameters
+    ----------
+    spec_file
+        The path to the spectrograph frame to use to determine the range.
+    telescope
+        The telescope for which to extract the range of guide/AG frames.
+        Defaults to the value in the configuration file.
+    agcam_path
+        The ``agcam`` path where AG frames are ordered by SJD.
+    camera
+        The camera, east or west, for which to select frames. If not provided
+        selects both cameras.
+    use_time_range
+        By default (``use_time_range=None``) the function will check the
+        ``G{telescope}FR0`` and ``G{telescope}FRN`` keywords to determine the
+        range of guider frames. If those keywords are not present, the AG
+        files that were last modified in the range of the integration will
+        be found. If `True`, the last-modification method is always used.
+
+    Returns
+    -------
+    frames
+        A list of AG file names that match the range of guide frames
+        taken during the spectrograph exposure. Missing frames in the
+        range are not included.
+
+    """
+
+    spec_file = pathlib.Path(spec_file)
+    if not spec_file.exists():
+        raise FileNotFoundError(f"Cannot find file {spec_file!s}")
+
+    header = fits.getheader(str(spec_file))
+
+    if telescope is None:
+        telescope = config["telescope"]
+
+    assert isinstance(telescope, str)
+
+    sjd = get_sjd("LCO", date=Time(header["OBSTIME"], format="isot").to_datetime())
+
+    agcam_path = pathlib.Path(agcam_path) / str(sjd)
+    if not agcam_path.exists():
+        raise FileNotFoundError(f"Cannot find agcam path {agcam_path!s}")
+
+    frame0 = header.get(f"G{telescope.upper()}FR0", None)
+    frame1 = header.get(f"G{telescope.upper()}FRN", None)
+
+    if use_time_range is True or (frame0 is None or frame1 is None):
+        log.warning(f"Matching guider frames by date for file {spec_file.name}")
+
+        time0 = Time(header["INTSTART"])
+        time1 = Time(header["INTEND"])
+        files = get_agcam_in_time_range(
+            agcam_path,
+            time0,
+            time1,
+            pattern=f"lvm.{telescope}.*.fits",
+        )
+
+        if len(files) == 0:
+            raise ValueError(f"Cannot find guider frames for {spec_file!s}")
+
+        return sorted(files)
+
+    return get_frame_range(agcam_path, telescope, frame0, frame1, camera=camera)
+
+
+def get_frame_range(
+    path: str | pathlib.Path,
+    telescope: str,
+    frameno0: int,
+    frameno1: int,
+    camera: str | None = None,
+) -> list[pathlib.Path]:
+    """Returns a list of AG frames in a frame number range.
+
+    Parameters
+    ----------
+    path
+        The directory where the files are written.
+    telescope
+        The telescope for which to select frames.
+    frameno0
+        The initial frame number.
+    frameno1
+        The final frame number.
+    camera
+        The camera, east or west, for which to select frames. If not provided
+        selects all cameras.
+
+    Returns
+    -------
+    frames
+        A sorted list of frames in the desired range.
+
+    """
+
+    if camera is None:
+        files = pathlib.Path(path).glob(f"lvm.{telescope}.agcam.*.fits")
+    else:
+        files = pathlib.Path(path).glob(f"lvm.{telescope}.agcam.{camera}_*.fits")
+
+    selected: list[pathlib.Path] = []
+    for file in files:
+        frameno = get_frameno(file)
+        if frameno >= frameno0 and frameno <= frameno1:
+            selected.append(file)
+
+    return list(sorted(selected))
+
+
+def polyfit_with_sigclip(
+    x: ARRAY_1D_F32,
+    y: ARRAY_1D_F32,
+    sigma: int = 3,
+    deg: int = 1,
+):
+    """Fits a polynomial to data after sigma-clipping the dependent variable."""
+
+    valid = ~sigma_clip(y, sigma=sigma, masked=True).mask  # type:ignore
+
+    return numpy.polyfit(x[valid], y[valid], deg)
+
+
+def nan_or_none(value: float | None, precision: int | None = None):
+    """Replaces ``NaN`` with `None`. If the value is not ``NaN``, rounds it to
+    ``precision`` when provided."""
+
+    if value is None or numpy.isnan(value):
+        return None
+
+    if precision is not None:
+        return numpy.round(value, precision)
+
+    return value
+
+
+def sort_files_by_camera(files: Sequence[str | os.PathLike]):
+    """Returns a dictionary of camera name to list of files."""
+
+    camera_to_files: defaultdict[str, list[pathlib.Path]] = defaultdict(list)
+    for file in files:
+        for camera in ["east", "west"]:
+            if camera in str(file):
+                camera_to_files[camera].append(pathlib.Path(file))
+                break
+
+    return camera_to_files
+
+
+def angle_difference(angle1: float, angle2: float):
+    """Calculates the shortest difference between two angles."""
+
+    diff = (angle2 - angle1 + 180) % 360 - 180
+    return diff + 360 if diff < -180 else diff
diff --git a/src/lvmguider/transformations.py b/src/lvmguider/transformations.py
index bb68a77..223885b 100644
--- a/src/lvmguider/transformations.py
+++ b/src/lvmguider/transformations.py
@@ -11,6 +11,8 @@
 import pathlib
 from datetime import datetime
 
+from typing import Any, cast
+
 import numpy
 import pandas
 from astropy.coordinates import EarthLocation, SkyCoord
@@ -31,14 +33,14 @@
 conf.iers_degraded_accuracy = "ignore"
 
 
-def ag_to_master_frame(
+def ag_to_full_frame(
     camera: str,
     in_locs: numpy.ndarray,
     rot_shift: tuple | None = None,
     verbose=False,
 ):
     """Rotate and shift the star locations from an AG frame to the correct locations in
-    the large "master frame" corresponding to the full focal plane.
+    the large "full frame" (master frame) corresponding to the full focal plane.
 
     Adapted from Tom Herbst's code.
 
@@ -55,7 +57,7 @@
     Returns
     -------
     rs_loc
-        Array of corresponding star locations in "master frame".
+        Array of corresponding star locations in "full frame".
 
     """
 
@@ -86,7 +88,7 @@
 
     if verbose:
         print("")
-        print("ag_to_master_frame: rot_shift for ", camera, numpy.degrees(th), Sx, Sz)
+        print("ag_to_full_frame: rot_shift for ", camera, numpy.degrees(th), Sx, Sz)
         print("")
 
     # Rotation matrix
@@ -105,8 +107,8 @@
     return (rsLoc.T, (th, Sx, Sz))
 
 
-def master_frame_to_ag(camera: str, locs: numpy.ndarray):
-    """Transforms the star locations from the master frame
+def full_frame_to_ag(camera: str, locs: numpy.ndarray):
+    """Transforms the star locations from the full frame
     to the internal pixel locations in the AG frame.
 
     Adapted from Tom Herbst.
 
@@ -151,7 +153,7 @@
     # Make 2xnPts Sx,Sz offset matrix
     off = numpy.tile(numpy.array([[Sx, Sz]]).transpose(), (1, locs.shape[0]))
 
-    # Transform MF --> AG. See Single_Sensor_Solve doc
+    # Transform FF --> AG. See Single_Sensor_Solve doc
     rsLoc = numpy.dot(M, (locs.T - rOff - off)) + rOff
 
     # Return calculated positions (need to transpose for standard layout)
@@ -183,7 +185,7 @@
     dec
         The estimated Dec of the field.
     full_frame
-        Whether this is a full "master frame" field, requiring a different set of
+        Whether this is a "full frame" field, requiring a different set of
         index files.
     index_paths
        The paths to the index files to use.
A dictionary of series number @@ -220,7 +222,7 @@ def solve_locs( radius = 5 # Search radius in degrees if full_frame: - midX, midZ = config["xz_full_frame"] # Middle of Master Frame + midX, midZ = config["xz_full_frame"] # Middle of full Frame index_paths_default = { 5200: "/data/astrometrynet/5200", 4100: "/data/astrometrynet/4100", @@ -234,7 +236,7 @@ def solve_locs( locs = locs.copy() if full_frame: - locs = locs.rename(columns={"x_master": "x", "y_master": "y"}) + locs = locs.rename(columns={"x_full": "x", "y_full": "y"}) solution = astrometrynet_quick( index_paths, @@ -454,7 +456,7 @@ def get_crota2(wcs: WCS): raise ValueError("WCS does not have information to determine CROTA2.") crota2 = numpy.degrees(numpy.arctan2(-cd[0, 1], cd[1, 1])) - return crota2 if crota2 > 0 else crota2 + 360 + return float(crota2 if crota2 > 0 else crota2 + 360) def match_with_gaia( @@ -463,6 +465,7 @@ def match_with_gaia( gaia_sources: pandas.DataFrame | None = None, max_separation: float = 2, concat: bool = False, + db_connection_params: dict[str, Any] = {}, ) -> tuple[pandas.DataFrame, int]: """Match detections to Gaia sources using nearest neighbours. @@ -481,6 +484,9 @@ def match_with_gaia( concat If `True`, the returned data frame is the input ``sources`` concatenated with the match information. + db_connection_params + Database connection parameters to pass to `.get_gaia_sources` if + ``gaia_sources`` are not passed. Returns ------- @@ -499,7 +505,7 @@ def match_with_gaia( epoch_diff = epoch - GAIA_EPOCH if gaia_sources is None: - gaia_sources = get_gaia_sources(wcs) + gaia_sources = get_gaia_sources(wcs, db_connection_params=db_connection_params) sources = sources.copy() gaia_sources = gaia_sources.copy() @@ -532,19 +538,20 @@ def match_with_gaia( # Get Gaia rows for the valid matches. Change their indices to those # of their matching sources (which are 0..len(sources)-1 since we reindexed). - matches: pandas.DataFrame = gaia_sources.iloc[ii[valid]] + matches: pandas.DataFrame = gaia_sources.iloc[ii[valid]].copy() assert isinstance(matches, pandas.DataFrame) matches.index = pandas.Index(numpy.arange(len(ii))[valid]) - dx = matches.xpix - sources.x - dy = matches.ypix - sources.y - matches["match_sep"] = numpy.hypot(dx, dy) * PIXSCALE + dx = (matches.xpix - sources.x).astype(numpy.float32) + dy = (matches.ypix - sources.y).astype(numpy.float32) + match_sep = cast(pandas.Series, numpy.hypot(dx, dy) * PIXSCALE) + matches.loc[:, "match_sep"] = match_sep.loc[matches.index] - matches.drop(columns=["xpix", "ypix"], inplace=True) + matches = matches.drop(columns=["xpix", "ypix"]) if concat: - sources.loc[matches.index, matches.columns] = matches + sources.loc[:, matches.columns] = matches return sources, valid.sum() return matches, valid.sum()