From 7a71a9152bb0687e01985e58ad1beae10d0546a7 Mon Sep 17 00:00:00 2001 From: Sebastian Luque Date: Tue, 11 May 2021 21:59:48 -0500 Subject: [PATCH] Implemented generalized 2- or 3-process Poisson mixture analyses Also moved all documentation to roxygen. --- .Rbuildignore | 1 + DESCRIPTION | 13 +- NAMESPACE | 41 +- NEWS | 13 + R/AllClass.R | 211 ++++++- R/AllGenerics.R | 45 +- R/AllMethod.R | 1061 ++++++++++++++++++++++++++++++++-- R/austFilter.R | 194 +++++-- R/bouts.R | 701 +++------------------- R/bouts_helpers.R | 199 +++++++ R/calibrate.R | 409 +++++++++++-- R/detDive.R | 48 +- R/detPhase.R | 61 +- R/distSpeed.R | 50 +- R/diveMove-defunct.R | 32 + R/diveMove-deprecated.R | 89 +++ R/diveStats.R | 114 +++- R/oneDiveStats.R | 4 + R/readLocs.R | 64 +- R/readTDR.R | 1 + R/runquantile.R | 182 ++++++ R/stampDive.R | 7 + man/Bouts-class.Rd | 42 ++ man/TDR-class.Rd | 96 ++- man/TDRcalibrate-class.Rd | 197 +++---- man/austFilter.Rd | 234 ++++---- man/bout-methods.Rd | 98 ---- man/bout-misc.Rd | 165 ------ man/boutfreqs.Rd | 38 ++ man/boutinit.Rd | 63 ++ man/bouts-internal.Rd | 57 ++ man/bouts2MLE.Rd | 215 ------- man/bouts2NLS.Rd | 135 ----- man/bouts3NLS.Rd | 141 ----- man/boutsBEC.Rd | 34 ++ man/boutsCDF.Rd | 30 + man/boutsMLEll.Rd | 50 ++ man/boutsNLSll.Rd | 37 ++ man/calibrateDepth.Rd | 597 +++++++++---------- man/calibrateSpeed.Rd | 124 ++-- man/createTDR.Rd | 125 ++++ man/detDive-internal.Rd | 69 +-- man/detPhase-internal.Rd | 96 ++- man/distSpeed.Rd | 66 +-- man/diveModel-class.Rd | 110 ++-- man/diveMove-defunct.Rd | 25 + man/diveMove-deprecated.Rd | 71 +++ man/diveMove-package.Rd | 12 +- man/diveStats.Rd | 182 +++--- man/dot-runquantile.Rd | 208 +++++++ man/extractDive-methods.Rd | 65 --- man/extractDive.Rd | 54 ++ man/fitMLEbouts.Rd | 150 +++++ man/fitNLSbouts.Rd | 72 +++ man/labelBouts.Rd | 63 ++ man/plotBouts.Rd | 61 ++ man/plotBoutsCDF.Rd | 57 ++ man/plotDiveModel-methods.Rd | 123 ---- man/plotDiveModel.Rd | 109 ++++ man/plotTDR-methods.Rd | 174 ------ man/plotTDR.Rd | 147 +++++ man/plotZOC-methods.Rd | 137 ----- man/plotZOC.Rd | 103 ++++ man/readLocs.Rd | 123 ++-- man/readTDR.Rd | 122 ---- man/rmixexp.Rd | 43 ++ man/rqPlot.Rd | 103 ++-- man/runquantile-internal.Rd | 245 -------- man/timeBudget-methods.Rd | 72 --- man/timeBudget.Rd | 57 ++ 70 files changed, 5336 insertions(+), 3601 deletions(-) create mode 100644 R/bouts_helpers.R create mode 100644 R/diveMove-defunct.R create mode 100644 R/diveMove-deprecated.R create mode 100644 man/Bouts-class.Rd delete mode 100644 man/bout-methods.Rd delete mode 100644 man/bout-misc.Rd create mode 100644 man/boutfreqs.Rd create mode 100644 man/boutinit.Rd create mode 100644 man/bouts-internal.Rd delete mode 100644 man/bouts2MLE.Rd delete mode 100644 man/bouts2NLS.Rd delete mode 100644 man/bouts3NLS.Rd create mode 100644 man/boutsBEC.Rd create mode 100644 man/boutsCDF.Rd create mode 100644 man/boutsMLEll.Rd create mode 100644 man/boutsNLSll.Rd create mode 100644 man/createTDR.Rd create mode 100644 man/diveMove-defunct.Rd create mode 100644 man/diveMove-deprecated.Rd create mode 100644 man/dot-runquantile.Rd delete mode 100644 man/extractDive-methods.Rd create mode 100644 man/extractDive.Rd create mode 100644 man/fitMLEbouts.Rd create mode 100644 man/fitNLSbouts.Rd create mode 100644 man/labelBouts.Rd create mode 100644 man/plotBouts.Rd create mode 100644 man/plotBoutsCDF.Rd delete mode 100644 man/plotDiveModel-methods.Rd create mode 100644 man/plotDiveModel.Rd delete mode 100644 man/plotTDR-methods.Rd create mode 100644 man/plotTDR.Rd delete mode 
100644 man/plotZOC-methods.Rd create mode 100644 man/plotZOC.Rd delete mode 100644 man/readTDR.Rd create mode 100644 man/rmixexp.Rd delete mode 100644 man/runquantile-internal.Rd delete mode 100644 man/timeBudget-methods.Rd create mode 100644 man/timeBudget.Rd diff --git a/.Rbuildignore b/.Rbuildignore index 5808171..72575c8 100644 --- a/.Rbuildignore +++ b/.Rbuildignore @@ -8,3 +8,4 @@ EXAMPLES.* ^.AppleDouble$ ^vignettes/auto$ ^src/\.clang-format$ +man-roxygen diff --git a/DESCRIPTION b/DESCRIPTION index 0d647ef..9ba782c 100644 --- a/DESCRIPTION +++ b/DESCRIPTION @@ -1,7 +1,7 @@ Package: diveMove Type: Package Title: Dive Analysis and Calibration -Version: 1.5.4 +Version: 1.6.0 Depends: R (>= 3.5.0), methods, stats4 Suggests: knitr, lattice, pander, rmarkdown, tinytest Imports: geosphere, KernSmooth, plotly, quantreg, uniReg @@ -12,14 +12,17 @@ Description: Utilities to represent, visualize, filter, analyse, and summarize handling location data are also provided. LazyLoad: yes LazyData: no +Encoding: UTF-8 ZipData: no BuildResaveData: no VignetteBuilder: knitr -Collate: AllClass.R AllGenerics.R AllMethod.R austFilter.R bouts.R calibrate.R - detDive.R detPhase.R distSpeed.R diveStats.R oneDiveStats.R plotTDR.R - plotZOC.R readLocs.R readTDR.R runquantile.R speedStats.R stampDive.R - zoc.R zzz.R +Collate: AllClass.R AllGenerics.R AllMethod.R austFilter.R bouts_helpers.R + bouts.R calibrate.R detDive.R detPhase.R distSpeed.R diveStats.R + oneDiveStats.R plotTDR.R plotZOC.R readLocs.R readTDR.R runquantile.R + speedStats.R stampDive.R zoc.R diveMove-deprecated.R + diveMove-defunct.R zzz.R NeedsCompilation: yes License: GPL-3 URL: https://github.com/spluque/diveMove BugReports: https://github.com/spluque/diveMove/issues +RoxygenNote: 7.1.1 diff --git a/NAMESPACE b/NAMESPACE index 565ff16..a031f2b 100644 --- a/NAMESPACE +++ b/NAMESPACE @@ -10,7 +10,8 @@ importFrom(grDevices, colorRampPalette, contourLines, dev.off, hsv, rgb) importFrom(plotly, plot_ly, add_lines, add_markers, subplot, "%>%") importFrom(stats, approx, approxfun, bw.nrd, ecdf, lm, median, nls, - nls.control, predict, quantile, sd, smooth.spline, splinefun) + nls.control, predict, quantile, rexp, sd, smooth.spline, + splinefun, stepfun) importFrom(utils, read.csv) ## bkde2d and rq for speed calibration @@ -27,35 +28,46 @@ export("grpSpeedFilter", "logit", "unLogit", "boutfreqs", - "boutinit", - "bouts2.nlsFUN", - "bouts2.nls", - "bouts3.nlsFUN", - "bouts3.nls", - "labelBouts", - "bouts2.mleFUN", - "bouts2.ll", - "bouts2.LL", - "bouts.mle", + ## "boutsCDF", "calibrateDepth", "calibrateSpeed", "rqPlot", "distSpeed", "diveStats", "oneDiveStats", - "plotBouts2.cdf", "readLocs", "readTDR", + "rmixexp", "createTDR", "stampDive") +## Deprecated functions +export("bouts2.nlsFUN", + "bouts2.nls", + "bec2", + "bec3", + "bouts3.nlsFUN", + "bouts3.nls", + "bouts2.mleFUN") +## Defunct functions +export("bouts2.ll", + "bouts2.LL", + "bouts.mle") exportClasses("TDR", "TDRspeed", "TDRcalibrate", + "Bouts", "diveModel") exportMethods("as.data.frame", "as.TDRspeed", + "bec", + "boutinit", + "fitNLSbouts", + "fitMLEbouts", + "plotBouts", + "plotBoutsCDF", + "labelBouts", "timeBudget", "coerce", "depth<-", @@ -78,7 +90,4 @@ exportMethods("as.data.frame", "getTDR", "getTime", "getSpeedCoef", - "getSpeed", - "plotBouts", - "bec2", - "bec3") + "getSpeed") diff --git a/NEWS b/NEWS index 22e9a4c..9436320 100644 --- a/NEWS +++ b/NEWS @@ -1,3 +1,16 @@ +Changes in version 1.6.0: + + o Analyses for 2- and 3-process Poisson mixtures are now fully + 
implemented. The bout analysis module has a new API, which includes a + new S4 class `Bouts' for representing Poisson process mixtures, and S4 + methods and functions for performing related analyses. As a result, + most functions for bout analyses in previous diveMove versions have + been deprecated or are defunct. Please see + help("diveMove-deprecated") and help("diveMove-defunct") for details. + + o New function rmixexp() for sampling from mixtures of exponential + distributions, as parameterized in Langton et al. (1990). + Changes in version 1.5.4: o Internal function .speedStats() now guarantees numeric matrix output, diff --git a/R/AllClass.R b/R/AllClass.R index 28f434a..d83c2db 100644 --- a/R/AllClass.R +++ b/R/AllClass.R @@ -1,4 +1,33 @@ - +##' Classes "TDR" and "TDRspeed" for representing TDR information +##' +##' These classes store information gathered by time-depth recorders. +##' +##' Since the data to store in objects of these clases usually come from a +##' file, the easiest way to construct such objects is with the function +##' \code{\link{readTDR}} to retrieve all the necessary information. +##' +##' @aliases TDR +##' @slot file Object of class \sQuote{character}, string indicating the +##' file where the data comes from. +##' @slot dtime Object of class \sQuote{numeric}, sampling interval in +##' seconds. +##' @slot time Object of class \code{\link{POSIXct}}, time stamp for every +##' reading. +##' @slot depth Object of class \sQuote{numeric}, depth (m) readings. +##' @slot concurrentData Object of class \code{\link{data.frame}}, optional +##' data collected concurrently. +##' @section Objects from the class: +##' Objects can be created by calls of the form \code{new("TDR", \dots)} +##' and \code{new("TDRspeed", \dots)}. +##' +##' \sQuote{TDR} objects contain concurrent time and depth readings, as +##' well as a string indicating the file the data originates from, and a +##' number indicating the sampling interval for these data. +##' \sQuote{TDRspeed} extends \sQuote{TDR} objects containing additional +##' concurrent speed readings. +##' @author Sebastian P. Luque \email{spluque@@gmail.com} +##' @seealso \code{\link{readTDR}}, \code{\link{TDRcalibrate}}. +##' @keywords classes setClass("TDR", slots=c(file="character", dtime="numeric", time="POSIXct", depth="numeric", concurrentData="data.frame"), @@ -25,6 +54,9 @@ setClass("TDR", }) .speedNames <- c("velocity", "speed") + +##' @describeIn TDR Class \code{TDRspeed} +##' @aliases TDRspeed setClass("TDRspeed", contains="TDR", validity=function(object) { ccData <- object@concurrentData @@ -38,6 +70,80 @@ setClass("TDRspeed", contains="TDR", return(TRUE) }) +##' Class "TDRcalibrate" for dive analysis +##' +##' This class holds information produced at various stages of dive +##' analysis. Methods are provided for extracting data from each slot. +##' +##' This is perhaps the most important class in diveMove, as it holds all +##' the information necessary for calculating requested summaries for a +##' TDR. +##' +##' @aliases TDRcalibrate +##' @slot call Object of class \code{\link{call}}. The matched call to the +##' function that created the object. +##' @slot tdr Object of class \code{\link{TDR}}. This slot contains the +##' time, zero-offset corrected depth, and possibly a data frame. If +##' the object is also of class "TDRspeed", then the data frame might +##' contain calibrated or uncalibrated speed. See +##' \code{\link{readTDR}} and the accessor function +##' \code{\link{getTDR}} for this slot. 
+##' @slot gross.activity Object of class \sQuote{list}. This slot holds a
+##' list of the form returned by \code{\link{.detPhase}}, composed of 4
+##' elements. It contains a vector (named \code{phase.id}) numbering
+##' each major activity phase found in the record, and a factor (named
+##' \code{activity}) labelling each row as being dry, wet, or trivial
+##' wet activity. These two elements are as long as there are rows in
+##' \code{tdr}. This list also contains two more vectors, named
+##' \code{begin} and \code{end}: one with the beginning time of each
+##' phase, and another with the ending time; both represented as
+##' \code{\link{POSIXct}} objects. See \code{\link{.detPhase}}.
+##' @slot dive.activity Object of class \code{\link{data.frame}}. This
+##' slot contains a \code{\link{data.frame}} of the form returned by
+##' \code{\link{.detDive}}, with as many rows as those in \code{tdr},
+##' consisting of three vectors: \code{dive.id}, an integer vector
+##' sequentially numbering each dive (rows that are not part of a dive
+##' are labelled 0); \code{dive.activity}, a factor which completes that
+##' in \code{activity} above, further identifying rows in the record
+##' belonging to a dive. The third vector in \code{dive.activity} is an
+##' integer vector sequentially numbering each postdive interval (all
+##' rows that belong to a dive are labelled 0). See
+##' \code{\link{.detDive}}, and \code{\link{getDAct}} to access all or
+##' any one of these vectors.
+##' @slot dive.phases Object of class \sQuote{factor}. This slot is a
+##' factor that labels each row in the record as belonging to a
+##' particular phase of a dive. It has the same form as the
+##' \dQuote{phase.labels} component of the list returned by
+##' \code{\link{.labDivePhase}}.
+##' @slot dive.models Object of class \sQuote{list}. This slot contains
+##' the details of the process of dive phase identification for each
+##' dive. It has the same form as the \code{dive.models} component of
+##' the list returned by \code{\link{.labDivePhase}}. It has as many
+##' components as there are dives in the \code{\link{TDR}} object, each
+##' of them of class \code{\link{diveModel}}.
+##' @slot dry.thr Object of class \sQuote{numeric}. The temporal
+##' criterion (s) used for detecting dry periods that should be
+##' considered as wet.
+##' @slot wet.thr Object of class \sQuote{numeric}. The temporal
+##' criterion (s) used for detecting wet periods that should not be
+##' considered as foraging time.
+##' @slot dive.thr Object of class \sQuote{numeric}. The depth criterion
+##' (m) below which an underwater phase should be considered a dive.
+##' @slot speed.calib.coefs Object of class \sQuote{numeric}. The
+##' intercept and slope derived from the speed calibration procedure.
+##' Defaults to \code{c(0, 1)}, meaning uncalibrated speeds.
+##' @section Objects from the Class:
+##'
+##' Objects can be created by calls of the form \code{new("TDRcalibrate",
+##' \dots{})}. The objects of this class contain information necessary to
+##' divide the record into sections (e.g. dry/water), dive/surface, and
+##' different sections within dives. They also contain the parameters used
+##' to calibrate speed and criteria to divide the record into phases.
+##' @author Sebastian P. Luque \email{spluque@@gmail.com}
+##' @seealso \code{\link{TDR}} for links to other classes in the package.
+##' \code{\link{TDRcalibrate-methods}} for the various methods
+##' available.
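A quick orientation for the slots above: each one is meant to be read through an accessor rather than with '@'. A minimal sketch, assuming a calibrated object 'dcalib' as created in the '?calibrateDepth' examples; the selector strings follow the element names listed above, and exact accessor signatures should be checked against the package documentation:

    getTDR(dcalib)                 # 'tdr': zero-offset corrected TDR object
    getGAct(dcalib, "activity")    # 'gross.activity': wet/dry activity factor
    getDAct(dcalib, "dive.id")     # 'dive.activity': dive numbering vector
    getDPhaseLab(dcalib)           # 'dive.phases': within-dive phase labels
    getDiveModel(dcalib, 100)      # 'dive.models': model for dive 100
    getSpeedCoef(dcalib)           # 'speed.calib.coefs': calibration coefficients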
+##' @keywords classes setClass("TDRcalibrate", slots=c(call="call", tdr="TDR", gross.activity="list", dive.activity="data.frame", dive.phases="factor", @@ -72,6 +178,76 @@ setOldClass("smooth.spline") setOldClass("bSpline") setClassUnion("dive.spline", c("smooth.spline", "bSpline")) setOldClass("xyVector") + +##' Class "diveModel" for representing a model for identifying dive phases +##' +##' Details of model used to identify the different phases of a dive. +##' @aliases diveModel +##' @slot label.matrix Object of class \code{"matrix"}. A 2-column +##' character matrix with row numbers matching each observation to the +##' full \code{\link{TDR}} object, and a vector labelling the phases of +##' each dive. +##' @slot model Object of class \code{"character"}. A string identifying +##' the specific model fit to dives for the purpose of dive phase +##' identification. It should be one of \sQuote{smooth.spline} or +##' \sQuote{unimodal}. +##' @slot dive.spline Object of class \code{"smooth.spline"}. Details of +##' cubic smoothing spline fit (see +##' \code{\link[stats]{smooth.spline}}). +##' @slot spline.deriv Object of class \code{"list"}. A list with the +##' first derivative of the smoothing spline (see +##' \code{\link[stats]{predict.smooth.spline}}). +##' @slot descent.crit Object of class \code{"numeric"}. The index of the +##' observation at which the descent was deemed to have ended (from +##' initial surface observation). +##' @slot ascent.crit Object of class \code{"numeric"}. the index of the +##' observation at which the ascent was deemed to have ended (from +##' initial surface observation). +##' @slot descent.crit.rate Object of class \code{"numeric"}. The rate of +##' descent corresponding to the critical quantile used. +##' @slot ascent.crit.rate Object of class \code{"numeric"}. The rate of +##' ascent corresponding to the critical quantile used. +##' @section Objects from the Class: +##' Objects can be created by calls of the form \code{new("diveModel", +##' ...)}. +##' +##' \sQuote{diveModel} objects contain all relevant details of the process to +##' identify phases of a dive. Objects of this class are typically generated +##' during depth calibration, using \code{\link{calibrateDepth}}, more +##' specifically \code{\link{.cutDive}}. +##' @author Sebastian P. Luque \email{spluque@@gmail.com} +##' @seealso \code{\link{getDiveDeriv}}, \code{\link{plotDiveModel}} +##' @keywords classes +##' @examples +##' showClass("diveModel") +##' +##' \donttest{## Too long for checks +##' ## Continuing the Example from '?calibrateDepth': +##' utils::example("calibrateDepth", package="diveMove", +##' ask=FALSE, echo=FALSE, run.donttest=TRUE) +##' dcalib # the 'TDRcalibrate' that was created +##' +##' ## Compare dive models for dive phase detection +##' diveNo <- 255 +##' diveX <- as.data.frame(extractDive(dcalib, diveNo=diveNo)) +##' diveX.m <- cbind(as.numeric(row.names(diveX[-c(1, nrow(diveX)), ])), +##' diveX$depth[-c(1, nrow(diveX))], +##' diveX$time[-c(1, nrow(diveX))]) +##' +##' ## calibrateDepth() default unimodal regression. Number of inner knots is +##' ## either 10 or the number of samples in the dive, whichever is larger. +##' (phases.uni <- diveMove:::.cutDive(diveX.m, smooth.par=0.2, knot.factor=20, +##' dive.model="unimodal", +##' descent.crit.q=0.01, ascent.crit.q=0)) +##' ## Smoothing spline model, using default smoothing parameter. 
+##' (phases.spl <- diveMove:::.cutDive(diveX.m, smooth.par=0.2, knot.factor=20, +##' dive.model="smooth.spline", +##' descent.crit.q=0.01, ascent.crit.q=0)) +##' plotDiveModel(phases.spl, +##' diveNo=paste(diveNo, ", smooth.par=", 0.2, sep="")) +##' plotDiveModel(phases.uni, diveNo=paste(diveNo)) +##' +##' } setClass("diveModel", slots=c(label.matrix="matrix", model="character", dive.spline="dive.spline", spline.deriv="xyVector", @@ -97,3 +273,36 @@ setClass("diveModel", }) setOldClass("nls") # For bout methods + +##' Class "Bouts" for representing Poisson mixtures for identification of +##' behavioural bouts +##' +##' Base class for storing key information for modelling and detecting +##' bouts in behavioural data. +##' @aliases Bouts +##' @slot x Object of class \code{"numeric"}. Data to be modelled. +##' @slot method Object of class \code{"character"}. A string indicating +##' the type of frequency to calculate from \code{x}: "standard" or +##' "seq.diff". If "standard", frequencies are calculated directly +##' from \code{x}, and from the sequential differences in \code{x} +##' otherwise. +##' @slot lnfreq Object of class \code{\link{data.frame}}. Columns named +##' \var{lnfreq} (log frequencies) and \var{x} (mid points of histogram +##' bins). +##' @section Objects from the class: +##' Objects can be created most conveniently via the +##' \code{\link{boutfreqs}} function, which sets the \code{lnfreq} slot, +##' but can also be created via \code{new("Bouts")}. +##' @author Sebastian P. Luque \email{spluque@@gmail.com} +##' @seealso \code{\link{boutfreqs}} +##' @keywords classes +setClass("Bouts", + slots=c(x="numeric", method="character", lnfreq="data.frame"), + prototype=prototype(method="standard", lnfreq=data.frame()), + validity=function(object) { + meths <- c("standard", "seq.diff") + meths.msg = paste(meths, collapse=", ") + if (! object@method %in% meths) + return(paste("method must be one of:", meths.msg)) + return(TRUE) + }) diff --git a/R/AllGenerics.R b/R/AllGenerics.R index dd20f23..b7542cb 100644 --- a/R/AllGenerics.R +++ b/R/AllGenerics.R @@ -1,5 +1,6 @@ ###_ + Plotting + if (!isGeneric("plotTDR")) { setGeneric("plotTDR", function(x, y, ...) standardGeneric("plotTDR")) @@ -16,7 +17,8 @@ if (!isGeneric("plotDiveModel")) { } if (!isGeneric("plotBouts")) { - setGeneric("plotBouts", function(fit, ...) standardGeneric("plotBouts")) + setGeneric("plotBouts", + function(fit, obj, ...) standardGeneric("plotBouts")) } ###_ + Accessors @@ -103,13 +105,44 @@ if (!isGeneric("timeBudget")) { function(obj, ignoreZ) standardGeneric("timeBudget")) } -###_ + bec2, and bec3 -if (!isGeneric("bec2")) { - setGeneric("bec2", function(fit) standardGeneric("bec2")) +###_ . Bouts + +if (!isGeneric("boutsNLSll")) { + setGeneric("boutsNLSll", + function(obj, coefs) + standardGeneric("boutsNLSll")) +} + +if (!isGeneric("boutinit")) { + setGeneric("boutinit", + function(obj, x.break, plot=TRUE, ...) + standardGeneric("boutinit")) +} + +if (!isGeneric("fitNLSbouts")) { + setGeneric("fitNLSbouts", + function(obj, start, maxiter, ...) + standardGeneric("fitNLSbouts")) +} + +if (!isGeneric("fitMLEbouts")) { + setGeneric("fitMLEbouts", + function(obj, start, ...) + standardGeneric("fitMLEbouts")) +} + +if (!isGeneric("bec")) { + setGeneric("bec", function(fit) standardGeneric("bec")) +} + +if (!isGeneric("labelBouts")) { + setGeneric("labelBouts", + function(obj, becs, ...) 
standardGeneric("labelBouts")) } -if (!isGeneric("bec3")) { - setGeneric("bec3", function(fit) standardGeneric("bec3")) +if (!isGeneric("plotBoutsCDF")) { + setGeneric("plotBoutsCDF", + function(fit, obj, ...) standardGeneric("plotBoutsCDF")) } diff --git a/R/AllMethod.R b/R/AllMethod.R index 055cd73..2060cef 100644 --- a/R/AllMethod.R +++ b/R/AllMethod.R @@ -28,6 +28,76 @@ setMethod("show", signature=signature(object="TDR"), } }) +##' Methods for plotting objects of class "TDR" and "TDRcalibrate" +##' +##' Main plotting method for objects of these classes. Plot and optionally +##' set zero-offset correction windows in \acronym{TDR} records, with the +##' aid of a graphical user interface (GUI), allowing for dynamic selection +##' of offset and multiple time windows to perform the adjustment. +##' +##' @aliases plotTDR +##' @param x \code{POSIXct} object with date and time, \code{\link{TDR}}, +##' or \code{\link{TDRcalibrate}} object. +##' @param y numeric vector with depth in m. +##' @param concurVars matrix with additional variables in each column to +##' plot concurrently with depth. For the (\code{TDR},\code{missing}) +##' and (\code{TDRcalibrate},\code{missing}) methods, a +##' \code{\link{character}} vector naming additional variables from the +##' \code{concurrentData} slot to plot, if any. +##' @param xlim \code{POSIXct} or numeric vector of length 2, with lower +##' and upper limits of time to be plotted. +##' @param depth.lim numeric vector of length 2, with the lower and upper +##' limits of depth to be plotted. +##' @param ylab.depth character string to label the corresponding y-axes. +##' @param concurVarTitles character vector of titles to label each new +##' variable given in \var{concurVars}. +##' @param sunrise.time,sunset.time character string with time of sunrise +##' and sunset, respectively, in 24 hr format. This is used for +##' shading night time. +##' @param night.col color for shading night time. +##' @param dry.time subset of time corresponding to observations considered +##' to be dry. +##' @param phase.factor factor dividing rows into sections. +##' @param ... Arguments for the \code{(POSIXt,numeric)} method. For +##' \code{(TDRcalibrate,missing)}, these are arguments for the +##' appropriate methods. +##' @param diveNo numeric vector or scalar with dive numbers to plot. +##' @param what character: what aspect of the \code{\link{TDRcalibrate}} to +##' plot, which selects the method to use for plotting. +##' @return If called with the \code{interact} argument set to \code{TRUE}, +##' returns a list (invisibly) with as many components as sections of +##' the record that were zero-offset corrected, each consisting of two +##' further lists with the same components as those returned by +##' \code{\link{locator}}. +##' @author Sebastian P. Luque \email{spluque@@gmail.com}, with many ideas +##' from CRAN package sfsmisc. 
+##' @seealso \code{\link{calibrateDepth}}, \code{\link{.zoc}} +##' @keywords methods iplot +##' @describeIn plotTDR Base method plotting numeric vector against POSIXt +##' object +##' @examples +##' \donttest{## Too long for checks +##' +##' ## Continuing the Example from '?calibrateDepth': +##' utils::example("calibrateDepth", package="diveMove", +##' ask=FALSE, echo=FALSE, run.donttest=TRUE) +##' ## Use interact=TRUE (default) to set an offset interactively +##' ## Plot the 'TDR' object +##' plotTDR(getTime(divesTDR), getDepth(divesTDR)) +##' plotTDR(divesTDR) +##' +##' ## Plot different aspects of the 'TDRcalibrate' object +##' plotTDR(dcalib) +##' plotTDR(dcalib, diveNo=19:25) +##' plotTDR(dcalib, what="dive.model", diveNo=25) +##' if (dev.interactive(orNone=TRUE)) { +##' ## Add surface observations and interact +##' plotTDR(dcalib, surface=TRUE) +##' ## Plot one dive +##' plotTDR(dcalib, diveNo=200) +##' } +##' +##' } setMethod("plotTDR", signature(x="POSIXt", y="numeric"), function(x, y, concurVars=NULL, xlim=NULL, depth.lim=NULL, ylab.depth="depth (m)", @@ -46,6 +116,8 @@ setMethod("plotTDR", signature(x="POSIXt", y="numeric"), phase.factor=phase.factor) }) +##' @describeIn plotTDR Interactive graphical display of time-depth data, +##' with zooming and panning capabilities. setMethod("plotTDR", signature(x="TDR", y="missing"), function(x, y, concurVars, concurVarTitles, ...) { if (!missing(concurVars)) { @@ -130,6 +202,21 @@ setMethod("show", signature=signature(object="TDRcalibrate"), } do.call(plotTDR, args=ell) } + +##' @describeIn plotTDR plot selected aspects of \code{\link{TDRcalibrate}} +##' object. Currently, two aspects have plotting methods: +##' +##' * \code{phases} (Optional arguments: \code{concurVars}, \code{surface}) +##' Plots all dives, labelled by the activity phase they belong to. It +##' produces a plot consisting of one or more panels; the first panel +##' shows depth against time, and additional panels show other concurrent +##' data in the object. Optional argument \code{concurVars} is a +##' character vector indicating which additional components from the +##' \code{concurrentData} slot to plot, if any. Optional argument +##' \code{surface} is a logical: whether to plot surface readings. +##' +##' * \code{dive.model} Plots the dive model for the selected dive number +##' (\code{diveNo} argument). setMethod("plotTDR", signature(x="TDRcalibrate", y="missing"), function(x, y, what=c("phases", "dive.model"), diveNo=seq(max(getDAct(x, "dive.id"))), ...) { @@ -209,6 +296,61 @@ setMethod("show", signature=signature(object="diveModel"), object@ascent.crit.rate, "\n", sep="") }) +##' Methods for plotting models of dive phases +##' +##' All methods produce a double panel plot. The top panel shows the depth +##' against time, the cubic spline smoother, the identified descent and +##' ascent phases (which form the basis for identifying the rest of the +##' dive phases), while the bottom panel shows the first derivative of the +##' smooth trace. +##' +##' @aliases plotDiveModel +##' @param x A \code{\link{diveModel}} (diveModel,missing method), +##' \code{\link{numeric}} vector of time step observations +##' (numeric,numeric method), or \code{\link{TDRcalibrate}} object +##' (TDRcalibrate,numeric method). +##' @param diveNo integer representing the dive number selected for +##' plotting. +##' @param y numeric vector with depth observations at each time step. +##' @param times.s numeric vector with time steps used to generate the +##' smoothing spline (i.e. 
the knots, see \code{\link{diveModel}}). +##' @param depths.s numeric vector with smoothed depth (see +##' \code{\link{diveModel}}). +##' @param d.crit integer denoting the index where descent ends in the +##' observed time series (see \code{\link{diveModel}}). +##' @param a.crit integer denoting the index where ascent begins in the +##' observed time series (see \code{\link{diveModel}}). +##' @param times.deriv numeric vector representing the time steps where the +##' derivative of the smoothing spline was evaluated (see +##' \code{\link{diveModel}}). +##' @param depths.deriv numeric vector representing the derivative of the +##' smoothing spline evaluated at \code{times.deriv} (see +##' \code{\link{diveModel}}). +##' @param d.crit.rate numeric scalar: vertical rate of descent +##' corresponding to the quantile used (see \code{\link{diveModel}}). +##' @param a.crit.rate numeric scalar: vertical rate of ascent +##' corresponding to the quantile used (see \code{\link{diveModel}}). +##' @author Sebastian P. Luque \email{spluque@@gmail.com} +##' @seealso \code{\link{diveModel}} +##' @keywords methods +##' @describeIn plotDiveModel Given a \code{\link{diveModel}} object and +##' (possibly) the dive number that it corresponds to, the plot shows +##' the model data. +##' @examples +##' \donttest{## Too long for checks +##' +##' ## Continuing the Example from '?calibrateDepth': +##' utils::example("calibrateDepth", package="diveMove", +##' ask=FALSE, echo=FALSE, run.donttest=TRUE) +##' +##' ## 'diveModel' method +##' dm <- getDiveModel(dcalib, 100) +##' plotDiveModel(dm, diveNo=100) +##' +##' ## 'TDRcalibrate' method +##' plotDiveModel(dcalib, diveNo=100) +##' +##' } setMethod("plotDiveModel", signature(x="diveModel", y="missing"), function(x, diveNo) { if (missing(diveNo)) diveNo <- "Unknown" @@ -230,6 +372,10 @@ setMethod("plotDiveModel", signature(x="diveModel", y="missing"), d.crit.rate=d.crit.rate, a.crit.rate=a.crit.rate) }) +##' @describeIn plotDiveModel Given a \code{\link{TDRcalibrate}} object and +##' a dive number to extract from it, this method plots the observed +##' data and the model. The intended use of this method is through +##' \code{\link{plotTDR}} when \code{what="dive.model"}. setMethod("plotDiveModel", signature(x="TDRcalibrate", y="missing"), function(x, diveNo) { @@ -260,6 +406,8 @@ setMethod("plotDiveModel", d.crit.rate=d.crit.rate, a.crit.rate=a.crit.rate) }) +##' @describeIn plotDiveModel Base method, requiring all aspects of the +##' model to be provided. setMethod("plotDiveModel", signature(x="numeric", y="numeric"), function(x, y, times.s, depths.s, d.crit, a.crit, diveNo=1, @@ -308,49 +456,760 @@ setMethod("plotDiveModel", labels=c("descent", "ascent"), pos=1, cex=0.7) }) -###_ . plotBouts -setMethod("plotBouts", signature(fit="nls"), - function(fit, ...) { - ncoefs <- as.character(length(coef(fit))) - if (! (ncoefs == "4" || ncoefs == "6")) { - msg <- paste("fitted model must have 4 (2-process) or", - "6 (3-process) coefficients") - stop(msg) +###_ . Bouts + +##' Generalized log likelihood function taking any number of Poisson +##' processes in a "broken-stick" model +##' +##' @aliases boutsNLSll +##' @param obj Object of class \code{\link{Bouts}} or numeric vector of +##' independent data to be described by the function. +##' @param coefs matrix of coefficients (\code{a} and \code{lambda}) in +##' rows for each process of the model in columns. +##' @return numeric vector as \code{x} with the evaluated function. +##' @author Sebastian P. 
Luque \email{spluque@@gmail.com} +##' @describeIn boutsNLSll Log likelihood \code{Bouts} method +setMethod("boutsNLSll", signature(obj="Bouts"), + function(obj, coefs) { + x <- obj@x + boutsNLSll(x, coefs=coefs) + }) + +##' @describeIn boutsNLSll Log likelihood function \code{numeric} method +setMethod("boutsNLSll", signature(obj="numeric"), + function(obj, coefs) { + calc_term <- function(params) { + params[1] * params[2] * exp(-params[2] * obj) } - switch(ncoefs, - "4" = { - plotBouts2.nls(fit=fit, - lnfreq=eval.parent(fit$data), ...) - }, - "6" = { - plotBouts3.nls(fit=fit, - lnfreq=eval.parent(fit$data), ...) - }) + terms <- apply(coefs, 2, calc_term) + if (is.vector(terms)) { + log(sum(terms)) + } else{ log(apply(terms, 1, sum)) } + }) + +##' Fit "broken stick" model to log frequency data for identification of +##' bouts of behaviour +##' +##' Fits "broken stick" model to the log frequencies modelled as a function +##' of \var{x} (well, the midpoints of the binned data), using chosen +##' value(s) to separate the two or three processes. +##' +##' @aliases boutinit +##' @param obj Object of class \code{\link{Bouts}} or +##' \code{\link{data.frame}}. +##' @param x.break Numeric vector of length 1 or 2 with \code{x} value(s) +##' defining the break(s) point(s) for broken stick model, such that +##' \code{x} < \code{x.break}[1] is 1st process, and \code{x} >= +##' \code{x.break}[1] & \code{x} < \code{x.break}[2] is 2nd one, and +##' \code{x} >= \code{x.break}[2] is 3rd one. +##' @param plot logical, whether to plot results or not. +##' @param ... arguments passed to \code{\link{plot}} (must exclude +##' \code{type}). +##' @return (2,N) matrix with as many columns as the number of processes +##' implied by \code{x.break} (i.e. \code{length(x.break) + 1}). Rows +##' are named \code{a} and \code{lambda}, corresponding to starting +##' values derived from broken stick model. A plot is produced as a +##' side effect if argument \code{plot} is \code{TRUE}. +##' @author Sebastian P. Luque \email{spluque@@gmail.com} +##' @describeIn boutinit Fit "broken-stick" model on \code{data.frame} +##' object +##' @examples +##' ## 2-process +##' utils::example("rmixexp", package="diveMove", ask=FALSE) +##' ## 'rndproc2' is a random sample vector from the example +##' xbouts2 <- boutfreqs(rndprocs2, 5) # Bouts class result +##' (startval2 <- boutinit(xbouts2, 80)) +##' +##' ## 3-process +##' ## 'rndproc3' is a random sample vector from the example +##' xbouts3 <- boutfreqs(rndprocs3, 5) +##' (startval3 <- boutinit(xbouts3, c(75, 220))) +setMethod("boutinit", signature(obj="data.frame"), + function(obj, x.break, plot=TRUE, ...) { + nproc <- length(x.break) + if (nproc > 2) stop ("x.break must be of length 1 or 2") + procf <- cut(obj$x, breaks=c(min(obj$x), x.break, + max(obj$x)), + include.lowest=TRUE, right=TRUE) + coefs <- by(obj, procf, function(k) { + coef(lm(lnfreq ~ x, k))}) + pars <- lapply(coefs, function(p) { + lambda <- as.vector(-p[2]) + a <- as.vector(exp(p[1]) / lambda) + c(a=a, lambda=lambda) + }) + parsmat <- matrix(unlist(pars), nrow=2, ncol=length(pars)) + dimnames(parsmat) <- list(c("a", "lambda"), names(pars)) + if (plot) { + requireNamespace("lattice", quietly=TRUE) || + stop("lattice package is not available") + panelFUN <- function(x, y, ..., pars=parsmat, ab=coefs) { + lattice::panel.xyplot(x, y, ...) 
+ "procFun" <- function(x) { boutsNLSll(x, pars) } + lattice::panel.curve(procFun, min(x), + max(x), add=TRUE) + for (l in seq_len(length(ab))) { + lattice::panel.abline(ab[[l]], lty=l) + } + } + pp <- lattice::xyplot(lnfreq ~ x, obj, groups=procf, + pars=parsmat, panel=panelFUN, ...) + print(pp) + } + parsmat }) -setMethod("plotBouts", signature(fit="mle"), - function(fit, x, ...) { - ncoefs <- as.character(length(coef(fit))) - if (! (ncoefs == "3" || ncoefs == "5")) { - msg <- paste("fitted model must have 3 (2-process) or", - "5 (3-process) coefficients") - stop(msg) + +##' @describeIn boutinit Fit "broken-stick" model on \code{Bouts} object +setMethod("boutinit", signature(obj="Bouts"), + function(obj, x.break, plot=TRUE, ...) { + lnfreq <- obj@lnfreq + boutinit(lnfreq, x.break=x.break, plot=plot, ...) + }) + +##' Fit mixture of Poisson Processes to Log Frequency data via Non-linear +##' Least Squares regression +##' +##' Methods for modelling a mixture of 2 or 3 random Poisson processes to +##' histogram-like data of log frequency vs interval mid points. This +##' follows Sibly et al. (1990) method. +##' +##' @aliases fitNLSbouts +##' @param obj Object of class \code{\link{Bouts}}, or +##' \code{\link{data.frame}} with named components \var{lnfreq} (log +##' frequencies) and corresponding \var{x} (mid points of histogram +##' bins). +##' @param start,maxiter Arguments passed to \code{\link{nls}}. +##' @param ... Optional arguments passed to \code{\link{nls}}. +##' @return \code{nls} object resulting from fitting this model to data. +##' @references +##' Sibly, R.; Nott, H. and Fletcher, D. (1990) Splitting behaviour into +##' bouts Animal Behaviour \bold{39}, 63-69. +##' @seealso \code{fitMLEbouts} for a better approach; +##' \code{\link{boutfreqs}}; \code{\link{boutinit}} +##' @author Sebastian P. Luque \email{spluque@@gmail.com} +##' @keywords models manip +##' @describeIn fitNLSbouts Fit NLS model on \code{data.frame} +##' @examples +##' ## Run example to retrieve random samples for two- and three-process +##' ## Poisson mixtures with known parameters as 'Bouts' objects +##' ## ('xbouts2', and 'xbouts3'), as well as starting values from +##' ## broken-stick model ('startval2' and 'startval3') +##' utils::example("boutinit", package="diveMove", ask=FALSE) +##' +##' ## 2-process +##' bout2.fit <- fitNLSbouts(xbouts2, start=startval2, maxiter=500) +##' summary(bout2.fit) +##' bec(bout2.fit) +##' +##' ## 3-process +##' ## The problem requires using bound constraints, which is available +##' ## via the 'port' algorithm +##' l_bnds <- c(100, 1e-3, 100, 1e-3, 100, 1e-6) +##' u_bnds <- c(5e4, 1, 5e4, 1, 5e4, 1) +##' bout3.fit <- fitNLSbouts(xbouts3, start=startval3, maxiter=500, +##' lower=l_bnds, upper=u_bnds, algorithm="port") +##' plotBouts(bout3.fit, xbouts3) +setMethod("fitNLSbouts", signature(obj="data.frame"), + function(obj, start, maxiter, ...) { + dim0 <- dim(start) + + .nlsFUN <- function(x, vcoefs) { + coefs <- matrix(vcoefs, nrow=dim0[1], ncol=dim0[2]) + boutsNLSll(x, coefs) } - switch(ncoefs, + + start0.names <- apply(expand.grid(dimnames(start)), 1, + function(x) paste(x[1], x[2], sep="_")) + start0 <- as.vector(start) + names(start0) <-start0.names + start0 + fit.nls <- nls(lnfreq ~ .nlsFUN(x, coefs), data=obj, + start=list(coefs=start0), + control=nls.control(maxiter=maxiter), ...) + fit.nls + }) + +##' @describeIn fitNLSbouts Fit NLS model on \code{Bouts} object +setMethod("fitNLSbouts", signature(obj="Bouts"), + function(obj, start, maxiter, ...) 
{ + lnfreq <- obj@lnfreq + fitNLSbouts(lnfreq, start=start, maxiter=maxiter) + }) + +##' Calculate bout ending criteria from model coefficients +##' +##' @aliases bec +##' @param fit Object of class \code{nls} or \code{mle}. +##' @return \code{numeric} vector with the bout ending criterion or +##' criteria derived from the model. +##' @author Sebastian P. Luque \email{spluque@@gmail.com} +##' @keywords models manip +##' @describeIn boutsBEC Calculate BEC on \code{nls} object +setMethod("bec", signature(fit="nls"), + function(fit) { + coefs <- coef(fit) + coefs2D <- matrix(coefs, 2, length(coefs) / 2) + becs <- rep(NA_real_, ncol(coefs2D) - 1) + for (coln in seq_len(ncol(coefs2D) - 1)) { + procn1 <- coefs2D[, coln] + procn2 <- coefs2D[, coln + 1] + a1.hat <- procn1[1] + lambda1.hat <- procn1[2] + a2.hat <- procn2[1] + lambda2.hat <- procn2[2] + becs[coln] <- (log((a1.hat * lambda1.hat) / + (a2.hat * lambda2.hat)) / + (lambda1.hat - lambda2.hat)) + } + becs + }) + +##' @describeIn boutsBEC Calculate BEC on \code{mle} object +setMethod("bec", signature(fit="mle"), + function(fit) { + coefs <- coef(fit) + ncoefs <- length(coefs) + + switch(as.character(ncoefs), "3" = { - plotBouts2.mle(fit=fit, x=x, ...) + p_hat <- as.vector(coefs[1]) + lambda0_hat <- as.vector(coefs[2]) + lambda1_hat <- as.vector(coefs[3]) + log((p_hat * lambda0_hat) / + ((1 - p_hat) * lambda1_hat)) / + (lambda0_hat - lambda1_hat) }, "5" = { - stop("To be implemented") + p_hat <- as.vector(coefs[1:2]) + p0_hat <- p_hat[1] + p1_hat <- p_hat[2] + lambdas_hat <- as.vector(coefs[3:ncoefs]) + lambda0_hat <- lambdas_hat[1] + lambda1_hat <- lambdas_hat[2] + lambda2_hat <- lambdas_hat[3] + bec0 = (log((p0_hat * lambda0_hat) / + ((1 - p0_hat) * lambda1_hat)) / + (lambda0_hat - lambda1_hat)) + bec1 = (log((p1_hat * lambda1_hat) / + ((1 - p1_hat) * lambda2_hat)) / + (lambda1_hat - lambda2_hat)) + c(bec0, bec1) + }, + stop("Not implemented")) + + }) + +##' Plot fitted Poisson mixture model and data +##' +##' @aliases plotBouts +##' @param fit Object of class \code{nls} or \code{mle}. +##' @param obj Object of class \code{\link{Bouts}}, +##' \code{\link{data.frame}} with columns named \code{lnfreq} and +##' \code{x} (when \code{fit -> nls object}, or numeric vector (valid +##' when \code{fit -> mle object}. +##' @param bec.lty Line type specification for drawing the BEC reference +##' line. +##' @param ... Arguments passed to \code{\link{plot.default}}. +##' @author Sebastian P. Luque \email{spluque@@gmail.com} +##' @seealso \code{\link{boutfreqs}}, \code{\link{fitNLSbouts}}, +##' \code{\link{fitMLEbouts}} +##' @keywords methods models plot +##' @describeIn plotBouts Plot fitted \code{nls} model on \code{data.frame} +##' object +setMethod("plotBouts", signature(fit="nls", obj="data.frame"), + function(fit, obj, bec.lty=2, ...) { + lnfreq <- obj # copy for sanity + coefs <- coef(fit) + coefs2D <- matrix(coefs, 2, length(coefs) / 2) + becx <- bec(fit) + plot(lnfreq ~ x, lnfreq, type="n", ...) 
+ curve(boutsNLSll(x, coefs2D), min(lnfreq$x), max(lnfreq$x), + add=TRUE) + rgbgray <- 190 / 255 + points(lnfreq ~ x, lnfreq, pch=20, + col=rgb(rgbgray, rgbgray, rgbgray, alpha=0.4 * 255, + maxColorValue=255)) + becy <- predict(fit, list(x=becx)) + points(becx, becy, pch=25, col=rgb(1, 0, 0), bg=rgb(1, 0, 0)) + usr <- par("usr") + arrows(becx, becy, becx, usr[3], code=0, lty=bec.lty) + for (bec.i in seq_len(length(becx))) { + text(becx[bec.i], becy[bec.i], pos=4, offset=1, + paste(paste("bec_", bec.i - 1, "=", sep=""), + round(becx[bec.i], 2), sep=""), + bty="n", cex=0.8) + } + }) + +##' @describeIn plotBouts Plot fitted \code{nls} model on \code{Bouts} +##' object +setMethod("plotBouts", signature(fit="nls", obj="Bouts"), + function(fit, obj, bec.lty=2, ...) { + lnfreq <- obj@lnfreq + plotBouts(fit, obj=lnfreq, bec.lty=bec.lty, ...) + }) + +##' @describeIn plotBouts Plot fitted \code{mle} model on \code{numeric} +##' object +##' @param xlab,ylab Label for x and y axis, respectively. +setMethod("plotBouts", signature(fit="mle", obj="numeric"), + function(fit, obj, xlab="x", ylab="Log Frequency", + bec.lty=2, ...) { + x <- obj # copy for sanity + coefs <- coef(fit) + ncoefs <- length(coefs) + becx <- bec(fit) + range.x <- range(x, na.rm=TRUE) + llfun <- ifelse(ncoefs == 3, .bouts2MLEll, .bouts3MLEll) + + if (ncoefs == 3) { + p_hat <- as.vector(coefs[1]) + lambda0_hat <- as.vector(coefs[2]) + lambda1_hat <- as.vector(coefs[3]) + curve(llfun(x, p=p_hat, lambda0=lambda0_hat, + lambda1=lambda1_hat), + from=range.x[1], to=range.x[2], xlab=xlab, + ylab=ylab, xaxs="i", yaxs="i", ...) + becy <- llfun(becx, p=p_hat, lambda0=lambda0_hat, + lambda1=lambda1_hat) + } else { + p_hat <- as.vector(coefs[1:2]) + p0_hat <- p_hat[1] + p1_hat <- p_hat[2] + lambdas_hat <- as.vector(coefs[3:ncoefs]) + lambda0_hat <- lambdas_hat[1] + lambda1_hat <- lambdas_hat[2] + lambda2_hat <- lambdas_hat[3] + curve(llfun(x, p0=p0_hat, p1=p1_hat, lambda0=lambda0_hat, + lambda1=lambda1_hat, lambda2=lambda2_hat), + from=range.x[1], to=range.x[2], xlab=xlab, + ylab=ylab, xaxs="i", yaxs="i", ...) + becy <- llfun(becx, p0=p0_hat, p1=p1_hat, + lambda0=lambda0_hat, + lambda1=lambda1_hat, + lambda2=lambda2_hat) + } + rug(jitter(x), side=3, ticksize=0.015, quiet=TRUE) + usr <- par("usr") + points(becx, becy, pch=25, col=rgb(1, 0, 0), bg=rgb(1, 0, 0)) + arrows(becx, becy, becx, usr[3], code=0, lty=bec.lty) + for (bec.i in seq_len(length(becx))) { + text(becx[bec.i], becy[bec.i], pos=4, offset=1, + paste(paste("bec_", bec.i - 1, "=", sep=""), + round(becx[bec.i], 2), sep=""), + bty="n", cex=0.8) + } + }) + +##' @describeIn plotBouts Plot fitted \code{mle} model on \code{Bouts} +##' object +setMethod("plotBouts", signature(fit="mle", obj="Bouts"), + function(fit, obj, xlab="x", ylab="Log Frequency", + bec.lty=2, ...) { + x <- obj@x + plotBouts(fit, obj=x, xlab=xlab, ylab=ylab, + bec.lty=bec.lty, ...) + }) + + +".plotECDF" <- function(xpred.exp, ypred, xlim, ...) { + ## Helper function for plotting the ECDF + if (missing(xlim)) { + rx <- range(xpred.exp) + dr <- max(0.07 * diff(rx), median(diff(xpred.exp))) + xlim <- rx + dr * c(1e-3, 1) + } + plot(stepfun(xpred.exp, c(ypred[1], ypred)), do.points=FALSE, + verticals=TRUE, xlim=xlim, las=1, xaxs="i", log="x", ...) 
+} + + +".plotCDF" <- function(fit, xpred.exp, pars.l, draw.bec, bec.lty) { + ## Helper function for plotting the deterministic CDF + plot(function(x) { + boutsCDF(x, pars.l[["p"]], pars.l[["lambdas"]]) + }, min(xpred.exp), max(xpred.exp), add=TRUE) + if (draw.bec) { + becx <- bec(fit) + becy <- boutsCDF(becx, pars.l[["p"]], pars.l[["lambdas"]]) + bec.col <- rgb(1, 0, 0) + points(becx, becy, pch=25, col=bec.col, bg=bec.col) + arrows(becx, becy, becx, 0, code=0, lty=bec.lty) + for (bec.i in seq_len(length(becx))) { + text(becx[bec.i], becy[bec.i], pos=4, offset=1, + paste(paste("bec_", bec.i - 1, "=", sep=""), + round(becx[bec.i], 2), sep=""), + bty="n", cex=0.8) + } + } +} + +##' Plot empirical and deterministic cumulative frequency distribution +##' Poisson mixture data and model +##' +##' @aliases plotBoutsCDF +##' @param fit Object of class \code{nls} or \code{mle}. +##' @param obj Object of class \code{\link{Bouts}}. +##' @param xlim 2-length vector with limits for the x axis. If omitted, a +##' sensible default is calculated. +##' @param draw.bec logical; whether to draw the BECs +##' @param bec.lty Line type specification for drawing the BEC reference +##' line. +##' @param ... Arguments passed to \code{\link{plot.default}}. +##' @author Sebastian P. Luque \email{spluque@@gmail.com} +##' @keywords methods models plot +##' @describeIn plotBoutsCDF Plot (E)CDF on \code{\link{nls}} fit object +##' and numeric vector +setMethod("plotBoutsCDF", signature(fit="nls", obj="numeric"), + function(fit, obj, xlim, draw.bec=FALSE, bec.lty=2, ...) { + x <- log1p(obj) + x.ecdf <- ecdf(x) + xpred <- seq(min(x), max(x), length.out=101) + ypred <- x.ecdf(xpred) + xpred.exp <- expm1(xpred) + .plotECDF(xpred.exp, ypred, xlim, ...) + ## Prepare p to plot estimated CDF + coefs <- coef(fit) + coefs2D <- matrix(coefs, 2, length(coefs) / 2) + p_hat <- calc.p(coefs2D) + ## parse p and lambda into list + pars.l <- build.p.lambda(c(p_hat, coefs2D[2, ])) + .plotCDF(fit, xpred.exp=xpred.exp, pars.l=pars.l, + draw.bec=draw.bec, bec.lty=bec.lty) + }) + +##' @describeIn plotBoutsCDF Plot (E)CDF on \code{\link{nls}} fit object +##' and \code{\link{Bouts}} object +setMethod("plotBoutsCDF", signature(fit="nls", obj="Bouts"), + function(fit, obj, xlim, draw.bec=FALSE, bec.lty=2, ...) { + x <- obj@x + plotBoutsCDF(fit, obj=x, xlim=xlim, draw.bec=draw.bec, + bec.lty=bec.lty, ...) + }) + +##' @describeIn plotBoutsCDF Plot (E)CDF on numeric vector +setMethod("plotBoutsCDF", signature(fit="mle", obj="numeric"), + function(fit, obj, xlim, draw.bec=FALSE, bec.lty=2, ...) { + x <- log1p(obj) + x.ecdf <- ecdf(x) + xpred <- seq(min(x), max(x), length.out=101) + ypred <- x.ecdf(xpred) + xpred.exp <- expm1(xpred) + .plotECDF(xpred.exp, ypred, xlim, ...) + ## Prepare parameters for plotting CDF + coefs <- coef(fit) + ncoefs <- length(coefs) + pars.l <- build.p.lambda(coefs) # parse p and lambda into list + .plotCDF(fit, xpred.exp=xpred.exp, pars.l=pars.l, + draw.bec=draw.bec, bec.lty=bec.lty) + }) + +##' @describeIn plotBoutsCDF Plot (E)CDF on \code{\link{mle}} fit object +setMethod("plotBoutsCDF", signature(fit="mle", obj="Bouts"), + function(fit, obj, xlim, draw.bec=FALSE, bec.lty=2, ...) { + x <- obj@x + plotBoutsCDF(fit, obj=x, xlim=xlim, draw.bec=draw.bec, + bec.lty=bec.lty, ...) + }) + +##' Maximum Likelihood Model of mixtures of 2 or 3 Poisson Processes +##' +##' Functions to model a mixture of 2 random Poisson processes to identify +##' bouts of behaviour. This follows Langton et al. (1995). 
+##' +##' Mixtures of 2 or 3 Poisson processes are supported. Even in this +##' relatively simple case, it is very important to provide good starting +##' values for the parameters. +##' +##' One useful strategy to get good starting parameter values is to proceed +##' in 4 steps. First, fit a broken stick model to the log frequencies of +##' binned data (see \code{\link{boutinit}}), to obtain estimates of 4 +##' parameters in a 2-process model (Sibly et al. 1990), or 6 in a +##' 3-process model. Second, calculate parameter(s) \var{p} from the alpha +##' parameters obtained from the broken stick model, to get tentative +##' initial values as in Langton et al. (1995). Third, obtain MLE estimates +##' for these parameters, but using a reparameterized version of the -log +##' L2 function. Lastly, obtain the final MLE estimates for the 3 +##' parameters by using the estimates from step 3, un-transformed back to +##' their original scales, maximizing the original parameterization of the +##' -log L2 function. +##' +##' \code{\link{boutinit}} can be used to perform step 1. Calculation of +##' the mixing parameters \var{p} in step 2 is trivial from these +##' estimates. Function \code{\link{boutsMLEll.chooser}} defines a +##' reparameterized version of the -log L2 function given by Langton et +##' al. (1995), so can be used for step 3. This uses a logit (see +##' \code{\link{logit}}) transformation of the mixing parameter \var{p}, +##' and log transformations for both density parameters \var{lambda1} and +##' \var{lambda2}. Function \code{\link{boutsMLEll.chooser}} can be used +##' again to define the -log L2 function corresponding to the +##' un-transformed model for step 4. +##' +##' \code{fitMLEbouts} is the function performing the main job of +##' maximizing the -log L2 functions, and is essentially a wrapper around +##' \code{\link[stats4]{mle}}. It only takes the -log L2 function, a list +##' of starting values, and the variable to be modelled, all of which are +##' passed to \code{\link[stats4]{mle}} for optimization. Additionally, +##' any other arguments are also passed to \code{\link[stats4]{mle}}, hence +##' great control is provided for fitting any of the -log L2 functions. +##' +##' In practice, step 3 does not pose major problems using the +##' reparameterized -log L2 function, but it might be useful to use method +##' \dQuote{L-BFGS-B} with appropriate lower and upper bounds. Step 4 can +##' be a bit more problematic, because the parameters are usually on very +##' different scales and there can be multiple minima. Therefore, it is +##' almost always the rule to use method \dQuote{L-BFGS-B}, again bounding +##' the parameter search, as well as passing a \code{control} list with +##' proper \code{parscale} for controlling the optimization. See +##' \code{Note} below for useful constraints which can be tried. +##' @aliases fitMLEbouts +##' @param obj Object of class \code{\link{Bouts}}. +##' @param start passed to \code{\link[stats4]{mle}}. A row- and +##' column-named (2,N) matrix, as returned by \code{\link{boutinit}}. +##' @param optim_opts0 named list of optional arguments passed to +##' \code{\link[stats4]{mle}} for fitting the first model with +##' transformed parameters. +##' @param optim_opts1 named list of optional arguments passed to +##' \code{\link[stats4]{mle}} for fitting the second model with +##' parameters retrieved from the first model, untransformed to +##' original scale. +##' @return An object of class \code{\link[stats4]{mle}}. +##' @author Sebastian P. 
Luque \email{spluque@@gmail.com} +##' @note +##' +##' In the case of a mixture of 2 Poisson processes, useful values for +##' lower bounds for the transformed negative log likelihood +##' reparameterization are \code{c(-2, -5, -10)}. For the un-transformed +##' parameterization, useful lower bounds are \code{rep(1e-08, 3)}. A +##' useful parscale argument for the latter is \code{c(1, 0.1, 0.01)}. +##' However, I have only tested this for cases of diving behaviour in +##' pinnipeds, so these suggested values may not be useful in other cases. +##' +##' The lambdas can be very small for some data, particularly +##' \code{lambda2}, so the default \code{ndeps} in \code{\link{optim}} can +##' be so large as to push the search outside the bounds given. To avoid +##' this problem, provide a smaller \code{ndeps} value. +##' @references +##' Langton, S.; Collett, D. and Sibly, R. (1995) Splitting behaviour into +##' bouts; a maximum likelihood approach. Behaviour \bold{132}, 9-10. +##' +##' Luque, S.P. and Guinet, C. (2007) A maximum likelihood approach for +##' identifying dive bouts improves accuracy, precision, and +##' objectivity. Behaviour, \bold{144}, 1315-1332. +##' +##' Sibly, R.; Nott, H. and Fletcher, D. (1990) Splitting behaviour into +##' bouts. Animal Behaviour \bold{39}, 63-69. +##' @keywords methods models manip +##' @describeIn fitMLEbouts Fit model via MLE on numeric vector. +##' @examples +##' ## Run example to retrieve random samples for two- and three-process +##' ## Poisson mixtures with known parameters as 'Bouts' objects +##' ## ('xbouts2', and 'xbouts3'), as well as starting values from +##' ## broken-stick model ('startval2' and 'startval3') +##' utils::example("boutinit", package="diveMove", ask=FALSE) +##' +##' ## 2-process +##' opts0 <- list(method="L-BFGS-B", lower=c(-2, -5, -10)) +##' opts1 <- list(method="L-BFGS-B", lower=c(1e-1, 1e-3, 1e-6)) +##' bouts2.fit <- fitMLEbouts(xbouts2, start=startval2, optim_opts0=opts0, +##' optim_opts1=opts1) +##' plotBouts(bouts2.fit, xbouts2) +##' +##' ## 3-process +##' opts0 <- list(method="L-BFGS-B", lower=c(-5, -5, -6, -8, -12)) +##' ## We know 0 < p < 1, and can provide bounds for lambdas within an +##' ## order of magnitude for a rough box constraint. +##' lo <- c(9e-2, 9e-2, 2e-3, 1e-3, 1e-5) +##' hi <- c(9e-1, 9.9e-1, 2e-1, 9e-2, 5e-3) +##' ## Important to set the step size to avoid running below zero for +##' ## the last lambda. 
+##' ndeps <- c(1e-3, 1e-3, 1e-3, 1e-3, 1e-5) +##' opts1 <- list(method="L-BFGS-B", lower=lo, upper=hi, +##' control=list(ndeps=ndeps)) +##' bout3.fit <- fitMLEbouts(xbouts3, start=startval3, optim_opts0=opts0, +##' optim_opts1=opts1) +##' bec(bout3.fit) +##' plotBoutsCDF(bout3.fit, xbouts3) +setMethod("fitMLEbouts", signature(obj="numeric"), + function(obj, start, optim_opts0=NULL, optim_opts1=NULL) { + nproc <- ncol(start) + if (nproc > 3) { + stop("Only mixtures of <= 3 processes are implemented") + } + p0 <- calc.p(start) + ## Transform parameters for first fit + x0 <- c(logit(p0), log(start["lambda", ])) + ## First fit + ll0 <- boutsMLEll.chooser(obj, x0, transformed=TRUE) + fit0.args <- list(ll0, start=x0) + fit0 <- do.call("mle", args=c(fit0.args, optim_opts0)) + ## Second fit + start2.l <- build.p.lambda(coef(fit0)) + start2.l[["p"]] <- unLogit(start2.l[["p"]]) + start2.l[["lambdas"]] <- exp(start2.l[["lambdas"]]) + ll1 <- boutsMLEll.chooser(obj, x0, transformed=FALSE) + fit1.args <- list(ll1, start=unlist(start2.l)) + do.call("mle", args=c(fit1.args, optim_opts1)) + }) + +##' @describeIn fitMLEbouts Fit model via MLE on \code{\link{Bouts}} +##' object. +setMethod("fitMLEbouts", signature(obj="Bouts"), + function(obj, start, optim_opts0=NULL, optim_opts1=NULL) { + x <- obj@x + fitMLEbouts(x, start=start, + optim_opts0=optim_opts0, + optim_opts1=optim_opts1) + }) + +##' Label each vector element or matrix row with bout membership number +##' +##' Identify which bout an observation belongs to. +##' +##' @aliases labelBouts +##' @param obj Object of class \code{\link{Bouts}} object, or numeric +##' vector or matrix with independent data modelled as a Poisson +##' process mixture. +##' @param becs numeric vector or matrix with values for the bout ending +##' criterion which should be compared against the values in x for +##' identifying the bouts. It needs to have the same dimensions as +##' \code{x} to allow for situations where \code{bec} is within +##' \code{x}. +##' @param bec.method character: method used for calculating the +##' frequencies: \dQuote{standard} simply uses x, while +##' \dQuote{seq.diff} uses the sequential differences method. +##' @return \code{labelBouts} returns a numeric vector sequentially +##' labelling each row or element of \var{x}, which associates it with +##' a particular bout. \code{unLogit} and \code{logit} return a numeric +##' vector with the (un)transformed arguments. +##' @keywords methods models manip +##' @describeIn labelBouts Label data on vector or matrix objects. 
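For the "standard" method on a plain numeric vector, the labelling rule implemented by this method boils down to opening a new bout whenever a value exceeds its BEC. A minimal illustration with a hypothetical gap vector and a single BEC (not package code):

    gaps <- c(5, 8, 12, 300, 6, 9, 450, 7)   # hypothetical postdive gaps (s)
    bec <- 100                               # hypothetical bout ending criterion
    ## first observation opens bout 1; a new bout starts at each gap > BEC
    bout.id <- cumsum(c(TRUE, gaps[-1] > bec))
    bout.id                                  # 1 1 1 2 2 2 3 3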
+##' @examples +##' ## Run example to retrieve random samples for two- and three-process +##' ## Poisson mixtures with known parameters as 'Bouts' objects +##' ## ('xbouts2', and 'xbouts3'), as well as starting values from +##' ## broken-stick model ('startval2' and 'startval3') +##' utils::example("boutinit", package="diveMove", ask=FALSE) +##' +##' ## 2-process +##' opts0 <- list(method="L-BFGS-B", lower=c(-2, -5, -10)) +##' opts1 <- list(method="L-BFGS-B", lower=c(1e-1, 1e-3, 1e-6)) +##' bouts2.fit <- fitMLEbouts(xbouts2, start=startval2, optim_opts0=opts0, +##' optim_opts1=opts1) +##' bec2 <- bec(bouts2.fit) +##' ## labelBouts() expects its second argument to have the same +##' ## dimensions as the data +##' labelBouts(xbouts2, becs=rep(bec2, length(xbouts2@x))) +setMethod("labelBouts", signature(obj="numeric"), + function(obj, becs, bec.method=c("standard", "seq.diff")) { + if (!is(obj, "matrix")) obj <- as.matrix(obj) + if (!is(becs, "matrix")) becs <- as.matrix(becs) + if (!identical(dim(obj), dim(becs))) + stop("obj and becs must have the same dimensions") + bec.method <- match.arg(bec.method) + switch(bec.method, + standard = {xx <- obj[-1, ]}, + seq.diff = { + xx <- apply(obj, 2, function(k) abs(diff(k))) }) + testfun <- function(xi, beci) ifelse(xi > beci, 1, 2) + bectest <- mapply(testfun, xx, becs[-1, ]) + dim(bectest) <- dim(xx) + bectest.full <- rbind(1, bectest) + bectest.any <- apply(bectest.full, 1, function(k) any(k < 2)) + chgbout <- which(bectest.any) + boutno <- seq(along=chgbout) + reps <- diff(c(chgbout, nrow(obj) + 1)) + rep(boutno, reps) + }) + +##' @describeIn labelBouts Label data on \code{\link{Bouts}} object +setMethod("labelBouts", signature(obj="Bouts"), + function(obj, becs, bec.method=c("standard", "seq.diff")) { + x <- obj@x + labelBouts(x, becs=becs, bec.method=bec.method) }) ###_ . plotZOC + +##' Methods for visually assessing results of ZOC procedure +##' +##' Plots for comparing the zero-offset corrected depth from a +##' \code{\link{TDRcalibrate}} object with the uncorrected data in a +##' \code{\link{TDR}} object, or the progress in each of the filters during +##' recursive filtering for ZOC (\code{\link{calibrateDepth}}). +##' +##' The \code{TDR},\code{matrix} method produces a plot like those shown in +##' Luque and Fried (2011). +##' +##' The \code{TDR},\code{TDRcalibrate} method overlays the corrected depth +##' from the second argument over that from the first. +##' +##' @aliases plotZOC +##' @param x \code{TDR} object. +##' @param y matrix with the same number of rows as there are observations +##' in \code{x}, or a \code{TDRcalibrate} object. +##' @param xlim \code{POSIXct} or numeric vector of length 2, with lower +##' and upper limits of time to be plotted. Defaults to time range of +##' input. +##' @param ylim numeric vector of length 2 (upper, lower) with axis limits. +##' Defaults to range of input. +##' @param ylab character strings to label the corresponding y-axis. +##' @param ... Arguments passed to \code{\link{legend}}. +##' @return Nothing; a plot as side effect. +##' @author Sebastian P. Luque \email{spluque@@gmail.com} +##' @seealso \code{\link{calibrateDepth}}, \code{\link{.zoc}} +##' @references +##' +##' Luque, S.P. and Fried, R. (2011) Recursive filtering for zero offset +##' correction of diving depth time series. PLoS ONE 6:e15850 +##' @keywords methods iplot +##' @describeIn plotZOC This plot helps in finding appropriate parameters +##' for \code{diveMove:::.depthFilter}, and consists of three panels. 
+##' The upper panel shows the original data, the middle panel shows the
+##' filters, and the last panel shows the corrected data; see also
+##' \code{zoc.method=\dQuote{visual}} in \code{\link{calibrateDepth}} for an
+##' interactive alternative.
+##' @examples
+##' ## Using the Example from '?diveStats':
+##' \donttest{## Too long for checks
+##'
+##' utils::example("diveStats", package="diveMove",
+##'                ask=FALSE, echo=FALSE, run.donttest=TRUE)
+##'
+##' ## Plot filters for ZOC
+##' ## Work on first phase (trip) subset, to save processing time, since
+##' ## there's no drift nor shifts between trips
+##' tdr <- divesTDR[1:15000]
+##' ## Try window widths (K), quantiles (P) and bound the search (db)
+##' K <- c(3, 360); P <- c(0.5, 0.02); db <- c(0, 5)
+##' d.filter <- diveMove:::.depthFilter(depth=getDepth(tdr),
+##'                                     k=K, probs=P, depth.bounds=db,
+##'                                     na.rm=TRUE)
+##' old.par <- par(no.readonly=TRUE)
+##' plotZOC(tdr, d.filter, ylim=c(0, 6))
+##' par(old.par)
+##'
+##' ## Plot corrected and uncorrected depth, regardless of method
+##' ## Look at three different scales
+##' xlim1 <- c(getTime(divesTDR)[7100], getTime(divesTDR)[11700])
+##' xlim2 <- c(getTime(divesTDR)[7100], getTime(divesTDR)[7400])
+##' xlim3 <- c(getTime(divesTDR)[7100], getTime(divesTDR)[7200])
+##' par(mar=c(3, 4, 0, 1) + 0.1, cex=1.1, las=1)
+##' layout(seq(3))
+##' plotZOC(divesTDR, dcalib, xlim=xlim1, ylim=c(0, 6))
+##' plotZOC(divesTDR, dcalib, xlim=xlim2, ylim=c(0, 70))
+##' plotZOC(divesTDR, dcalib, xlim=xlim3, ylim=c(0, 70))
+##' par(old.par)
+##'
+##' }
 setMethod("plotZOC", signature(x="TDR", y="matrix"),
           function(x, y, xlim, ylim, ylab="Depth (m)", ...) {
               .plotZOCfilters(x=x, zoc.filter=y, xlim=xlim, ylim=ylim,
                               ylab=ylab, ...)
           })
+##' @describeIn plotZOC This plots depth from the \code{TDRcalibrate}
+##'     object over the one from the \code{TDR} object.
 setMethod("plotZOC", signature(x="TDR", y="TDRcalibrate"),
           function(x, y, xlim, ylim, ylab="Depth (m)", ...) {
               .plotZOCtdrs(x=x, y=y, xlim=xlim, ylim=ylim, ylab=ylab, ...)
           })
@@ -584,19 +1443,82 @@ setMethod("[", signature(x="TDR", i="numeric", j="missing", drop="missing"),
 ###_ + Generators and Summaries

+##' Read comma-delimited file with "TDR" data
+##'
+##' Read a delimited (*.csv) file containing time-depth recorder
+##' (\dfn{TDR}) data from various \acronym{TDR} models.  Return a
+##' \code{TDR} or \code{TDRspeed} object.  \code{createTDR} creates an
+##' object of one of these classes from other objects.
+##'
+##' The input file is assumed to have a header row identifying each field,
+##' and all rows must be complete (i.e. have the same number of fields).
+##' Field names need not follow any convention.  However, depth and speed
+##' are assumed to be in m, and \eqn{m \cdot s^{-1}}{m/s}, respectively,
+##' for further analyses.
+##'
+##' If \var{speed} is TRUE and concurrentCols contains a column named speed
+##' or velocity, then an object of class \code{\link{TDRspeed}} is created,
+##' where speed is considered to be the column matching this name.
+##'
+##' @aliases createTDR
+##' @param time A \code{POSIXct} object with date and time readings for
+##'     each reading.
+##' @param depth numeric vector with depth readings.
+##' @param concurrentData \code{\link{data.frame}} with additional,
+##'     concurrent data collected.
+##' @param speed logical: whether speed is included in one of the columns
+##'     of concurrentCols.
+##' @param dtime numeric scalar: sampling interval used in seconds.  If
+##'     missing, it is calculated from the \code{time} argument.
+##' @param file character: a string indicating the path to the file to
+##'     read.  This can also be a text-mode connection, as allowed in
+##'     \code{\link{read.csv}}.
+##' @param dateCol integer: column number containing dates, and optionally,
+##'     times.
+##' @param timeCol integer: column number with times.
+##' @param depthCol integer: column number containing depth readings.
+##' @param subsamp numeric scalar: subsample rows in \code{file} with
+##'     \code{subsamp} interval, in s.
+##' @param concurrentCols integer vector of column numbers to include as
+##'     concurrent data collected.
+##' @param dtformat character: a string specifying the format in which the
+##'     date and time columns, when pasted together, should be interpreted
+##'     (see \code{\link{strptime}}).
+##' @param tz character: a string indicating the time zone assumed for the
+##'     date and time readings.
+##' @param ... Passed to \code{\link{read.csv}}.
+##' @return An object of class \code{\link{TDR}} or \code{\link{TDRspeed}}.
+##' @note Although \code{\link{TDR}} and \code{\link{TDRspeed}} classes
+##'     check that time stamps are in increasing order, the integrity of
+##'     the input must be thoroughly verified for common errors present in
+##'     text output from \acronym{TDR} devices such as duplicate records,
+##'     missing time stamps and non-numeric characters in numeric fields.
+##'     These errors are much more efficiently dealt with outside of \R
+##'     using tools like \code{GNU awk} or \code{GNU sed}, so
+##'     \code{\link{diveMove}} does not currently attempt to fix these
+##'     errors.
+##' @author Sebastian P. Luque \email{spluque@@gmail.com}
+##' @keywords manip
+##' @examples
+##' ## Do example to define object zz with location of dataset
+##' utils::example("dives", package="diveMove",
+##'                ask=FALSE, echo=FALSE)
+##' srcfn <- basename(zz)
+##' readTDR(zz, speed=TRUE, sep=";", na.strings="", as.is=TRUE)
+##'
+##' ## Or more pedestrian
+##' tdrX <- read.csv(zz, sep=";", na.strings="", as.is=TRUE)
+##' date.time <- paste(tdrX$date, tdrX$time)
+##' tdr.time <- as.POSIXct(strptime(date.time, format="%d/%m/%Y %H:%M:%S"),
+##'                        tz="GMT")
+##' createTDR(tdr.time, tdrX$depth, concurrentData=data.frame(speed=tdrX$speed),
+##'           file=srcfn, speed=TRUE)
 "createTDR" <- function(time, depth,
                         concurrentData=data.frame(matrix(ncol=0, nrow=length(time))),
                         speed=FALSE, dtime, file)
 {
-    ## Value: An object of TDR or TDRspeed class.  Useful to recreate
-    ## objects once depth has been zoc'ed and speed calibrated for further
-    ## analyses.
-    ## --------------------------------------------------------------------
-    ## Arguments: see class definitions
-    ## --------------------------------------------------------------------
-    ## Author: Sebastian Luque
-    ## --------------------------------------------------------------------
     if (missing(dtime)) dtime <- .getInterval(time)
     if(speed) {
         new("TDRspeed", time=time, depth=depth, concurrentData=concurrentData,
@@ -607,6 +1529,35 @@ setMethod("[", signature(x="TDR", i="numeric", j="missing", drop="missing"),
     }
 }

+##' Extract Dives from "TDR" or "TDRcalibrate" Objects
+##'
+##' Extract data corresponding to particular dive(s), referred to by
+##' number.
+##'
+##' @aliases extractDive
+##' @param obj \code{\link{TDR}} object.
+##' @param diveNo numeric vector or scalar with dive numbers to
+##'     extract.  Duplicates are ignored.
+##' @param id numeric vector or scalar of dive numbers from where
+##'     \code{diveNo} should be chosen.
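## A small illustration (not the package's internal .getInterval()) of how
## a sampling interval can be derived from a POSIXct vector, which is what
## createTDR() above falls back to when 'dtime' is missing.  The time
## values below are made up.
tt <- as.POSIXct("2021-05-11 00:00:00", tz="GMT") + seq(0, 50, by=5)
as.numeric(names(which.max(table(diff(as.numeric(tt))))))  # modal interval (s)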
+##' @return An object of class \code{\link{TDR}} or \code{\link{TDRspeed}}. +##' @author Sebastian P. Luque \email{spluque@@gmail.com} +##' @keywords methods +##' @describeIn extractDive Extract data on TDR object +##' @examples +##' \donttest{## Too long for checks +##' ## Continuing the Example from '?calibrateDepth': +##' utils::example("calibrateDepth", package="diveMove", +##' ask=FALSE, echo=FALSE, run.donttest=TRUE) +##' dcalib # the 'TDRcalibrate' that was created +##' +##' diveX <- extractDive(divesTDR, 9, getDAct(dcalib, "dive.id")) +##' plotTDR(diveX) +##' +##' diveX <- extractDive(dcalib, 5:10) +##' plotTDR(diveX) +##' +##' } setMethod("extractDive", signature(obj="TDR", diveNo="numeric", id="numeric"), # for TDR object function(obj, diveNo, id) { @@ -627,6 +1578,7 @@ setMethod("extractDive", signature(obj="TDR", diveNo="numeric", } }) +##' @describeIn extractDive Extract data on TDRcalibrate object setMethod("extractDive", # for TDRcalibrate signature(obj="TDRcalibrate", diveNo="numeric", id="missing"), function(obj, diveNo) { @@ -646,6 +1598,41 @@ setMethod("extractDive", # for TDRcalibrate } }) +##' Describe the Time Budget of Major Activities from "TDRcalibrate" +##' object. +##' +##' Summarize the major activities recognized into a time budget. +##' +##' Ignored trivial aquatic periods are collapsed into the enclosing dry +##' period. +##' +##' @aliases timeBudget +##' @param obj \code{\link{TDRcalibrate}} object. +##' @param ignoreZ logical: whether to ignore trivial aquatic periods. +##' @return A \code{\link{data.frame}} with components: +##' +##' \item{phaseno}{A numeric vector numbering each period of activity.} +##' +##' \item{activity}{A factor labelling the period with the corresponding +##' activity.} +##' +##' \item{beg, end}{\code{\link{POSIXct}} objects indicating the beginning +##' and end of each period.} +##' @author Sebastian P. Luque \email{spluque@@gmail.com} +##' @seealso \code{\link{calibrateDepth}} +##' @keywords methods +##' @describeIn timeBudget Base method for computing time budget from +##' TDRcalibrate object +##' @examples +##' \donttest{## Too long for checks +##' ## Continuing the Example from '?calibrateDepth': +##' utils::example("calibrateDepth", package="diveMove", +##' ask=FALSE, echo=FALSE, run.donttest=TRUE) +##' dcalib # the 'TDRcalibrate' that was created +##' +##' timeBudget(dcalib, TRUE) +##' +##' } setMethod("timeBudget", # a table of general attendance pattern signature(obj="TDRcalibrate", ignoreZ="logical"), function(obj, ignoreZ) { @@ -668,10 +1655,6 @@ setMethod("timeBudget", # a table of general attendance pattern }) -###_ + Methods for bec2 and bec3 are in bouts.R - -## This is to avoid Collate issues in DESCRIPTION - ###_ + Emacs local variables ## Local variables: diff --git a/R/austFilter.R b/R/austFilter.R index 9d13295..0ae4687 100644 --- a/R/austFilter.R +++ b/R/austFilter.R @@ -1,4 +1,160 @@ +##' Filter satellite locations +##' +##' Apply a three stage algorithm to eliminate erroneous locations, based +##' on established procedures. +##' +##' These functions implement the location filtering procedure outlined in +##' Austin et al. (2003). \code{grpSpeedFilter} and \code{rmsDistFilter} +##' can be used to perform only the first stage or the second and third +##' stages of the algorithm on their own, respectively. Alternatively, the +##' three filters can be run in a single call using \code{austFilter}. 
+##'
+##' The first stage of the filter is an iterative process which tests every
+##' point, except the first and last (\var{w}/2) - 1 (where \var{w} is the
+##' window size) points, for travel velocity relative to the
+##' preceding/following (\var{w}/2) - 1 points.  If all \var{w} - 1 speeds
+##' are greater than the specified threshold, the point is marked as
+##' failing the first stage.  In this case, the next point is tested,
+##' removing the failing point from the set of test points.
+##'
+##' The second stage runs McConnell et al.'s (1992) algorithm, which tests
+##' all the points that passed the first stage, in the same manner as
+##' above.  The root mean square of all \var{w} - 1 speeds is calculated,
+##' and if it is greater than the specified threshold, the point is marked
+##' as failing the second stage (see Warning section below).
+##'
+##' The third stage is run simultaneously with the second stage, but if the
+##' mean distance of all \var{w} - 1 pairs of points is greater than the
+##' specified threshold, then the point is marked as failing the third
+##' stage.
+##'
+##' The speed and distance thresholds should be obtained separately (see
+##' \code{\link{distSpeed}}).
+##'
+##' @param time \code{POSIXct} object with dates and times for each point.
+##' @param lon numeric vector of longitudes, in decimal degrees.
+##' @param lat numeric vector of latitudes, in decimal degrees.
+##' @param id A factor grouping points in different categories
+##'     (e.g. individuals).
+##' @param speed.thr numeric scalar: speed threshold (m/s) above which
+##'     filter tests should fail any given point.
+##' @param dist.thr numeric scalar: distance threshold (km) above which the
+##'     last filter test should fail any given point.
+##' @param window integer: the size of the moving window over which tests
+##'     should be carried out.
+##' @param ... Arguments ultimately passed to \code{\link{distSpeed}}.
+##' @section Warning:
+##' This function applies McConnell et al.'s filter as described in Freitas
+##' et al. (2008).  According to the original description of the algorithm
+##' in McConnell et al. (1992), the filter makes a single pass through all
+##' locations.  Austin et al. (2003) and other authors may have used the
+##' filter this way.  However, as Freitas et al. (2008) noted, this causes
+##' locations adjacent to those flagged as failing to fail also, thereby
+##' rejecting too many locations.  In diveMove, the algorithm was modified
+##' to reject only the \dQuote{peaks} in each series of consecutive
+##' locations having root mean square speed higher than the threshold.
+##' @return
+##' \code{rmsDistFilter} and \code{austFilter} return a matrix with 2 or 3
+##' columns, respectively, of logical vectors with values TRUE for points
+##' that passed each stage.  For the latter, positions that fail the first
+##' stage fail the other stages too.  The second and third columns returned
+##' by \code{austFilter}, as well as those returned by \code{rmsDistFilter}
+##' are independent of one another; i.e. positions that fail stage 2 do not
+##' necessarily fail stage 3.
+##' @author Sebastian Luque \email{spluque@@gmail.com} and Andy Liaw.
+##' @references
+##' McConnell BJ, Chambers C, Fedak MA. 1992.  Foraging ecology of southern
+##' elephant seals in relation to bathymetry and productivity of the
+##' Southern Ocean.  \emph{Antarctic Science} 4:393-398.
+##'
+##' Austin D, McMillan JI, Bowen D. 2003.  A three-stage algorithm for
+##' filtering erroneous Argos satellite locations.
\emph{Marine Mammal +##' Science} 19: 371-383. +##' +##' Freitas C, Lydersen, C, Fedak MA, Kovacs KM. 2008. A simple new +##' algorithm to filter marine mammal ARGOS locations. Marine Mammal +##' Science DOI: 10.1111/j.1748-7692.2007.00180.x +##' @keywords manip iteration +##' @seealso \code{\link{distSpeed}} +##' @examples +##' ## Using the Example from '?readLocs': +##' utils::example("readLocs", package="diveMove", +##' ask=FALSE, echo=FALSE) +##' ringy <- subset(locs, id == "ringy" & !is.na(lon) & !is.na(lat)) +##' +##' ## Examples below use default Meeus algorithm for computing distances. +##' ## See ?distSpeed for specifying other methods. +##' ## Austin et al.'s group filter alone +##' grp <- grpSpeedFilter(ringy[, 3:5], speed.thr=1.1) +##' +##' ## McConnell et al.'s filter (root mean square test), and distance test +##' ## alone +##' rms <- rmsDistFilter(ringy[, 3:5], speed.thr=1.1, dist.thr=300) +##' +##' ## Show resulting tracks +##' n <- nrow(ringy) +##' plot.nofilter <- function(main) { +##' plot(lat ~ lon, ringy, type="n", main=main) +##' with(ringy, segments(lon[-n], lat[-n], lon[-1], lat[-1])) +##' } +##' layout(matrix(1:4, ncol=2, byrow=TRUE)) +##' plot.nofilter(main="Unfiltered Track") +##' plot.nofilter(main="Group Filter") +##' n1 <- length(which(grp)) +##' with(ringy[grp, ], segments(lon[-n1], lat[-n1], lon[-1], lat[-1], +##' col="blue")) +##' plot.nofilter(main="Root Mean Square Filter") +##' n2 <- length(which(rms[, 1])) +##' with(ringy[rms[, 1], ], segments(lon[-n2], lat[-n2], lon[-1], lat[-1], +##' col="red")) +##' plot.nofilter(main="Distance Filter") +##' n3 <- length(which(rms[, 2])) +##' with(ringy[rms[, 2], ], segments(lon[-n3], lat[-n3], lon[-1], lat[-1], +##' col="green")) +##' +##' ## All three tests (Austin et al. procedure) +##' austin <- with(ringy, austFilter(time, lon, lat, speed.thr=1.1, +##' dist.thr=300)) +##' layout(matrix(1:4, ncol=2, byrow=TRUE)) +##' plot.nofilter(main="Unfiltered Track") +##' plot.nofilter(main="Stage 1") +##' n1 <- length(which(austin[, 1])) +##' with(ringy[austin[, 1], ], segments(lon[-n1], lat[-n1], lon[-1], lat[-1], +##' col="blue")) +##' plot.nofilter(main="Stage 2") +##' n2 <- length(which(austin[, 2])) +##' with(ringy[austin[, 2], ], segments(lon[-n2], lat[-n2], lon[-1], lat[-1], +##' col="red")) +##' plot.nofilter(main="Stage 3") +##' n3 <- length(which(austin[, 3])) +##' with(ringy[austin[, 3], ], segments(lon[-n3], lat[-n3], lon[-1], lat[-1], +##' col="green")) +"austFilter" <- function(time, lon, lat, id=gl(1, 1, length(time)), + speed.thr, dist.thr, window=5, ...) +{ + ## FIRST STAGE ******************************************************** + locs <- data.frame(time, lon, lat) + + ## Do first stage over each seal's data, returns vector as long as locs + first <- unlist(by(locs, id, grpSpeedFilter, speed.thr, window, ...), + use.names=FALSE) + ## SECOND AND THIRD STAGES ******************************************** + good <- which(first) # native subscripts that passed + last <- do.call(rbind, by(locs[good, ], id[good], rmsDistFilter, + speed.thr, window, dist.thr, ...)) + filter123 <- cbind(firstPass=first, + secondPass=first, # 2nd and 3rd start the same as 1st + thirdPass=first) + filter123[good, 2:3] <- last + filter123 +} + +##' @describeIn austFilter Do stage one on 3-column matrix \code{x} +##' @param x 3-column matrix with column 1: \code{POSIXct} vector; column +##' 2: numeric longitude vector; column 3: numeric latitude vector. 
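## A toy sketch of the stage-one rule described above (not diveMove's
## grpSpeedFilter, which uses distSpeed() on geographic coordinates): a
## point fails only when *all* speeds computed against its neighbours
## within the window exceed the threshold.  Coordinates, times and the
## planar distances are made up for illustration.
toy.speed.fail <- function(i, tm, xx, yy, window=5, speed.thr=1.1) {
    half <- window %/% 2
    nb <- setdiff(max(1, i - half):min(length(tm), i + half), i)
    dists <- sqrt((xx[nb] - xx[i])^2 + (yy[nb] - yy[i])^2)
    secs <- abs(as.numeric(difftime(tm[nb], tm[i], units="secs")))
    all(dists / secs > speed.thr)
}
tm <- as.POSIXct("2021-05-11 00:00:00", tz="GMT") + seq(0, 900, by=100)
xx <- c(0, 1, 2, 3, 5000, 5, 6, 7, 8, 9)
yy <- rep(0, 10)
sapply(seq_along(tm), toy.speed.fail, tm=tm, xx=xx, yy=yy)  # only point 5 fails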
+##' @return \code{grpSpeedFilter} logical vector indicating those lines +##' that passed the test. "grpSpeedFilter" <- function(x, speed.thr, window=5, ...) { ## Value: Do stage one on matrix x (assuming it's a single unit), @@ -34,7 +190,8 @@ pass } - +##' @describeIn austFilter Apply McConnell et al's filter and Austin et +##' al's last stage "rmsDistFilter" <- function(x, speed.thr, window=5, dist.thr, ...) { ## Value: Run McConnell et al's filter and Austin et al's last stage, @@ -121,39 +278,4 @@ } -"austFilter" <- function(time, lon, lat, id=gl(1, 1, length(time)), - speed.thr, dist.thr, window=5, ...) -{ - ## Value: A matrix with logicals indicating whether each reading failed - ## each filter. This runs the filters in Austin et al. (2003). - ## Results are presented from each filter, independently of the others - ## for flexibility. - ## -------------------------------------------------------------------- - ## Arguments: lat and lon=latitude and longitude vectors in degrees; - ## time=POSIXct object with times for each point; id=factor identifying - ## sections of the data to be treated separately; speed.thr=speed - ## threshold (m/s); dist.thr=distance threshold (km); window=size of - ## window to test; ...=arguments passed to grpSpeedFilter() and - ## rmsDistFilter(), namely only 'method' for now. - ## -------------------------------------------------------------------- - ## Author: Sebastian Luque - ## -------------------------------------------------------------------- - ## FIRST STAGE ******************************************************** - locs <- data.frame(time, lon, lat) - - ## Do first stage over each seal's data, returns vector as long as locs - first <- unlist(by(locs, id, grpSpeedFilter, speed.thr, window, ...), - use.names=FALSE) - - ## SECOND AND THIRD STAGES ******************************************** - good <- which(first) # native subscripts that passed - last <- do.call(rbind, by(locs[good, ], id[good], rmsDistFilter, - speed.thr, window, dist.thr, ...)) - filter123 <- cbind(firstPass=first, - secondPass=first, # 2nd and 3rd start the same as 1st - thirdPass=first) - filter123[good, 2:3] <- last - filter123 -} - ## TEST ZONE -------------------------------------------------------------- diff --git a/R/bouts.R b/R/bouts.R index 0fd3be1..c525414 100644 --- a/R/bouts.R +++ b/R/bouts.R @@ -1,636 +1,73 @@ - -"logit" <- function(p) log(p / (1 - p)) - -"unLogit" <- function(logit) exp(logit) / (exp(logit) + 1) - -"boutfreqs" <- function(x, bw, method=c("standard", "seq.diff"), - plot=TRUE, ...) -{ - ## Value: data frame with log frequencies and bin mid-points - ## -------------------------------------------------------------------- - ## Arguments: x=numeric vector, bw=bin width for histogram, - ## method=method used to construct the histogram, plot=logical whether - ## to plot or not; ...=arguments passed to hist (must exclude 'breaks' - ## and 'include.lowest') - ## -------------------------------------------------------------------- - ## Author: Sebastian P. 
Luque - ## -------------------------------------------------------------------- - method <- match.arg(method) - switch(method, - standard = {upper <- max(x, na.rm=TRUE) - brks <- seq(min(x, na.rm=TRUE), upper, bw) - if (brks[length(brks)] < upper) { - brks <- c(brks, brks[length(brks)] + bw) - } - h <- hist(x, breaks=brks, include.lowest=TRUE, - plot=plot, ...)}, - seq.diff = {diff.x <- abs(diff(x)) - upper <- max(diff.x, na.rm=TRUE) - brks <- seq(0, upper, bw) - if (brks[length(brks)] < upper) { - brks <- c(brks, brks[length(brks)] + bw) - } - h <- hist(diff.x, breaks=brks, include.lowest=TRUE, - plot=plot, ...)}) - ok <- which(h$counts > 0) - freq.adj <- h$counts[ok] / diff(c(0, ok)) - data.frame(lnfreq=log(freq.adj), x=h$mids[ok]) -} - -"boutinit" <- function(lnfreq, x.break, plot=TRUE, ...) -{ - ## Value: list with starting values for nls bout function - ## -------------------------------------------------------------------- - ## Arguments: lnfreq=data frame with 'lnfreq' (log frequencies) and 'x' - ## (midpoints); x.break=vector of length 1 or 2 with x value(s) - ## defining the break(s) point(s) for broken stick model, such that x < - ## x.break[1] is 1st process, and x >= x.break[1] & x < x.break[2] is - ## 2nd one, and x >= x.break[2] is 3rd one; plot=logical whether to - ## plot or not; ... arguments passed to lattice's xyplot() - ## -------------------------------------------------------------------- - ## Author: Sebastian P. Luque - ## -------------------------------------------------------------------- - nproc <- length(x.break) - if (nproc > 2) stop ("x.break must be of length 1 or 2") - procf <- cut(lnfreq$x, breaks=c(min(lnfreq$x), x.break, max(lnfreq$x)), - include.lowest=TRUE, right=TRUE) - coefs <- by(lnfreq, procf, function(k) {coef(lm(lnfreq ~ x, k))}) - pars <- lapply(coefs, function(p) { - lambda <- as.vector(-p[2]) - a <- as.vector(exp(p[1]) / lambda) - c(a=a, lambda=lambda) - }) - if (plot) { - requireNamespace("lattice", quietly=TRUE) || - stop("lattice package is not available") - pp <- lattice::xyplot(lnfreq ~ x, lnfreq, groups=procf, - pars=pars, panel=function(x, y, ..., - pars=pars, ab=coefs) { - lattice::panel.xyplot(x, y, ...) - a1 <- pars[[1]][1] - lambda1 <- pars[[1]][2] - a2 <- pars[[2]][1] - lambda2 <- pars[[2]][2] - if (length(pars) < 3) { - "procFun2" <- function(x) { - log(a1 * lambda1 * exp(-lambda1 * x) + - a2 * lambda2 * exp(-lambda2 * x)) - } - lattice::panel.curve(procFun2, - min(x), max(x), - add=TRUE) - lattice::panel.abline(ab[[1]], lty=2) - lattice::panel.abline(ab[[2]], lty=3) - } else { - a3 <- pars[[3]][1] - lambda3 <- pars[[3]][2] - "procFun3" <- function(x) { - log(a1 * lambda1 * exp(-lambda1 * x) + - a2 * lambda2 * exp(-lambda2 * x) + - a3 * lambda3 * exp(-lambda3 * x)) - } - lattice::panel.curve(procFun3, - min(x), max(x), - add=TRUE) - lattice::panel.abline(ab[[1]], lty=2) - lattice::panel.abline(ab[[2]], lty=3) - lattice::panel.abline(ab[[3]], lty=4) - } - }, ...) - print(pp) - } - pars +##' Log likelihood function of parameters given observed data +##' +##' This function defines a closure, where \code{x} will be the object +##' passed to it. +##' @param x numeric vector of independent data to be described by the +##' function. +##' @param x0 numerical one-dimensional vector of coefficients. +##' @param transformed logical indicating whether coefficients need to be +##' transformed back to original scale to compute the negative log +##' likelihood. 
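## A quick numerical check (illustration only) that the 2-process mixture
## density used by the log likelihood functions defined below is simply a
## weighted sum of two exponential densities, so dexp() reproduces it.
## The values of p and the lambdas are made up.
xx <- c(5, 50, 500)
p <- 0.7; lambda0 <- 0.05; lambda1 <- 0.005
ll.byhand <- log(p * lambda0 * exp(-lambda0 * xx) +
                 (1 - p) * lambda1 * exp(-lambda1 * xx))
ll.dexp <- log(p * dexp(xx, rate=lambda0) + (1 - p) * dexp(xx, rate=lambda1))
stopifnot(all.equal(ll.byhand, ll.dexp))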
+##' @return \code{ll.chooser} returns the negative log likelihood function +##' of the joint distribution. +##' @rdname boutsMLEll +##' @keywords internal +"boutsMLEll.chooser" <- function(x, x0, transformed=TRUE) +{ + pars.l <- build.p.lambda(x0) + + switch(as.character(length(pars.l[["lambdas"]])), + "2" = { + function(p, lambda0, lambda1) { + if (transformed) { + p <- unLogit(p) + lambda0 <- exp(lambda0) + lambda1 <- exp(lambda1) + } + -sum(.bouts2MLEll(x, p, lambda0=lambda0, + lambda1=lambda1)) + } }, + "3" = { + function(p0, p1, lambda0, lambda1, lambda2) { + if (transformed) { + p0 <- unLogit(p0) + p1 <- unLogit(p1) + lambda0 <- exp(lambda0) + lambda1 <- exp(lambda1) + lambda2 <- exp(lambda2) + } + -sum(.bouts3MLEll(x, p0=p0, p1=p1, lambda0=lambda0, + lambda1=lambda1, lambda2=lambda2)) + } }, + stop("Not implemented")) +} + +##' @describeIn boutsMLEll Log likelihood function in a 2-process Poisson +##' mixture +##' @param p,lambda0,lambda1 numeric: parameters of the model. +##' @return numeric vector +".bouts2MLEll" <- function(x, p, lambda0, lambda1) +{ + term0 <- p * lambda0 * exp(-lambda0 * x) + term1 <- (1 - p) * lambda1 * exp(-lambda1 * x) + res <- term0 + term1 + log(res) +} + +##' @describeIn boutsMLEll Log likelihood function in a 3-process Poisson +##' mixture +##' @param p0,p1,lambda2 numeric: parameters of the model. +##' @return numeric vector +".bouts3MLEll" <- function(x, p0, p1, lambda0, lambda1, lambda2) +{ + term0 <- p0 * lambda0 * exp(-lambda0 * x) + term1 <- p1 * (1 - p0) * lambda1 * exp(-lambda1 * x) + term2 = (1 - p1) * (1 - p0) * lambda2 * exp(-lambda2 * x) + res = term0 + term1 + term2 + ## if (any(res <= 0)) message("negatives at:", p0, p1, + ## lambda0, lambda1, lambda2) + log(res) } -"bouts2.nlsFUN" <- function(x, a1, lambda1, a2, lambda2) { - log(a1 * lambda1 * exp(-lambda1 * x) + a2 * lambda2 * exp(-lambda2 * x)) -} - -"bouts2.nls" <- function(lnfreq, start, maxiter) -{ - ## Value: list with non linear fitted model and bout ending criterion - ## -------------------------------------------------------------------- - ## Arguments: lnfreq=data frame with 'lnfreq' (log frequencies) and 'x' - ## (midpoints), start, maxiter=arguments for nls. - ## -------------------------------------------------------------------- - ## Author: Sebastian P. Luque - ## -------------------------------------------------------------------- - fit.nls <- nls(lnfreq ~ bouts2.nlsFUN(x, a1, lambda1, a2, lambda2), - data=lnfreq, start=start, - control=nls.control(maxiter=maxiter)) - fit.nls -} - -"bouts2.nlsBEC" <- function(fit) -{ - ## Value: Numeric with bout ending criterion - ## -------------------------------------------------------------------- - ## Arguments: list with nls fit - ## -------------------------------------------------------------------- - ## Author: Sebastian P. Luque - ## -------------------------------------------------------------------- - coefs <- coef(fit) - if (length(coefs) != 4) { - stop("fit must have 4 coefficients in a 2-process model") - } - a1_hat <- as.vector(coefs[1]) - lambda1_hat <- as.vector(coefs[2]) - a2_hat <- as.vector(coefs[3]) - lambda2_hat <- as.vector(coefs[4]) - log((a1_hat * lambda1_hat) / (a2_hat * lambda2_hat)) / - (lambda1_hat - lambda2_hat) -} - -"plotBouts2.nls" <- function(fit, lnfreq, bec.lty=2, ...) -{ - ## Value: plot of fitted model of log frequencies on x, with bec line. 
- ## -------------------------------------------------------------------- - ## Arguments: fit=nls list, lnfreq=data frame with named objects lnfreq - ## and x, bec.lty=line type for arrow; ...=arguments passed to - ## plot() - ## -------------------------------------------------------------------- - ## Author: Sebastian P. Luque - ## -------------------------------------------------------------------- - coefs <- coef(fit) - bec <- bouts2.nlsBEC(fit) - a1_hat <- as.vector(coefs[1]) - lambda1_hat <- as.vector(coefs[2]) - a2_hat <- as.vector(coefs[3]) - lambda2_hat <- as.vector(coefs[4]) - plot(lnfreq ~ x, lnfreq, type="n", ...) - curve(log(a1_hat * lambda1_hat * exp(-lambda1_hat * x) + - a2_hat * lambda2_hat * exp(-lambda2_hat * x)), - min(lnfreq$x), max(lnfreq$x), add=TRUE) - points(lnfreq ~ x, lnfreq, pch=21, bg="white") - becy <- predict(fit, list(x=bec)) - usr <- par("usr") - arrows(bec, becy, bec, usr[3], code=0, lty=bec.lty) - legend(bec, usr[3] + ((usr[4] - usr[3]) * 0.08), - paste("bec = ", round(bec, 2), sep=""), bty="n", cex=0.8) - a1_hat <- round(a1_hat, 2) - a2_hat <- round(a2_hat, 3) - lambda1_hat <- round(lambda1_hat, 3) - lambda2_hat <- round(lambda2_hat, 4) - legend("topright", - legend=bquote(y == log(.(a1_hat) %.% .(lambda1_hat) %.% - e^(- .(lambda1_hat) * x) + - .(a2_hat) %.% .(lambda2_hat) %.% - e^(- .(lambda2_hat) * x))), - bty="n", cex=0.8, adj=c(0, 1)) -} - -"bouts3.nlsFUN" <- function(x, a1, lambda1, a2, lambda2, a3, lambda3) { - log(a1 * lambda1 * exp(-lambda1 * x) + - a2 * lambda2 * exp(-lambda2 * x) + - a3 * lambda3 * exp(-lambda3 * x)) -} - -"bouts3.nls" <- function(lnfreq, start, maxiter) -{ - ## Value: list with non linear fitted model and bout ending criterion - ## -------------------------------------------------------------------- - ## Arguments: lnfreq=data frame with 'lnfreq' (log frequencies) and 'x' - ## (midpoints), start, maxiter=arguments for nls. - ## -------------------------------------------------------------------- - ## Author: Sebastian P. Luque - ## -------------------------------------------------------------------- - fit.nls <- nls(lnfreq ~ bouts3.nlsFUN(x, a1, lambda1, a2, lambda2, - a3, lambda3), data=lnfreq, - start=start, control=nls.control(maxiter=maxiter)) - fit.nls -} - -"bouts3.nlsBEC" <- function(fit) -{ - ## Value: Numeric with bout ending criterion - ## -------------------------------------------------------------------- - ## Arguments: list with nls fit - ## -------------------------------------------------------------------- - ## Author: Sebastian P. Luque - ## -------------------------------------------------------------------- - coefs <- coef(fit) - if (length(coefs) != 6) { - stop("fit must have 6 coefficients in a 3-process model") - } - a1_hat <- as.vector(coefs[1]) - lambda1_hat <- as.vector(coefs[2]) - a2_hat <- as.vector(coefs[3]) - lambda2_hat <- as.vector(coefs[4]) - a3_hat <- as.vector(coefs[5]) - lambda3_hat <- as.vector(coefs[6]) - b1 <- log((a1_hat * lambda1_hat) / (a2_hat * lambda2_hat)) / - (lambda1_hat - lambda2_hat) - b2 <- log((a2_hat * lambda2_hat) / (a3_hat * lambda3_hat)) / - (lambda2_hat - lambda3_hat) - c(bec1=b1, bec2=b2) -} - -"plotBouts3.nls" <- function(fit, lnfreq, bec.lty=2, ...) -{ - ## Value: plot of fitted model of log frequencies on x, with bec lines. 
- ## -------------------------------------------------------------------- - ## Arguments: fit=nls list, lnfreq=data frame with named objects lnfreq - ## and x, bec.lty=line type for arrow; ...=arguments passed to - ## plot() - ## -------------------------------------------------------------------- - ## Author: Sebastian P. Luque - ## -------------------------------------------------------------------- - coefs <- coef(fit) - bec <- bouts3.nlsBEC(fit) - a1_hat <- as.vector(coefs[1]) - lambda1_hat <- as.vector(coefs[2]) - a2_hat <- as.vector(coefs[3]) - lambda2_hat <- as.vector(coefs[4]) - a3_hat <- as.vector(coefs[5]) - lambda3_hat <- as.vector(coefs[6]) - plot(lnfreq ~ x, lnfreq, type="n", ...) - curve(log(a1_hat * lambda1_hat * exp(-lambda1_hat * x) + - a2_hat * lambda2_hat * exp(-lambda2_hat * x) + - a3_hat * lambda3_hat * exp(-lambda3_hat * x)), - min(lnfreq$x), max(lnfreq$x), add=TRUE) - points(lnfreq ~ x, lnfreq, pch=21, bg="white") - becy <- predict(fit, list(x=bec)) - usr <- par("usr") - arrows(bec, becy, bec, usr[3], code=0, lty=bec.lty) - legend(bec[1], usr[3] + ((usr[4] - usr[3]) * 0.12), - paste("bec1 = ", round(bec[1], 2), sep=""), bty="n", cex=0.8) - legend(bec[2], usr[3] + ((usr[4] - usr[3]) * 0.08), - paste("bec2 = ", round(bec[2], 2), sep=""), bty="n", cex=0.8) - a1_hat <- round(a1_hat, 2) - a2_hat <- round(a2_hat, 3) - a3_hat <- round(a3_hat, 3) - lambda1_hat <- round(lambda1_hat, 3) - lambda2_hat <- round(lambda2_hat, 4) - lambda3_hat <- round(lambda3_hat, 4) - legend("topright", - legend=bquote(y == log(.(a1_hat) %.% .(lambda1_hat) %.% - e^(- .(lambda1_hat) * x) + - .(a2_hat) %.% .(lambda2_hat) %.% - e^(- .(lambda2_hat) * x) + - .(a3_hat) %.% .(lambda3_hat) %.% - e^(- .(lambda3_hat) * x))), - bty="n", cex=0.8, adj=c(0, 1)) -} - -"labelBouts" <- function(x, bec, bec.method=c("standard", "seq.diff")) -{ - ## Value: a numeric vector labelling each row in x with a unique, - ## sequential bout number - ## -------------------------------------------------------------------- - ## Arguments: x=numeric vector or matrix with variable or variables, - ## respectively, to use for splitting bouts; bec=vector or matrix with - ## corresponding bout ending criterion (i.e. each element/column of x - ## is compared against the element in bec at the same index), - ## bec.method=what method was used to identify bouts - ## -------------------------------------------------------------------- - ## Author: Sebastian P. 
Luque - ## -------------------------------------------------------------------- - if (!is(x, "matrix")) x <- as.matrix(x) - if (!is(bec, "matrix")) bec <- as.matrix(bec) - if (!identical(dim(x), dim(bec))) - stop(paste("x and bec must have the same", "dimensions")) - bec.method <- match.arg(bec.method) - switch(bec.method, - standard = {xx <- x[-1, ]}, - seq.diff = {xx <- apply(x, 2, function(k) abs(diff(k)))}) - testfun <- function(xi, beci) ifelse(xi > beci, 1, 2) - bectest <- mapply(testfun, xx, bec[-1, ]) - dim(bectest) <- dim(xx) - bectest.full <- rbind(1, bectest) - bectest.any <- apply(bectest.full, 1, function(k) any(k < 2)) - chgbout <- which(bectest.any) - boutno <- seq(along=chgbout) - reps <- diff(c(chgbout, nrow(x) + 1)) - rep(boutno, reps) -} - -"bouts2.mleFUN" <- function(x, p, lambda1, lambda2) -{ - log(p * lambda1 * exp(-lambda1 * x) + - (1 - p) * lambda2 * exp(-lambda2 * x)) -} - -"bouts2.ll" <- function(x) # 2-process Poisson -{ - function(p, lambda1, lambda2) { - -sum(diveMove::bouts2.mleFUN(x, p, lambda1, lambda2)) - } -} - -"bouts2.LL" <- function(x) # 2-process Poisson; transformed model -{ - function(p, lambda1, lambda2) { - p <- unLogit(p) - lambda1 <- exp(lambda1) - lambda2 <- exp(lambda2) - -sum(diveMove::bouts2.mleFUN(x, p, lambda1, lambda2)) - } -} - -## "bouts3.mleFUN" <- function(x, p1, lambda1, p2, lambda2, lambda3) -## { -## log(p1 * lambda1 * exp(-lambda1 * x) + -## p2 * lambda2 * exp(-lambda2 * x) + -## (1 - (p1 + p2)) * exp(-lambda3 * x)) -## } - -## "bouts3.ll" <- function(x) # 3-process Poisson -## { -## function(p1, lambda1, p2, lambda2, lambda3) { -## -sum(diveMove::bouts3.mleFUN(x, p1, lambda1, -## p2, lambda2, lambda3)) -## } -## } - -## "bouts3.LL" <- function(x) # 3-process Poisson; transformed model -## { -## function(p1, lambda1, p2, lambda2, lambda3) { -## p1 <- unLogit(p1); p2 <- unLogit(p2) -## lambda1 <- exp(lambda1) -## lambda2 <- exp(lambda2) -## lambda3 <- exp(lambda3) -## -sum(diveMove::bouts3.mleFUN(x, p1, lambda1, -## p2, lambda2, lambda3)) -## } -## } - -"bouts.mle" <- function(ll.fun, start, x, ...) -{ - ## Value: An mle object with fitted parameters - ## -------------------------------------------------------------------- - ## Arguments: loglik.fun=string naming the function to fit; start=named - ## list with starting values (exactly as given in ll.fun2, i.e. the - ## reparameterized versions); x=numeric vector with variable to model; - ## ...=passed to mle - ## -------------------------------------------------------------------- - ## Author: Sebastian P. Luque - ## -------------------------------------------------------------------- - loglik.fun <- ll.fun(x) - fit.mle <- mle(loglik.fun, start=start, ...) - fit.mle -} - -"bouts2.mleBEC" <- function(fit) -{ - ## Value: Numeric with bout ending criterion - ## -------------------------------------------------------------------- - ## Arguments: fit=mle object with fitted 2-process model - ## -------------------------------------------------------------------- - ## Author: Sebastian P. Luque - ## -------------------------------------------------------------------- - coefs <- coef(fit) - if (length(coefs) != 3) { - stop("fit must have 3 coefficients in a 2-process model") - } - p_hat <- as.vector(coefs[1]) - lambda1_hat <- as.vector(coefs[2]) - lambda2_hat <- as.vector(coefs[3]) - log((p_hat * lambda1_hat) / ((1 - p_hat) * lambda2_hat)) / - (lambda1_hat - lambda2_hat) -} - -"plotBouts2.mle" <- function(fit, x, xlab="x", ylab="Log Frequency", - bec.lty=2, ...) 
-{ - ## Value: plot - ## -------------------------------------------------------------------- - ## Arguments: fit=mle object with fitted 2-process model, x=numeric - ## vector with observed data; xlab=ylab=strings for titles; - ## bec.lty=line type for bec; ...=args to curve(). - ## -------------------------------------------------------------------- - ## Author: Sebastian P. Luque - ## -------------------------------------------------------------------- - coefs <- coef(fit) - p_hat <- as.vector(coefs[1]) - lambda1_hat <- as.vector(coefs[2]) - lambda2_hat <- as.vector(coefs[3]) - bec <- bouts2.mleBEC(fit) - range.x <- range(x, na.rm=TRUE) - curve(log(p_hat * lambda1_hat * exp(-lambda1_hat * x) + - (1 - p_hat) * lambda2_hat * exp(-lambda2_hat * x)), - from=range.x[1], to=range.x[2], xlab=xlab, ylab=ylab, - xaxs="i", yaxs="i", ...) - rug(jitter(x), side=3, ticksize=0.015, quiet=TRUE) - becy <- bouts2.mleFUN(bec, p=p_hat, lambda1=lambda1_hat, - lambda2=lambda2_hat) - usr <- par("usr") - arrows(bec, becy, bec, usr[3], code=0, lty=bec.lty) - legend(bec, usr[3] + ((usr[4] - usr[3]) * 0.08), - paste("bec = ", round(bec, 2), sep=""), bty="n", cex=0.8) - p_hat <- round(p_hat, 2) - lambda1_hat <- round(lambda1_hat, 3) - lambda2_hat <- round(lambda2_hat, 4) - legend("topright", - legend=bquote(y == log(.(p_hat) %.% .(lambda1_hat) %.% - e^(- .(lambda1_hat) * x) + - .(1 - p_hat) %.% .(lambda2_hat) %.% - e^(- .(lambda2_hat) * x))), - bty="n", cex=0.8, adj=c(0, 1)) -} - -"plotBouts2.cdf" <- function(fit, x, draw.bec=FALSE, bec.lty=2, ...) -{ - ## Value: plot - ## -------------------------------------------------------------------- - ## Arguments: fit=mle object with fitted 2-process model, x=numeric - ## vector with observed data, draw.bec=logical; whether to draw the bec; - ## bec.lty=line type for the bec reference line; ...=passed to plot() - ## -------------------------------------------------------------------- - ## Author: Sebastian P. Luque - ## -------------------------------------------------------------------- - cdf.fun <- function(x, p, lambda1, lambda2) { - 1 - p*exp(-lambda1*x) - (1 - p)*exp(-lambda2*x) - } - coefs <- coef(fit) - if (!length(coefs) %in% c(3, 4)) - stop("Number of coefficients in 'fit' must be 3 or 4") - if (length(coefs) == 4) { - p <- coefs[1] / (coefs[1] + coefs[3]) - coefs <- c(p, coefs[2], coefs[4]) - } - x <- log1p(x) - x.ecdf <- ecdf(x) - plot(x.ecdf, las=1, cex.p=0.5, pch=19, xaxt="n", ...) 
- xorig.pretty <- axTicks(1, axp=c(range(exp(x)), 1), log=TRUE) - xat <- c(0, log1p(xorig.pretty[-1])) - axis(1, at=xat, labels=c(0, xorig.pretty[-1])) - plot(function(x) { - x <- expm1(x) - cdf.fun(x, coefs[1], coefs[2], coefs[3]) - }, 0, max(x), add=TRUE) - if (draw.bec) { - bec <- bec2(fit) - becy <- cdf.fun(bec, coefs[1], coefs[2], coefs[3]) - arrows(log1p(bec), becy, log1p(bec), 0, code=0, lty=bec.lty) - legend(log1p(bec), 0.1, paste("bec = ", round(bec, 2), sep=""), - bty="n", cex=0.8) - } -} - - -## We set these here to avoid collating problems -setMethod("bec2", signature(fit="nls"), bouts2.nlsBEC) -setMethod("bec2", signature(fit="mle"), bouts2.mleBEC) -setMethod("bec3", signature(fit="nls"), bouts3.nlsBEC) - -## Declare global variables, if needed -if (getRversion() >= "2.15.1") utils::globalVariables("x") - - ## TEST ZONE -------------------------------------------------------------- - -## "bouts3.mleFUN" <- function(x, p1, lambda1, p2, lambda2, lambda3) -## { -## log(p1 * lambda1 * exp(-lambda1 * x) + -## p2 * lambda2 * exp(-lambda2 * x) + -## (1 - (p1 + p2)) * exp(-lambda3 * x)) -## } - -## "bouts3.ll" <- function(x) # 3-process Poisson -## { -## function(p1, lambda1, p2, lambda2, lambda3) { -## -sum(bouts3.mleFUN(x, p1, lambda1, -## p2, lambda2, lambda3)) -## } -## } - -## "bouts3.LL" <- function(x) # 3-process Poisson; transformed model -## { -## function(p1, lambda1, p2, lambda2, lambda3) { -## p1 <- unLogit(p1); p2 <- unLogit(p2) -## lambda1 <- exp(lambda1) -## lambda2 <- exp(lambda2) -## lambda3 <- exp(lambda3) -## -sum(bouts3.mleFUN(x, p1, lambda1, -## p2, lambda2, lambda3)) -## } -## } - -## ## Example code -## utils::example("diveStats", package="diveMove", -## ask=FALSE, echo=FALSE) -## postdives <- tdrX.tab$postdive.dur -## postdives.diff <- abs(diff(postdives)) - -## ## Remove isolated dives -## postdives.diff <- postdives.diff[postdives.diff < 4000] -## lnfreq <- boutfreqs(postdives.diff, bw=0.1, plot=FALSE) -## startval <- boutinit(lnfreq, c(50, 400)) -## p1 <- startval[[1]]["a"] / (startval[[1]]["a"] + startval[[2]]["a"] + -## startval[[3]]["a"]) -## p2 <- startval[[2]]["a"] / (startval[[1]]["a"] + startval[[2]]["a"] + -## startval[[3]]["a"]) - -## ## Fit the reparameterized (transformed parameters) model -## ## Drop names by wrapping around as.vector() -## init.parms <- list(p1=as.vector(logit(p1)), -## lambda1=as.vector(log(startval[[1]]["lambda"])), -## p2=as.vector(logit(p2)), -## lambda2=as.vector(log(startval[[2]]["lambda"])), -## lambda3=as.vector(log(startval[[3]]["lambda"]))) -## bout.fit1 <- bouts.mle(bouts3.LL, start=init.parms, x=postdives.diff, -## lower=c(0, -5, -20, -10, -10), -## upper=c(20, 0, 0, 0, 0), -## method="L-BFGS-B", control=list(trace=TRUE)) -## coefs <- as.vector(coef(bout.fit1)) - -## ## Un-transform and fit the original parameterization -## init.parms <- list(p1=unLogit(coefs[1]), lambda1=exp(coefs[2]), -## p2=unLogit(coefs[3]), lambda2=exp(coefs[4]), -## lambda3=exp(coefs[5])) -## bout.fit2 <- bouts.mle(bouts3.ll, x=postdives.diff, start=init.parms, -## method="L-BFGS-B", lower=rep(1e-08, 5), -## control=list(parscale=c(1, 0.1, 1, 0.01, 0.001))) -## coefs <- as.vector(coef(bout.fit2)) -## curve(log(coefs[1] * coefs[2] * exp(-coefs[2] * x) + -## coefs[3] * coefs[4] * exp(-coefs[4] * x) + -## (1 - (coefs[1] + coefs[3])) * exp(-coefs[5] * x)), 0, 10000) - -## ## Simulations - -## ## We choose these -## set.seed(10) -## n.sim <- 10000 -## p1.sim <- 0.7 -## lambda1.sim <- 0.05 -## p2.sim <- 0.2 -## lambda2.sim <- 0.005 -## lambda3.sim <- 
0.001 -## pars.sim <- c(n.sim, p1.sim, lambda1.sim, -## p2.sim, lambda2.sim, lambda3.sim) - -## ## Try with boot for many simulations -## library(boot) -## nls.fun <- function(pars) { # pars=c(n, p, lambda1, lambda2) -## x <- ifelse(runif(pars[1]) < pars[2], rexp(pars[1], pars[3]), -## rexp(pars[1], pars[4])) -## x.hist <- boutfreqs(x, bw=5, plot=FALSE) -## startval <- boutinit(x.hist, 80, plot=FALSE) -## fit <- bouts2.nls(x.hist, start=startval, maxiter=500) -## coefs.nls <- coef(fit) -## c(coefs.nls[1]/(coefs.nls[1] + coefs.nls[3]), -## coefs.nls[2], coefs.nls[4]) -## } -## boot.nls <- boot(pars.sim, nls.fun, sim="parametric", R=1000) -## ## Bias with respect to known pars -## means.nls <- apply(boot.nls$t, 2, mean) -## se.nls <- apply(boot.nls$t, 2, function(x) sd(x)/sqrt(length(x))) -## bias.nls <- means.nls - pars.sim[-1] - -## ## Fit with MLM -## mle.fun <- function(pars) { -## x <- ifelse(runif(pars[1]) < pars[2], rexp(pars[1], pars[3]), -## rexp(pars[1], pars[4])) -## x.hist <- boutfreqs(x, bw=5, plot=FALSE) -## startval <- boutinit(x.hist, 80, plot=FALSE) -## p <- startval$a1 / (startval$a1 + startval$a2) -## init.parms <- list(p=logit(p), lambda1=log(startval$lambda1), -## lambda2=log(startval$lambda2)) -## fit1 <- bouts.mle(bouts2.LL, start=init.parms, x=x, -## method="L-BFGS-B", lower=c(-2, -5, -10)) -## coefs <- as.vector(coef(fit1)) -## init.parms <- list(p=unLogit(coefs[1]), lambda1=exp(coefs[2]), -## lambda2=exp(coefs[3])) -## fit2 <- bouts.mle(bouts2.ll, x=x, start=init.parms, -## method="L-BFGS-B", lower=rep(1e-08, 3), -## control=list(parscale=c(1, 0.1, 0.01))) -## coef(fit2) -## } -## boot.mle <- boot(pars.sim, mle.fun, sim="parametric", R=1000) -## ## Bias with respect to known pars -## means.mle <- apply(boot.mle$t, 2, mean) -## se.mle <- apply(boot.mle$t, 2, function(x) sd(x)/sqrt(length(x))) -## bias.mle <- means.mle - pars.sim[-1] - -## ## Box plots of simulations -## boot.pars <- data.frame(method=factor(rep(c("MLM", "SDA"), -## each=c(nrow(boot.mle$t), nrow(boot.nls$t)))), -## p=c(boot.mle$t[, 1], boot.nls$t[, 1]), -## lambda1=c(boot.mle$t[, 2], boot.nls$t[, 2]), -## lambda2=c(boot.mle$t[, 3], boot.nls$t[, 3])) -## bplot.pars <- bwplot(p + lambda1 + lambda2 ~ method, data=boot.pars, -## layout=c(3, 1), as.table=TRUE, allow.multiple=TRUE, -## outer=TRUE, do.out=FALSE, -## scales=list(y="free", tck=c(0.8, 0), alternating=1, -## rot=c(0, 90), x=list(labels=c("MLM", "SDA"))), -## strip=strip.custom(bg="transparent", -## factor.levels=c(expression(italic(p)), -## expression(italic(lambda[f])), -## expression(italic(lambda[s])))), -## ylab="Estimated value", pch="|", -## panel=function(x, y, ...) { -## panel.bwplot(x, y, ...) -## if (panel.number() == 1) -## panel.abline(h=0.7, lty=2, ...) -## if (panel.number() == 2) -## panel.abline(h=0.05, lty=2, ...) -## if (panel.number() == 3) -## panel.abline(h=0.005, lty=2, ...) -## }) - -## trellis.device(pdf, file="par-bias.pdf", color=FALSE, -## width=5, height=5) -## trellis.par.set(box.umbrella=list(lty=1)) -## print(bplot.pars) -## dev.off() diff --git a/R/bouts_helpers.R b/R/bouts_helpers.R new file mode 100644 index 0000000..c32fc1f --- /dev/null +++ b/R/bouts_helpers.R @@ -0,0 +1,199 @@ +##' Histogram of log-transformed frequencies +##' @param x numeric vector on which bouts will be identified based on +##' \dQuote{method}. For \code{labelBouts} it can also be a matrix with +##' different variables for which bouts should be identified. +##' @param bw numeric scalar: bin width for the histogram. 
+##' @param method character: method used for calculating the frequencies: +##' \dQuote{standard} simply uses x, while \dQuote{seq.diff} uses the +##' sequential differences method. +##' @param plot logical, whether to plot results or not. +##' @param ... For \code{boutfreqs}, arguments passed to hist (must exclude +##' \code{breaks} and \code{include.lowest}) +##' @return +##' \code{boutfreqs} returns an object of class \code{Bouts}, with slot +##' \code{lnfreq} consisting of a data frame with components \var{lnfreq} +##' containing the log frequencies and \var{x}, containing the +##' corresponding mid points of the histogram. Empty bins are excluded. A +##' plot (histogram of \emph{input data}) is produced as a side effect if +##' argument plot is \code{TRUE}. See the Details section. +##' @author Sebastian P. Luque \email{spluque@@gmail.com} +"boutfreqs" <- function(x, bw, method=c("standard", "seq.diff"), + plot=TRUE, ...) +{ + method <- match.arg(method) + switch(method, + standard = {upper <- max(x, na.rm=TRUE) + brks <- seq(min(x, na.rm=TRUE), upper, bw) + if (brks[length(brks)] < upper) { + brks <- c(brks, brks[length(brks)] + bw) + } + h <- hist(x, breaks=brks, include.lowest=TRUE, + plot=plot, ...)}, + seq.diff = {diff.x <- abs(diff(x)) + upper <- max(diff.x, na.rm=TRUE) + brks <- seq(0, upper, bw) + if (brks[length(brks)] < upper) { + brks <- c(brks, brks[length(brks)] + bw) + } + h <- hist(diff.x, breaks=brks, include.lowest=TRUE, + plot=plot, ...)}) + ok <- which(h$counts > 0) + freq.adj <- h$counts[ok] / diff(c(0, ok)) + new("Bouts", x=x, method=method, + lnfreq=data.frame(lnfreq=log(freq.adj), x=h$mids[ok])) +} + +##' Utilities for Poisson mixture analyses +##' +##' \code{calc.p} computes \code{p} (proportion) parameter from \code{a} +##' and \code{lambda} coefficients in a broken stick model. +##' @param coefs numeric matrix [2,N] of coefficients (\code{a} and +##' \code{lambda}) in rows for each process of the model in columns. +##' Columns are assumed to be in decreasing order with respect to +##' \code{lambda} +##' @return numeric vector with proportion parameters implied by +##' \code{coefs}. +##' @author Sebastian P. Luque \email{spluque@@gmail.com} +##' @rdname bouts-internal +##' @keywords internal +"calc.p" <- function(coefs) +{ + p <- rep(NA_real_, ncol(coefs) - 1) + for (coln in seq_len(ncol(coefs) - 1)) { + procn1 <- coefs[, coln] + procn2 <- coefs[, coln + 1] + a1 <- procn1[1] + a2 <- procn2[1] + p[coln] <- a1 / (a1 + a2) + } + p +} + +##' Build a list of \code{p} and \code{lambda} parameters from +##' one-dimensional vector of coefficients +##' +##' \code{build.p.lambda} parses the \code{x} vector, usually returned by +##' the \code{coef} method, where \eqn{x = +##' (p_0,\dots,p_n,\lambda_1,\dots,\lambda_{n+1})}{x=(p_0,...,p_n,lambda_0,...,lambda_n+1)}, +##' and build a named list with \code{p} and \code{lambda} elements to use +##' in fitting functions. +##' @param x numeric vector of coefficients +##' @return named (\code{p}, \code{lambda}) list with parsed coefficients. 
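## Sketch of the coefficient layout that build.p.lambda() below expects,
## with made-up values: a 3-process fit is parameterized as
## c(p0, p1, lambda0, lambda1, lambda2), which is split into a named list
## of proportions and rates.
coefs3 <- c(0.7, 0.6, 0.05, 0.01, 8e-4)
list(p=coefs3[1:2], lambdas=coefs3[3:5])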
+##' @rdname bouts-internal +"build.p.lambda" <- function(x) { + npars <- length(x) + switch(as.character(npars), + "3" = { + p <- x[1] + lambdas <- x[2:length(x)] + }, + "5" = { + p <- x[1:2] + lambdas <- x[3:length(x)] + }, + stop("Only mixtures of <= 3 process are implemented")) + + list(p=p, lambdas=lambdas) +} + +##' Generate samples from a mixture of exponential distributions +##' +##' \code{rmixexp} uses a special definition for the probabilities +##' \code{p_i} to generate random samples from a mixed Poisson distribution +##' with known parameters for each process. In the two-process case, +##' \code{p} represents the proportion of "fast" to "slow" events in the +##' mixture. In the three-process case, \code{p_0} represents the +##' proportion of "fast" to "slow" events, and \code{p_1} represents the +##' proportion of "slow" to "slow" *and* "very slow" events. +##' @param n integer output sample size. +##' @param p numeric probabilities for processes generating the output +##' mixture sample. +##' @param lambdas numeric \code{lambda} (rate) for each process. +##' @return vector of samples. +##' @examples +##' ## Draw samples from a mixture where the first process occurs with +##' ## p < 0.7, and the second process occurs with the remaining +##' ## probability. +##' p <- 0.7 +##' lda <- c(0.05, 0.005) +##' (rndprocs2 <- rmixexp(1000, p, lda)) +##' +##' ## 3-process +##' p_f <- 0.6 # fast to slow +##' p_svs <- 0.7 # prop of slow to (slow + very slow) procs +##' p_true <- c(p_f, p_svs) +##' lda_true <- c(0.05, 0.01, 8e-4) +##' (rndprocs3 <- rmixexp(1000, p_true, lda_true)) +"rmixexp" <- function(n, p, lambdas) { + if (length(lambdas) != (length(p) + 1)) { + stop("lambdas must have one more element than p") + } + switch(as.character(length(p)), + "1" = { p.full <- c(p, 1 - p) }, + "2" = { + p0 <- p[1] + p1 <- p[2] * (1 - p0) + p2 <- 1 - (p0 + p1) + p.full <- c(p0, p1, p2) + }, + stop("Mixtures of more than 3 processes are not yet implemented")) + + chooser <- sample(length(lambdas), n, replace=TRUE, + prob=p.full / sum(p.full)) + rates <- lambdas + rexp(n, rate=rates[chooser]) +} + +##' Logit transformation +##' +##' \code{logit} and \code{unLogit} are helpful for reparameterizing the +##' negative maximum likelihood function, if using Langton et al. (1995). +##' @param p numeric vector of proportions (0-1) to transform to the logit +##' scale. +##' @return \code{unLogit} and \code{logit} return a numeric vector with +##' the (un)transformed arguments. +##' @rdname bouts-internal +"logit" <- function(p) log(p / (1 - p)) + +##' Untransform logit +##' @param logit numeric scalar: logit value to transform back to original +##' scale. +##' @rdname bouts-internal +"unLogit" <- function(logit) exp(logit) / (exp(logit) + 1) + +##' Estimated cumulative frequency for two- or three-process Poisson +##' mixture models +##' @param x numeric vector described by model. +##' @param p numeric scalar or vector of proportion parameters. +##' @param lambdas numeric vector of rate parameters. +##' @return numeric vector with cumulative frequency. +##' @author Sebastian P. 
Luque \email{spluque@@gmail.com} +##' @examples +##' utils::example("rmixexp", package="diveMove", ask=FALSE) +##' ## boutsCDF(rndprocs3, p=p_true, lambdas=lda_true) +"boutsCDF" <- function(x, p, lambdas) { + nprocs <- length(lambdas) + + ## We assume at least two processes + p0 <- p[1] + lda0 <- lambdas[1] + term0 <- 1 - p0 * exp(-lda0 * x) + + switch(as.character(nprocs), + "2" = { + lda1 = lambdas[2] + term1 = (1 - p0) * exp(-lda1 * x) + cdf = term0 - term1 + }, + "3" = { + p1 = p[2] + lda1 = lambdas[2] + term1 = p1 * (1 - p0) * exp(-lda1 * x) + lda2 = lambdas[3] + term2 = (1 - p0) * (1 - p1) * exp(-lda2 * x) + cdf = term0 - term1 - term2 + }, + stop("Only mixtures of <= 3 process are implemented")) + + cdf +} diff --git a/R/calibrate.R b/R/calibrate.R index fac3988..6b74265 100644 --- a/R/calibrate.R +++ b/R/calibrate.R @@ -1,4 +1,285 @@ +##' Calibrate Depth and Generate a "TDRcalibrate" object +##' +##' Detect periods of major activities in a \acronym{TDR} record, calibrate +##' depth readings, and generate a \code{\link{TDRcalibrate}} object +##' essential for subsequent summaries of diving behaviour. +##' +##' This function is really a wrapper around \code{.detPhase}, +##' \code{.detDive}, and \code{.zoc} which perform the work on simplified +##' objects. It performs wet/dry phase detection, zero-offset correction +##' of depth, and detection of dives, as well as proper labelling of the +##' latter. +##' +##' The procedure starts by zero-offset correcting depth (see \sQuote{ZOC} +##' below), and then a factor is created with value \dQuote{L} (dry) for +##' rows with NAs for \code{depth} and value \dQuote{W} (wet) otherwise. +##' This assumes that \acronym{TDR}s were programmed to turn off recording +##' of depth when instrument is dry (typically by means of a salt-water +##' switch). If this assumption cannot be made for any reason, then a +##' logical vector as long as the time series should be supplied as +##' argument \code{wet.cond} to indicate which observations should be +##' considered wet. This argument is directly analogous to the +##' \code{subset} argument in \code{\link{subset.data.frame}}, so it can +##' refer to any variable in the \code{\link{TDR}} object (see +##' \sQuote{Note} section below). The duration of each of these phases of +##' activity is subsequently calculated. If the duration of a dry phase +##' (\dQuote{L}) is less than \code{dry.thr}, then the values in the factor +##' for that phase are changed to \dQuote{W} (wet). The duration of phases +##' is then recalculated, and if the duration of a phase of wet activity is +##' less than \code{wet.thr}, then the corresponding value for the factor +##' is changed to \dQuote{Z} (trivial wet). The durations of all phases +##' are recalculated a third time to provide final phase durations. +##' +##' Some instruments produce a peculiar pattern of missing data near the +##' surface, at the beginning and/or end of dives. The argument +##' \code{interp.wet} may help to rectify this problem by using an +##' interpolating spline function to impute the missing data, constraining +##' the result to a minimum depth of zero. Please note that this optional +##' step is performed after ZOC and before identifying dives, so that +##' interpolation is performed through dry phases coded as wet because +##' their duration was briefer than \code{dry.thr}. Therefore, +##' \code{dry.thr} must be chosen carefully to avoid interpolation through +##' legitimate dry periods. 
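## A toy sketch of the wet/dry relabelling rule described above (not the
## package's internal phase-detection code): dry runs shorter than dry.thr
## become wet, and wet runs that remain shorter than wet.thr become
## trivial wet ("Z").  Durations here are counted in samples and all
## values are made up for illustration.
act <- rep(c("L", "W", "L", "W", "L", "W", "L"),
           times=c(50, 5, 30, 100, 3, 80, 40))
dry.thr <- 10; wet.thr <- 20
runs <- rle(act)
runs$values[runs$values == "L" & runs$lengths < dry.thr] <- "W"
act <- inverse.rle(runs)
runs <- rle(act)                       # recompute after merging wet runs
runs$values[runs$values == "W" & runs$lengths < wet.thr] <- "Z"
rle(inverse.rle(runs))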
+##' +##' The next step is to detect dives whenever the zero-offset corrected +##' depth in an underwater phase is below the specified dive threshold. A +##' new factor with finer levels of activity is thus generated, including +##' \dQuote{U} (underwater), and \dQuote{D} (diving) in addition to the +##' ones described above. +##' +##' Once dives have been detected and assigned to a period of wet activity, +##' phases within dives are identified using the descent, ascent and wiggle +##' criteria (see \sQuote{Detection of dive phases} below). This procedure +##' generates a factor with levels \dQuote{D}, \dQuote{DB}, \dQuote{B}, +##' \dQuote{BA}, \dQuote{DA}, \dQuote{A}, and \dQuote{X}, breaking the +##' input into descent, descent/bottom, bottom, bottom/ascent, ascent, +##' descent/ascent (ocurring when no bottom phase can be detected) and +##' non-dive (surface), respectively. +##' +##' ## ZOC +##' +##' This procedure is required to correct drifts in the pressure transducer +##' of \acronym{TDR} records and noise in depth measurements. Three +##' methods are available to perform this correction. +##' +##' Method \dQuote{visual} calls \code{\link{plotTDR}}, which plots depth +##' and, optionally, speed vs. time with the ability of zooming in and out +##' on time, changing maximum depths displayed, and panning through time. +##' The button to zero-offset correct sections of the record allows for the +##' collection of \sQuote{x} and \sQuote{y} coordinates for two points, +##' obtained by clicking on the plot region. The first point clicked +##' represents the offset and beginning time of section to correct, and the +##' second one represents the ending time of the section to correct. +##' Multiple sections of the record can be corrected in this manner, by +##' panning through the time and repeating the procedure. In case there's +##' overlap between zero offset corrected windows, the last one prevails. +##' +##' Method \dQuote{offset} can be used when the offset is known in advance, +##' and this value is used to correct the entire time series. Therefore, +##' offset=0 specifies no correction. +##' +##' Method \dQuote{filter} implements a smoothing/filtering mechanism where +##' running quantiles can be applied to depth measurements in a recursive +##' manner (Luque and Fried 2011), using \code{.depth.filter}. The method +##' calculates the first running quantile defined by \code{probs[1]} on a +##' moving window of size \code{k[1]}. The next running quantile, defined +##' by \code{probs[2]} and \code{k[2]}, is applied to the smoothed/filtered +##' depth measurements from the previous step, and so on. The corrected +##' depth measurements (d) are calculated as: +##' +##' \deqn{d=d_{0} - d_{n}}{d=d[0] - d[n]} +##' +##' where \eqn{d_{0}}{d[0]} is original depth and \eqn{d_{n}}{d[n]} is the +##' last smoothed/filtered depth. This method is under development, but +##' reasonable results can be achieved by applying two filters (see +##' \sQuote{Examples}). The default \code{na.rm=TRUE} works well when +##' there are no level shifts between non-NA phases in the data, but +##' \code{na.rm=FALSE} is better in the presence of such shifts. In other +##' words, there is no reason to pollute the moving window with NAs when +##' non-NA phases can be regarded as a continuum, so splicing non-NA phases +##' makes sense. Conversely, if there are level shifts between non-NA +##' phases, then it is better to retain NA phases to help the algorithm +##' recognize the shifts while sliding the window(s). 
The search for the +##' surface can be limited to specified bounds during smoothing/filtering, +##' so that observations outside these bounds are interpolated using the +##' bounded smoothed/filtered series. +##' +##' Once the whole record has been zero-offset corrected, remaining depths +##' below zero are set to zero, as these are assumed to indicate values at +##' the surface. +##' +##' ## Detection of dive phases +##' +##' The process for each dive begins by taking all observations below the +##' dive detection threshold, and setting the beginning and end depths to +##' zero, at time steps prior to the first and after the last, +##' respectively. The latter ensures that descent and ascent derivatives +##' are non-negative and non-positive, respectively, so that the end and +##' beginning of these phases are not truncated. The next step is to fit a +##' model to each dive. Two models can be chosen for this purpose: +##' \sQuote{unimodal} (default) and \sQuote{smooth.spline}. +##' +##' Both models consist of a cubic spline, and its first derivative is +##' evaluated to investigate changes in vertical rate. Therefore, at least +##' 4 observations are required for each dive, so the time series is +##' linearly interpolated at equally spaced time steps if this limit is not +##' achieved in the current dive. Wiggles at the beginning and end of the +##' dive are assumed to be zero offset correction errors, so depth +##' observations at these extremes are interpolated between zero and the +##' next observations when this occurs. +##' +##' ### \sQuote{unimodal} +##' +##' In this default model, the spline is constrained to be unimodal +##' (Koellmann et al. 2014), assuming the diver must return to the surface +##' to breathe. The model is fitted using the uniReg package (see +##' \code{\link[uniReg]{unireg}}). This model and constraint are +##' consistent with the definition of dives in air-breathers, so it is +##' certainly appropriate for this group of divers. A major advantage of +##' this approach over the next one is that the degree of smoothing is +##' determined via restricted maximum likelihood, and has no influence on +##' identifying the transition between descent and ascent. Therefore, +##' unimodal regression splines make the latter transition clearer compared +##' to using smoothing splines. +##' +##' However, note that dives with less than five samples are fit using +##' smoothing splines (see section below) regardless, as they produce the +##' same fit as unimodal regression but much faster. Therefore, ensure +##' that the parameters for that model are appropriate for the data, +##' although defaults are reasonable. +##' +##' ### \sQuote{smooth.spline} +##' +##' In this model, specified via \code{dive.model="smooth.spline"}, a +##' smoothing spline is used to model each dive (see +##' \code{\link{smooth.spline}}), using the chosen smoothing parameter. +##' +##' Dive phases identified via this model, however, are highly sensitive to +##' the degree of smoothing (\code{smooth.par}) used, thus making it +##' difficult to determine what amount of smoothing is adequate. +##' +##' A comparison of these methods is shown in the Examples section of +##' \code{\link{diveModel}}. +##' +##' The first derivative of the spline is evaluated at a set of knots to +##' calculate the vertical rate throughout the dive and determine the end +##' of descent and beginning of ascent.
This set of knots is established +##' using a regular time sequence with beginning and end equal to the +##' extremes of the input sequence, and with length equal to \eqn{N \times +##' knot.factor}{N * \code{knot.factor}}. Equivalent procedures are used +##' for detecting descent and ascent phases. +##' +##' Once one of the models above has been fitted to each dive, the quantile +##' corresponding to (\code{descent.crit.q}) of all the positive +##' derivatives (rate of descent) at the beginning of the dive is used as +##' threshold for determining the end of descent. Descent is deemed to +##' have ended at the \emph{first} minimum derivative, and the nearest +##' input time observation is considered to indicate the end of descent. +##' The sign of the comparisons is reversed for detecting the ascent. If +##' observed depth to the left and right of the derivative defining the +##' ascent are the same, the right takes precedence. +##' +##' The particular dive phase categories are subsequently defined using +##' simple set operations. +##' +##' @param x An object of class \code{\link{TDR}} for +##' \code{\link{calibrateDepth}} or an object of class +##' \code{\link{TDRcalibrate}} for \code{\link{calibrateSpeed}}. +##' @param dry.thr numeric: dry error threshold in seconds. Dry phases +##' shorter than this threshold will be considered as wet. +##' @param wet.cond logical: indicates which observations should be +##' considered wet. If it is not provided, records with non-missing +##' depth are assumed to correspond to wet conditions (see +##' \sQuote{Details} and \sQuote{Note} below). +##' @param wet.thr numeric: wet threshold in seconds. At-sea phases shorter +##' than this threshold will be considered as trivial wet. +##' @param dive.thr numeric: threshold depth below which an underwater +##' phase should be considered a dive. +##' @param zoc.method character string to indicate the method to use for +##' zero offset correction. One of \dQuote{visual}, \dQuote{offset}, +##' or \dQuote{filter} (see \sQuote{Details}). +##' @param ... Arguments required for ZOC methods \code{filter} (\code{k}, +##' \code{probs}, \code{depth.bounds} (defaults to range), \code{na.rm} +##' (defaults to TRUE)) and \code{offset} (\code{offset}). +##' @param interp.wet logical: if TRUE (default is FALSE), then an +##' interpolating spline function is used to impute NA depths in wet +##' periods (\emph{after ZOC}). \emph{Use with caution}: it may only +##' be useful in cases where the missing data pattern in wet periods is +##' restricted to shallow depths near the beginning and end of dives. +##' This pattern is common in some satellite-linked \acronym{TDR}s. +##' @param dive.model character string specifying what model to use for +##' each dive for the purpose of dive phase identification. One of +##' \dQuote{smooth.spline} or \dQuote{unimodal}, to choose among +##' smoothing spline or unimodal regression (see \sQuote{Details}). +##' For dives with less than five observations, smoothing spline +##' regression is used regardless (see \sQuote{Details}). +##' @param smooth.par numeric scalar representing amount of smoothing +##' (argument \code{spar} in \code{\link[stats]{smooth.spline}}) when +##' \code{dive.model="smooth.spline"}. If it is NULL, then the +##' smoothing parameter is determined by Generalized Cross-validation +##' (GCV). Ignored with default \code{dive.model="unimodal"}. +##' @param knot.factor numeric scalar that multiplies the number of samples +##' in the dive. 
This is used to construct the time predictor for the +##' derivative. +##' @param descent.crit.q numeric: critical quantile of rates of descent +##' below which descent is deemed to have ended. +##' @param ascent.crit.q numeric: critical quantile of rates of ascent +##' above which ascent is deemed to have started. +##' @return An object of class \code{\link{TDRcalibrate}}. +##' @note Note that the condition implied with argument \code{wet.cond} is +##' evaluated after the ZOC procedure, so it can refer to corrected +##' depth. In many cases, not all variables in the \code{\link{TDR}} +##' object are sampled with the same frequency, so they may need to be +##' interpolated before using them for this purpose. Note also that +##' any of these variables may contain problems similar to those dealt +##' with during ZOC, so programming instruments to record depth only +##' when wet is likely the best way to ensure proper detection of +##' wet/dry conditions. +##' @author Sebastian P. Luque \email{spluque@@gmail.com} +##' @seealso \code{\link{TDRcalibrate}}, \code{\link{.zoc}}, +##' \code{\link{.depthFilter}}, \code{\link{.detPhase}}, +##' \code{\link{.detDive}}, \code{\link{plotTDR}}, and +##' \code{\link{plotZOC}} to visually assess ZOC procedure. See +##' \code{\link{diveModel}}, \code{\link{smooth.spline}}, +##' \code{\link{unireg}} for dive models. +##' @references +##' +##' Koellmann, C., Ickstadt, K. and Fried, R. (2014) Beyond unimodal +##' regression: modelling multimodality with piecewise unimodal, mixture or +##' additive regression. Technical Report 8. +##' \url{https://sfb876.tu-dortmund.de/FORSCHUNG/techreports.html}, SFB 876, TU +##' Dortmund +##' +##' Luque, S.P. and Fried, R. (2011) Recursive filtering for zero offset +##' correction of diving depth time series. PLoS ONE 6:e15850 +##' @keywords manip math +##' @examples +##' data(divesTDR) +##' divesTDR +##' +##' \donttest{## Too long for checks +##' ## Consider a 3 m offset, a dive threshold of 3 m, the 1% quantile for +##' ## critical vertical rates, and a set of knots 20 times as long as the +##' ## observed time steps. The default dive model is used for dive +##' ## phase detection. +##' (dcalib <- calibrateDepth(divesTDR, dive.thr=3, zoc.method="offset", +##' offset=3, descent.crit.q=0.01, ascent.crit.q=0, +##' knot.factor=20)) +##' +##' ## Or ZOC algorithmically with method="filter": +##' ## dcalib <- calibrateDepth(divesTDR, dive.thr=3, zoc.method="filter", +##' ## k=c(3, 5760), probs=c(0.5, 0.02), na.rm=TRUE, +##' ## descent.crit.q=0.01, ascent.crit.q=0, +##' ## knot.factor=20) +##' +##' ## If no ZOC required: +##' data(divesTDRzoc) +##' (dcalib <- calibrateDepth(divesTDRzoc, dive.thr=3, zoc.method="offset", +##' offset=0, descent.crit.q=0.01, ascent.crit.q=0, +##' knot.factor=20)) +##' +##' } "calibrateDepth" <- function(x, dry.thr=70, wet.cond, wet.thr=3610, dive.thr=4, zoc.method=c("visual", "offset", "filter"), @@ -7,24 +288,6 @@ smooth.par=0.1, knot.factor=3, descent.crit.q=0, ascent.crit.q=0) { - ## Value: A TDRcalibrate object. Detect water/land phases in TDR - ## object, zoc data, detect dives and their phases, and label them. - ## Return a TDRcalibrate object.
- ## -------------------------------------------------------------------- - ## Arguments: x=a TDR object; dry.thr, wet.cond, wet.thr, and dive.thr - ## see .detPhase and .detDive; descent.crit, ascent.crit, and - ## wiggle.tol see .labDivePhase documentation; zoc.method=method to use - ## for zero-offset correction; ...=arguments required for ZOC methods - ## zoc.filter (k, probs, depth.bounds, na.rm (defaults to TRUE)) and - ## offset (offset); interp.wet=logical (proposal) to control whether we - ## interpolate NA depths in wet periods (*after ZOC*). Be careful with - ## latter, which uses an interpolating spline to impute the missing - ## data. 'dive.model', 'smooth.par', 'knot.factor', 'descent.crit.q', - ## and 'ascent.crit.q' are arguments passed to .cutDive() via - ## .labDivePhase(). - ## -------------------------------------------------------------------- - ## Author: Sebastian Luque - ## -------------------------------------------------------------------- if (!is(x, "TDR")) stop ("x is not a TDR object") mCall <- match.call() depth <- getDepth(x) @@ -95,27 +358,58 @@ } +##' Calibrate and build a "TDRcalibrate" object +##' +##' These functions create a \code{\link{TDRcalibrate}} object which is +##' necessary to obtain dive summary statistics. +##' +##' This calibrates speed readings following the procedure outlined in +##' Blackwell et al. (1999). +##' +##' @param x An object of class \code{\link{TDR}} for +##' \code{\link{calibrateDepth}} or an object of class +##' \code{\link{TDRcalibrate}} for \code{\link{calibrateSpeed}}. +##' @param tau numeric scalar: quantile on which to regress speed on rate +##' of depth change; passed to \code{\link[quantreg]{rq}}. +##' @param contour.level numeric scalar: the mesh obtained from the +##' bivariate kernel density estimation corresponding to this contour +##' will be used for the quantile regression to define the calibration +##' line. +##' @param z numeric scalar: only changes in depth larger than this value +##' will be used for calibration. +##' @param bad numeric vector of length 2 indicating that only rates of +##' depth change and speed greater than the given value should be used +##' for calibration, respectively. +##' @param coefs numeric: known speed calibration coefficients from +##' quantile regression as a vector of length 2 (intercept, slope). If +##' provided, these coefficients are used for calibrating speed, +##' ignoring all other arguments, except \code{x}. +##' @param main,... Arguments passed to \code{\link{rqPlot}}. +##' @param plot logical: whether to plot the results. +##' @param postscript logical: whether to produce postscript file output. +##' @return An object of class \code{\link{TDRcalibrate}}. +##' @author Sebastian P. Luque \email{spluque@@gmail.com} +##' @seealso \code{\link{TDRcalibrate}} +##' @references +##' Blackwell S, Haverl C, Le Boeuf B, Costa D (1999). A method for calibrating +##' swim-speed recorders. Marine Mammal Science 15(3):894-905. +##' @keywords manip math +##' @examples +##' \donttest{## Too long for checks +##' ## Continuing the Example from '?calibrateDepth': +##' utils::example("calibrateDepth", package="diveMove", +##' ask=FALSE, echo=FALSE, run.donttest=TRUE) +##' dcalib # the 'TDRcalibrate' that was created +##' +##' ## Calibrate speed using only changes in depth > 2 m +##' vcalib <- calibrateSpeed(dcalib, z=2) +##' vcalib +##' +##' } "calibrateSpeed" <- function(x, tau=0.1, contour.level=0.1, z=0, bad=c(0, 0), main=slot(getTDR(x), "file"), coefs, plot=TRUE, postscript=FALSE, ...) 
{ - ## Value: list with data frame of rate of depth change and speed, the - ## bivariate kernel densities, and the quantile regression object with - ## calibration line. - ## -------------------------------------------------------------------- - ## Arguments: x=TDRcalibrate object; tau=quantile on which to perform - ## the regression; contour.level=contour to extract the mesh from the - ## binned bivariate kernel density estimation (0-1); z=only changes in - ## depth > than this will be used; bad=vector with rate of depth - ## change and speed, respectively, indicating that only values greater - ## than those will be used, main=string for the title of plot; - ## coefs=intercept and slope of the calibration line, if already - ## known; plot=logical indicating whether to produce a plot; - ## postscript=logical for whether to produce postscript output; - ## ...=optional arguments for rqPlot(). - ## -------------------------------------------------------------------- - ## Author: Sebastian Luque - ## -------------------------------------------------------------------- if (!is(x, "TDRcalibrate")) { stop("x must be a TDRcalibrate object") } else if (!is(x@tdr, "TDRspeed")) { @@ -165,26 +459,41 @@ } } - +##' Plot of quantile regression for speed calibrations +##' +##' Plot of quantile regression for assessing quality of speed calibrations +##' +##' The dashed line in the plot represents a reference indicating a one-to- +##' one relationship between speed and rate of depth change. The other +##' line represents the quantile regression fit. +##' +##' @param speed numeric vector with speed in m/s. +##' @param rddepth numeric vector with rate of depth change. +##' @param z list with the bivariate kernel density estimates (1st +##' component the x points of the mesh, 2nd the y points, and 3rd the +##' matrix of densities). +##' @param contours list with components: \code{pts} which should be a +##' matrix with columns named \code{x} and \code{y}, \code{level} a +##' number indicating the contour level the points in \code{pts} +##' correspond to. +##' @param rqFit object of class \dQuote{rq} representing a quantile +##' regression fit of rate of depth change on mean speed. +##' @param main character: string with title prefix to include in output +##' plot. +##' @param xlab,ylab character vectors with axis labels. +##' @param colramp function taking an integer n as an argument and +##' returning n colors. +##' @param col.line color to use for the regression line. +##' @param cex.pts numeric: value specifying the amount by which to enlarge +##' the size of points. +##' @author Sebastian P.
Luque \email{spluque@@gmail.com} +##' @seealso \code{\link{diveStats}} +##' @keywords manip arith hplot "rqPlot" <- function(rddepth, speed, z, contours, rqFit, main="qtRegression", xlab="rate of depth change (m/s)", ylab="speed (m/s)", colramp=colorRampPalette(c("white", "darkblue")), col.line="red", cex.pts=1) { - ## Value: A quantile regression plot for TDR speed calibration - ## -------------------------------------------------------------------- - ## Arguments: rddepth=rate of depth change (m), speed=speed (m/s), - ## contours=list of 2 components (pts, a matrix with columns named x - ## and y containing the points from the contour; and level a number - ## indicating the contour level the points correspond to); - ## rqFit=quantile regression fit object; z=a list with the bivariate - ## kernel density estimates (1st component the x points of the mesh, - ## 2nd the y points, and 3rd the matrix of densities); main=title to - ## display in the plot; xlab and ylab=axis titles; colramp=color - ## function for the densities. - ## -------------------------------------------------------------------- - ## Author: Sebastian Luque - ## -------------------------------------------------------------------- axlims <- range(rddepth, speed, na.rm=TRUE) old.par <- par(no.readonly=TRUE) on.exit(par(old.par)) diff --git a/R/detDive.R b/R/detDive.R index db2f239..5c085d0 100644 --- a/R/detDive.R +++ b/R/detDive.R @@ -24,17 +24,47 @@ } +##' Detect dives from depth readings +##' +##' Identify dives in \acronym{TDR} records based on a dive threshold. +##' +##' @name detDive-internal +##' @aliases .detDive +##' @param zdepth numeric vector of zero-offset corrected depths. +##' @param act factor as long as \code{depth} coding activity, with levels +##' specified as in \code{\link{.detPhase}}. +##' @param dive.thr numeric scalar: threshold depth below which an +##' underwater phase should be considered a dive. +##' @return A \code{\link{data.frame}} with the following elements for +##' \code{.detDive} +##' +##' \item{dive.id}{Numeric vector numbering each dive in the record.} +##' +##' \item{dive.activity}{Factor with levels \dQuote{L}, \dQuote{W}, +##' \dQuote{U}, \dQuote{D}, and \dQuote{Z}, see \code{\link{.detPhase}}. +##' All levels may be represented.} +##' +##' \item{postdive.id}{Numeric vector numbering each postdive interval with +##' the same value as the preceding dive.} +##' @author Sebastian P. 
Luque \email{spluque@@gmail.com} +##' @seealso \code{\link{.detPhase}}, \code{\link{.zoc}} +##' @keywords internal +##' @examples +##' \donttest{## Too long for checks +##' ## Continuing the Example from '?calibrateDepth': +##' utils::example("calibrateDepth", package="diveMove", +##' ask=FALSE, echo=FALSE, run.donttest=TRUE) +##' dcalib # the 'TDRcalibrate' that was created +##' +##' tdr <- getTDR(dcalib) +##' +##' ## Extract the gross activity from an already calibrated TDR object +##' gross.act <- getGAct(dcalib) +##' detd <- diveMove:::.detDive(getDepth(tdr), gross.act[[2]], 3) +##' +##' } ".detDive" <- function(zdepth, act, dive.thr) { - ## Value: A data frame; detecting dives, using a depth threshold - ## -------------------------------------------------------------------- - ## Arguments: zdepth=depth vector of zoc'ed data, act=factor with - ## dry/wet activity IDs (2nd element returned by detPhase), with values - ## "W" for at-sea, dive.thr=dive threshold in m - ## -------------------------------------------------------------------- - ## Author: Sebastian Luque - ## -------------------------------------------------------------------- - ## Get the indices of below surface activity and label as "U" underw <- which((act == "W" | act == "Z") & zdepth > 0) if (length(underw) > 0) { act[underw] <- "U" diff --git a/R/detPhase.R b/R/detPhase.R index 05a0a09..66a611b 100644 --- a/R/detPhase.R +++ b/R/detPhase.R @@ -24,23 +24,56 @@ end.time=endtim) } +##' Detect phases of activity from depth readings +##' +##' Functions to identify sections of a \acronym{TDR} record displaying one +##' of three possible activities: dry, wet, and trivial wet. +##' +##' See \code{\link{calibrateDepth}}. +##' +##' @aliases .detPhase +##' @param time \code{POSIXct} object with date and time for all depths. +##' @param depth numeric vector with depth readings. +##' @param dry.thr,wet.cond,wet.thr As passed from +##' \code{\link{calibrateDepth}}. +##' @param interval As passed from \code{\link{calibrateDepth}}; sampling +##' interval in seconds. +##' @return A list with components: +##' +##' \item{phase.id}{Numeric vector identifying each activity phase, +##' starting from 1 for every input record.} +##' +##' \item{activity}{Factor with levels \dQuote{L} indicating dry, +##' \dQuote{W} indicating wet, \dQuote{U} for underwater (above dive +##' criterion), \dQuote{D} for diving, \dQuote{Z} for trivial wet animal +##' activities. Only \dQuote{L}, \dQuote{W}, and \dQuote{Z} are actually +##' represented.} +##' +##' \item{begin}{A \code{\link{POSIXct}} object as long as the number of +##' unique activity phases identified, indicating the start times for each +##' activity phase.} +##' +##' \item{end}{A \code{\link{POSIXct}} object as long as the number of +##' unique activity phases identified, indicating the end times for each +##' activity phase.} +##' @author Sebastian P. Luque \email{spluque@@gmail.com} and Andy Liaw. 
+##' @seealso \code{\link{.detDive}}, \code{\link{calibrateDepth}} +##' @keywords internal +##' @rdname detPhase-internal +##' @examples +##' data(divesTDR) +##' depths <- getDepth(divesTDR) +##' times <- getTime(divesTDR) +##' +##' detp <- diveMove:::.detPhase(times, depths, dry.thr=70, wet.thr=3610, +##' interval=getDtime(divesTDR)) +##' ## Plot detected phases +##' plotTDR(times, depths) +##' rect(xleft=detp$begin, xright=detp$end, ybottom=0, ytop=-4, +##' col=seq_along(detp$begin)) ".detPhase" <- function(time, depth, dry.thr, wet.cond, wet.thr, interval) { - ## Value: list with index of per-row activities, the activity code, and - ## start and end of each activity phase - ## -------------------------------------------------------------------- - ## Arguments: time=POSIXct vector; depth=numeric vector with depth - ## readings (m); dry.thr=duration (in s) of on-land readings that - ## should be considered at-sea; wet.thr=duration (in s) of at-sea - ## readings to be taken as leisure; wet.cond=logical indicating which - ## observations should be considered wet (only needed when instrument - ## did not have a salt-water switch turning off recording of depth, or - ## when it was inappropriately used); interval=sampling interval in - ## POSIXct units (s), to pass to rleActivity - ## -------------------------------------------------------------------- - ## Author: Sebastian Luque - ## -------------------------------------------------------------------- ## Factor with default "land" values to code activity levels: L=land, ## W=wet (at-sea), U=underwater (below dive threshold), D=diving, Z=wet ## (leisure) diff --git a/R/distSpeed.R b/R/distSpeed.R index 0ff786f..586e752 100644 --- a/R/distSpeed.R +++ b/R/distSpeed.R @@ -1,4 +1,52 @@ - +##' Calculate distance and speed between locations +##' +##' Calculate distance, time difference, and speed between pairs of points +##' defined by latitude and longitude, given the time at which all points were +##' measured. +##' +##' @param pt1 A matrix or \code{\link{data.frame}} with three columns; the +##' first a \code{POSIXct} object with dates and times for all points, the +##' second and third numeric vectors of longitude and latitude for all points, +##' respectively, in decimal degrees. +##' @param pt2 A matrix with the same size and structure as \code{pt1}. +##' @param method character indicating which of the distance algorithms from +##' \code{\link[geosphere]{geosphere-package}} to use (only default parameters +##' used). Only \code{Meeus} and \code{VincentyEllipsoid} are supported for +##' now. +##' @return A matrix with three columns: distance (km), time difference (s), +##' and speed (m/s). +##' @author Sebastian P. 
Luque \email{spluque@@gmail.com} +##' @keywords math manip +##' @examples +##' ## Using the Example from '?readLocs': +##' utils::example("readLocs", package="diveMove", +##' ask=FALSE, echo=FALSE) +##' +##' ## Travel summary between successive standard locations +##' locs.std <- subset(locs, subset=class == "0" | class == "1" | +##' class == "2" | class == "3" & +##' !is.na(lon) & !is.na(lat)) +##' ## Default Meeus method +##' locs.std.tr <- by(locs.std, locs.std$id, function(x) { +##' distSpeed(x[-nrow(x), 3:5], x[-1, 3:5]) +##' }) +##' lapply(locs.std.tr, head) +##' +##' ## Particular quantiles from travel summaries +##' lapply(locs.std.tr, function(x) { +##' quantile(x[, 3], seq(0.90, 0.99, 0.01), na.rm=TRUE) # speed +##' }) +##' lapply(locs.std.tr, function(x) { +##' quantile(x[, 1], seq(0.90, 0.99, 0.01), na.rm=TRUE) # distance +##' }) +##' +##' ## Travel summary between two arbitrary sets of points +##' pts <- seq(10) +##' (meeus <- distSpeed(locs[pts, 3:5], locs[pts + 1, 3:5])) +##' (vincenty <- distSpeed(locs[pts, 3:5], +##' locs[pts + 1, 3:5], +##' method="VincentyEllipsoid")) +##' meeus - vincenty "distSpeed" <- function(pt1, pt2, method=c("Meeus", "VincentyEllipsoid")) { ## Value: A 3-column matrix with distance, time elapsed and speed diff --git a/R/diveMove-defunct.R b/R/diveMove-defunct.R new file mode 100644 index 0000000..f5d117e --- /dev/null +++ b/R/diveMove-defunct.R @@ -0,0 +1,32 @@ + +##' Defunct functions in package \sQuote{diveMove} +##' +##' These functions are defunct and no longer available. +##' +##' @name diveMove-defunct +##' @keywords internal +NULL + +##' @rdname diveMove-defunct +##' @section \code{bouts2.ll} and \code{bouts2.LL}: +##' These functions have been superseded by the new function generator +##' \code{\link{boutsMLEll.chooser}} +"bouts2.ll" <- function() { + .Defunct("boutsMLEll.chooser", package="diveMove", + msg=paste("'bouts2.ll' is now defunct.", + "Please see help(boutsMLEll.chooser)")) +} + +##' @rdname diveMove-defunct +"bouts2.LL" <- function() { + .Defunct("boutsMLEll.chooser", package="diveMove", + msg=paste("'bouts2.LL' is now defunct.", + "Please see help(boutsMLEll.chooser)")) +} + +##' @rdname diveMove-defunct +"bouts.mle" <- function() { + .Defunct("fitMLEbouts", package="diveMove", + msg=paste("'bouts.mle' is now defunct.", + "Please see help(fitMLEbouts)")) +} diff --git a/R/diveMove-deprecated.R b/R/diveMove-deprecated.R new file mode 100644 index 0000000..6e57933 --- /dev/null +++ b/R/diveMove-deprecated.R @@ -0,0 +1,89 @@ + +##' Deprecated functions in diveMove +##' +##' These functions are provided for compatibility with older versions of +##' \sQuote{diveMove} only, and will be removed (defunct) in the next +##' release. +##' +##' @name diveMove-deprecated +##' @author Sebastian P. Luque \email{spluque@@gmail.com} +##' @keywords internal +NULL + +##' @rdname diveMove-deprecated +##' @section \code{bouts2.nlsFUN}: +##' For \code{bouts2.nlsFUN}, use \code{\link{boutsNLSll}}. +"bouts2.nlsFUN" <- function(x, a1, lambda1, a2, lambda2) { + .Deprecated("boutsNLSll", package="diveMove", + msg=paste("'bouts2.nlsFUN' is now deprecated in favor of", + "the new generalized 'boutsNLSll'.", + "Please see help(boutsNLSll)")) + boutsNLSll(x, coefs=c(a1, lambda1, a2, lambda2)) +} + +##' @rdname diveMove-deprecated +##' @section \code{bouts2.nls}: +##' For \code{bouts2.nls}, use \code{\link{fitNLSbouts}}. 
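For readers migrating from the fixed 2- and 3-process helpers, a minimal sketch of the generalized replacement follows (the coefficient values are invented, and the calls simply mirror the wrappers defined in this file, assuming boutsNLSll is exported as its new help page suggests):

library(diveMove)
x <- seq(0, 300, by=0.5)             # hypothetical gap durations (s)
## 2-process model: coefs are (a1, lambda1, a2, lambda2)
ll2 <- boutsNLSll(x, coefs=c(70, 0.05, 30, 0.005))
## 3-process model: simply supply a longer coefficient vector
ll3 <- boutsNLSll(x, coefs=c(70, 0.05, 30, 0.005, 5, 5e-4))
head(cbind(ll2, ll3))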
+"bouts2.nls" <- function(lnfreq, start, maxiter) { + .Deprecated("fitNLSbouts", package="diveMove", + msg=paste("'bouts2.nls' is now deprecated in favor of", + "the new method 'fitNLSbouts'.", + "Please see help(fitNLSbouts)")) + fitNLSbouts(lnfreq, start=start, maxiter=maxiter) +} + +##' @rdname diveMove-deprecated +##' @section \code{bec2}: +##' For \code{bec2}, use \code{\link{bec}}. +"bec2" <- function(fit) { + .Deprecated("bec", package="diveMove", + msg=paste("'bec2' is now deprecated in favor of", + "the new generalized method 'bec'.", + "Please see help(bec)")) + bec(fit) +} + +##' @rdname diveMove-deprecated +##' @section \code{bec3}: +##' For \code{bec3}, use \code{\link{bec}}. +"bec3" <- function(fit) { + .Deprecated("bec", package="diveMove", + msg=paste("'bec3' is now deprecated in favor of", + "the new generalized method 'bec'.", + "Please see help(bec)")) + bec(fit) +} + +##' @rdname diveMove-deprecated +##' @section \code{bouts3.nlsFUN}: +##' For \code{bouts3.nlsFUN}, use \code{\link{boutsNLSll}}. +"bouts3.nlsFUN" <- function(x, a1, lambda1, a2, lambda2, a3, lambda3) { + .Deprecated("boutsNLSll", package="diveMove", + msg=paste("'bouts3.nlsFUN' is now deprecated in favor of", + "the new generalized 'boutsNLSll'.", + "Please see help(boutsNLSll)")) + boutsNLSll(x, coefs=c(a1, lambda1, a2, lambda2, a3, lambda3)) +} + +##' @rdname diveMove-deprecated +##' @section \code{bouts3.nls}: +##' For \code{bouts3.nls}, use \code{\link{fitNLSbouts}}. +"bouts3.nls" <- function(lnfreq, start, maxiter) { + .Deprecated("fitNLSbouts", package="diveMove", + msg=paste("'bouts3.nls' is now deprecated in favor of", + "the new method 'fitNLSbouts'.", + "Please see help(fitNLSbouts)")) + fitNLSbouts(lnfreq, start=start, maxiter=maxiter) +} + +##' @rdname diveMove-deprecated +##' @section \code{bouts2.mleFUN}: +##' For \code{bouts2.mleFUN}, use \code{\link{.bouts2MLEll}}. +"bouts2.mleFUN" <- function(x, p, lambda1, lambda2) { + msg <- paste("'bouts3.nlsFUN' is now deprecated in favor", + "the new generalized '.bouts2MLEll', which should not be", + "used directly, but rather via 'fitMLEbouts'.", + "Please see help(fitMLEbouts))") + .Deprecated(".bouts2MLEll", package="diveMove", msg=msg) + .bouts2MLEll(x, p=p, lambda0=lambda1, lambda1=lambda2) +} diff --git a/R/diveStats.R b/R/diveStats.R index 3778fa6..d1e49af 100644 --- a/R/diveStats.R +++ b/R/diveStats.R @@ -35,14 +35,116 @@ cbind(d, b, a) } +##' Per-dive statistics +##' +##' Calculate dive statistics in \acronym{TDR} records. +##' +##' \code{diveStats} calculates various dive statistics based on time and +##' depth for an entire \acronym{TDR} record. \code{oneDiveStats} obtains +##' these statistics from a single dive, and \code{stampDive} stamps each +##' dive with associated phase information. +##' +##' @aliases diveStats +##' @param x A \code{\link{TDRcalibrate-class}} object for \code{diveStats} +##' and \code{stampDive}, and a \code{\link{data.frame}} containing a +##' single dive's data (a factor identifying the dive phases, a POSIXct +##' object with the time for each reading, a numeric depth vector, and +##' a numeric speed vector) for \code{oneDiveStats}. +##' @param depth.deriv logical: should depth derivative statistics be +##' calculated? +##' @param interval numeric scalar: sampling interval for interpreting +##' \code{x}. +##' @param speed logical: should speed statistics be calculated? 
+##' @param ignoreZ logical: whether phases should be numbered considering +##' all aquatic activities (\dQuote{W} and \dQuote{Z}) or ignoring +##' \dQuote{Z} activities. +##' @return A \code{\link{data.frame}} with one row per dive detected +##' (durations are in s, and linear variables in m): +##' +##' \item{begdesc}{A \code{POSIXct} object, specifying the start time of +##' each dive.} +##' +##' \item{enddesc}{A \code{POSIXct} object, as \code{begdesc} indicating +##' descent's end time.} +##' +##' \item{begasc}{A \code{POSIXct} object, as \code{begdesc} indicating the +##' time ascent began.} +##' +##' \item{desctim}{Descent duration of each dive.} +##' +##' \item{botttim}{Bottom duration of each dive.} +##' +##' \item{asctim}{Ascent duration of each dive.} +##' +##' \item{divetim}{Dive duration.} +##' +##' \item{descdist}{Numeric vector with last descent depth.} +##' +##' \item{bottdist}{Numeric vector with the sum of absolute depth +##' differences while at the bottom of each dive; measure of amount of +##' \dQuote{wiggling} while at bottom.} +##' +##' \item{ascdist}{Numeric vector with first ascent depth.} +##' +##' \item{bottdep.mean}{Mean bottom depth.} +##' +##' \item{bottdep.median}{Median bottom depth.} +##' +##' \item{bottdep.sd}{Standard deviation of bottom depths.} +##' +##' \item{maxdep}{Numeric vector with maximum depth.} +##' +##' \item{desc.tdist}{Numeric vector with descent total distance, estimated +##' from speed.} +##' +##' \item{desc.mean.speed}{Numeric vector with descent mean speed.} +##' +##' \item{desc.angle}{Numeric vector with descent angle, from the surface +##' plane.} +##' +##' \item{bott.tdist}{Numeric vector with bottom total distance, estimated +##' from speed.} +##' +##' \item{bott.mean.speed}{Numeric vector with bottom mean speed.} +##' +##' \item{asc.tdist}{Numeric vector with ascent total distance, estimated +##' from speed.} +##' +##' \item{asc.mean.speed}{Numeric vector with ascent mean speed.} +##' +##' \item{asc.angle}{Numeric vector with ascent angle, from the bottom plane.} +##' +##' \item{postdive.dur}{Postdive duration.} +##' +##' \item{postdive.tdist}{Numeric vector with postdive total distance, +##' estimated from speed.} +##' +##' \item{postdive.mean.speed}{Numeric vector with postdive mean speed.} +##' +##' If \code{depth.deriv=TRUE}, 21 additional columns with the minimum, +##' first quartile, median, mean, third quartile, maximum, and standard +##' deviation of the depth derivative for each phase of the dive. The +##' number of columns also depends on argument \code{speed}. +##' +##' @author Sebastian P. 
Luque \email{spluque@@gmail.com} +##' @seealso \code{\link{calibrateDepth}}, \code{\link{.detPhase}}, +##' \code{\link{TDRcalibrate-class}} +##' @keywords arith math +##' @examples +##' \donttest{## Too long for checks +##' ## Continuing the Example from '?calibrateDepth': +##' utils::example("calibrateDepth", package="diveMove", +##' ask=FALSE, echo=FALSE, run.donttest=TRUE) +##' dcalib # the 'TDRcalibrate' that was created +##' +##' tdrX <- diveStats(dcalib) +##' stamps <- stampDive(dcalib, ignoreZ=TRUE) +##' tdrX.tab <- data.frame(stamps, tdrX) +##' summary(tdrX.tab) +##' +##' } "diveStats" <- function(x, depth.deriv=TRUE) { - ## Value: A data frame with per-dive statistics - ## -------------------------------------------------------------------- - ## Arguments: x=object of class TDRcalibrate - ## -------------------------------------------------------------------- - ## Author: Sebastian Luque - ## -------------------------------------------------------------------- if (!is(x, "TDRcalibrate")) stop("x must be a TDRcalibrate object") zvtdr <- getTDR(x) # fully calibrated object interval <- getDtime(zvtdr) # sampling interval diff --git a/R/oneDiveStats.R b/R/oneDiveStats.R index 14c22ea..d95cda2 100644 --- a/R/oneDiveStats.R +++ b/R/oneDiveStats.R @@ -1,4 +1,8 @@ +##' @describeIn diveStats Calculate dive statistics for a single dive +##' @param interval numeric scalar: sampling interval for interpreting +##' \code{x}. +##' @param speed logical: should speed statistics be calculated? "oneDiveStats" <- function(x, interval, speed=FALSE) { ## Value: A matrix with time/depth stats for each dive segment diff --git a/R/readLocs.R b/R/readLocs.R index 1332652..8c81906 100644 --- a/R/readLocs.R +++ b/R/readLocs.R @@ -1,23 +1,57 @@ +##' Read comma-delimited file with location data +##' +##' Read a delimited (*.csv) file with (at least) time, latitude, longitude +##' readings. +##' +##' The file must have a header row identifying each field, and all rows +##' must be complete (i.e. have the same number of fields). Field names +##' need not follow any convention. +##' +##' @param locations character: a string indicating the path to the file to +##' read, or a \code{\link{data.frame}} available in the search +##' list. Provide the entire path if the file is not on the current +##' directory. This can also be a text-mode connection, as allowed in +##' \code{\link{read.csv}}. +##' @param loc.idCol integer: column number containing location ID. If +##' missing, a \code{loc.id} column is generated with sequential +##' integers as long as the input. +##' @param idCol integer: column number containing an identifier for +##' locations belonging to different groups. If missing, an id column +##' is generated with number one repeated as many times as the input. +##' @param dateCol integer: column number containing dates, and, +##' optionally, times. +##' @param timeCol integer: column number containing times. +##' @param dtformat character: a string specifying the format in which the +##' date and time columns, when pasted together, should be interpreted +##' (see \code{\link{strptime}}) in \code{file}. +##' @param tz character: a string indicating the time zone for the date and +##' time readings. +##' @param lonCol integer: column number containing longitude readings. +##' @param latCol integer: column number containing latitude readings. +##' @param classCol integer: column number containing the ARGOS rating for +##' each location. 
+##' @param alt.lonCol integer: column number containing alternative +##' longitude readings. +##' @param alt.latCol integer: column number containing alternative +##' latitude readings. +##' @param ... Passed to \code{\link{read.csv}}. +##' @return A data frame. +##' @author Sebastian P. Luque \email{spluque@@gmail.com} +##' @keywords manip +##' @examples +##' ## Do example to define object zz with location of dataset +##' utils::example("sealLocs", package="diveMove", +##' ask=FALSE, echo=FALSE) +##' locs <- readLocs(zz, idCol=1, dateCol=2, +##' dtformat="%Y-%m-%d %H:%M:%S", classCol=3, +##' lonCol=4, latCol=5, sep=";") +##' +##' summary(locs) "readLocs" <- function(locations, loc.idCol, idCol, dateCol, timeCol=NULL, dtformat="%m/%d/%Y %H:%M:%S", tz="GMT", classCol, lonCol, latCol, alt.lonCol=NULL, alt.latCol=NULL, ...) { - ## Value: A data frame with ARGOS locations. - ## -------------------------------------------------------------------- - ## Arguments: locations=quoted file name, including path, of file to - ## read, or data.frame with data to read, or a text-mode connection, - ## loc.idCol=column number containing the location id, idCol=column - ## number identifying locations belonging to different groups, - ## dateCol=column number containing dates and, optionally, times, - ## timeCol=optional column number containing times, latCol and - ## lonCol=latitude and longitude column numbers, respectively, - ## alt.latCol and alt.lonCol=alternative latitude and longitude - ## columns, respectively, classCol=ARGOS classification; ...= passed to - ## read.csv() - ## -------------------------------------------------------------------- - ## Author: Sebastian Luque - ## -------------------------------------------------------------------- if (inherits(locations, "connection") || (is.character(locations) && file.exists(locations))) { srcfile.name <- ifelse(inherits(locations, "connection"), diff --git a/R/readTDR.R b/R/readTDR.R index 019ac31..1cb478f 100644 --- a/R/readTDR.R +++ b/R/readTDR.R @@ -18,6 +18,7 @@ interval } +##' @describeIn createTDR Create TDR object from file "readTDR" <- function(file, dateCol=1, timeCol=2, depthCol=3, speed=FALSE, subsamp=5, concurrentCols=4:6, dtformat="%d/%m/%Y %H:%M:%S", tz="GMT", ...) diff --git a/R/runquantile.R b/R/runquantile.R index 2233a62..005e531 100644 --- a/R/runquantile.R +++ b/R/runquantile.R @@ -1,5 +1,187 @@ ## Copied and modified from caTools +##' Quantile of Moving Window +##' +##' Moving (aka running, rolling) Window Quantile calculated over a vector +##' +##' Apart from the end values, the result of y = runquantile(x, k) is the +##' same as \dQuote{\code{for(j=(1+k2):(n-k2)) +##' y[j]=quantile(x[(j-k2):(j+k2)],na.rm = TRUE)}}. It can handle +##' non-finite numbers like NaN's and Inf's (like \code{\link{quantile}(x, +##' na.rm = TRUE)}). +##' +##' The main incentive to write this set of functions was the relative slowness +##' of the majority of moving window functions available in R and its packages. +##' All functions listed in the "see also" section are slower than the very +##' inefficient \dQuote{\code{\link{apply}(\link{embed}(x,k),1,FUN)}} +##' approach. The relative speed of \code{runquantile} is O(n*k). +##' +##' Function \code{runquantile} uses insertion sort to sort the moving +##' window, but gains speed by remembering the results of the previous +##' sort. Since each time the window is moved, only one point changes, all +##' but one of the points in the window are already sorted. Insertion sort can fix +##' that in O(k) time.
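A pure-R sketch of the sliding sorted-window idea just described may help (illustration only, with an invented helper name and using the median for brevity; the package's .runquantile does this in compiled code):

run_median_sorted <- function(x, k) {
    n <- length(x)
    win <- sort(x[1:k])                 # keep the current window sorted
    out <- numeric(n - k + 1)
    out[1] <- median(win)
    for (j in 2:(n - k + 1)) {
        ## only one value changes when the window slides: drop the
        ## outgoing value and insert the incoming one in O(k) time
        win <- win[-match(x[j - 1], win)]
        v <- x[j + k - 1]
        win <- append(win, v, after=findInterval(v, win))
        out[j] <- median(win)
    }
    out
}
set.seed(1)
x <- rnorm(50)
## interior values agree with stats::runmed() for the same window width
all.equal(run_median_sorted(x, 5), as.numeric(runmed(x, 5)[3:48]))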
+##' +##' @aliases .runquantile +##' @param x numeric vector of length n or matrix with n rows. If \code{x} +##' is a matrix then each column will be processed separately. +##' @param k width of moving window; must be an integer between one and n. +##' @param endrule +##' character string indicating how the values at the beginning and the +##' end of the array should be treated. Only the first and last \code{k2} +##' values at both ends are affected, where \code{k2} is the half-bandwidth +##' \code{k2 = k \%/\% 2}. +##' +##' * \code{"quantile"} Applies the \code{\link{quantile}} function to +##' smaller and smaller sections of the array. Equivalent to: \code{for(i +##' in 1:k2) out[i]=quantile(x[1:(i+k2)])}. +##' * \code{"trim"} Trim the ends; output array length is equal to +##' \code{length(x)-2*k2 (out = out[(k2+1):(n-k2)])}. This option mimics +##' output of \code{\link{apply}} \code{(\link{embed}(x,k),1,FUN)} and +##' other related functions. +##' * \code{"keep"} Fill the ends with numbers from \code{x} vector +##' \code{(out[1:k2] = x[1:k2])} +##' * \code{"constant"} Fill the ends with first and last calculated value +##' in output array \code{(out[1:k2] = out[k2+1])} +##' * \code{"NA"} Fill the ends with NA's \code{(out[1:k2] = NA)} +##' * \code{"func"} Same as \code{"quantile"} but implemented in R. This +##' option could be very slow, and is included mostly for testing +##' +##' @param probs numeric vector of probabilities with values in [0,1] range +##' used by \code{runquantile}. +##' @param type an integer between 1 and 9 selecting one of the nine +##' quantile algorithms, same as \code{type} in \code{\link{quantile}} +##' function. Another even more readable description of nine ways to +##' calculate quantiles can be found at +##' \url{http://mathworld.wolfram.com/Quantile.html}. +##' @param align specifies whether result should be centered (default), +##' left-aligned or right-aligned. If \code{endrule}="quantile" then +##' setting \code{align} to "left" or "right" will fall back on slower +##' implementation equivalent to \code{endrule}="func". +##' @return +##' If \code{x} is a vector then function \code{runquantile} returns a +##' matrix of size [n \eqn{\times}{x} \code{\link{length}}(probs)]. If +##' \code{x} is a matrix then function \code{runquantile} returns a matrix +##' of size [\code{\link{dim}}(x) \eqn{\times}{x} +##' \code{\link{length}}(probs)]. If \code{endrule="trim"} the output will +##' have fewer rows. +##' @author Jarek Tuszynski (SAIC) \email{jaroslaw.w.tuszynski@@saic.com} +##' @references +##' About quantiles: Hyndman, R. J. and Fan, Y. (1996) \emph{Sample +##' quantiles in statistical packages, American Statistician}, 50, 361. +##' +##' About quantiles: Eric W. Weisstein. \emph{Quantile}. From MathWorld-- A +##' Wolfram Web Resource. \url{http://mathworld.wolfram.com/Quantile.html} +##' +##' About insertion sort used in \code{runmad} and \code{runquantile}: R. +##' Sedgewick (1988): \emph{Algorithms}.
Addison-Wesley (page 99) +##' +##' @keywords ts smooth array utilities +##' @concept moving min +##' @concept rolling min +##' @concept running min +##' @concept moving max +##' @concept rolling max +##' @concept running max +##' @concept moving minimum +##' @concept rolling minimum +##' @concept running minimum +##' @concept moving maximum +##' @concept rolling maximum +##' @concept running maximum +##' @concept moving quantile +##' @concept rolling quantile +##' @concept running quantile +##' @concept moving percentile +##' @concept rolling percentile +##' @concept running percentile +##' @concept moving window +##' @concept rolling window +##' @concept running window +##' @examples +##' ## show plot using runquantile +##' k <- 31; n <- 200 +##' x <- rnorm(n, sd=30) + abs(seq(n)-n/4) +##' y <- diveMove:::.runquantile(x, k, probs=c(0.05, 0.25, 0.5, 0.75, 0.95)) +##' col <- c("black", "red", "green", "blue", "magenta", "cyan") +##' plot(x, col=col[1], main="Moving Window Quantiles") +##' lines(y[,1], col=col[2]) +##' lines(y[,2], col=col[3]) +##' lines(y[,3], col=col[4]) +##' lines(y[,4], col=col[5]) +##' lines(y[,5], col=col[6]) +##' lab=c("data", "runquantile(.05)", "runquantile(.25)", "runquantile(0.5)", +##' "runquantile(.75)", "runquantile(.95)") +##' legend(0,230, lab, col=col, lty=1) +##' +##' ## basic tests against apply/embed +##' a <- diveMove:::.runquantile(x, k, c(0.3, 0.7), endrule="trim") +##' b <- t(apply(embed(x, k), 1, quantile, probs=c(0.3, 0.7))) +##' eps <- .Machine$double.eps ^ 0.5 +##' stopifnot(all(abs(a - b) < eps)) +##' +##' ## Test against loop approach +##' +##' ## This test works fine at the R prompt but fails during package check - +##' ## need to investigate +##' k <- 25; n <- 200 +##' x <- rnorm(n, sd=30) + abs(seq(n) - n / 4) # create random data +##' x[seq(1, n, 11)] <- NaN; # add NANs +##' k2 <- k %/% 2 +##' k1 <- k - k2 - 1 +##' a <- diveMove:::.runquantile(x, k, probs=c(0.3, 0.8)) +##' b <- matrix(0, n, 2) +##' for(j in 1:n) { +##' lo <- max(1, j - k1) +##' hi <- min(n, j + k2) +##' b[j, ] <- quantile(x[lo:hi], probs=c(0.3, 0.8), na.rm=TRUE) +##' } +##' ## stopifnot(all(abs(a-b)= \code{x.break}[1] - & \code{x} < \code{x.break}[2] is 2nd one, and \code{x} >= - \code{x.break}[2] is 3rd one.} - - \item{bec}{numeric vector or matrix with values for the bout ending - criterion which should be compared against the values in x for - identifying the bouts.} - - \item{p}{numeric vector of proportions (0-1) to transform to the logit - scale.} - - \item{logit}{numeric scalar: logit value to transform back to original - scale.} - -} - - -\value{ - - \code{boutfreqs} returns a data frame with components \var{lnfreq} - containing the log frequencies and \var{x}, containing the - corresponding mid points of the histogram. Empty bins are excluded. - A plot (histogram of \emph{input data}) is produced as a side effect - if argument plot is \code{TRUE}. See the Details section. - - \code{boutinit} returns a list with as many elements as the number of - processes implied by \code{x.break} (i.e. \code{length(x.break) + 1}). - Each element is a vector of length two, corresponding to \code{a} and - \code{lambda}, which are starting values derived from broken stick - model. A plot is produced as a side effect if argument \code{plot} is - \code{TRUE}. - - \code{labelBouts} returns a numeric vector sequentially labelling each - row or element of \var{x}, which associates it with a particular bout. 
- - \code{unLogit} and \code{logit} return a numeric vector with the - (un)transformed arguments. - -} - - -\details{This follows the procedure described in Mori et al. (2001), - which is based on Sibly et al. 1990. Currently, only a two process - model is supported. - - \code{boutfreqs} creates a histogram with the log transformed - frequencies of \var{x} with a chosen bin width and upper limit. Bins - following empty ones have their frequencies averaged over the number - of previous empty bins plus one. - - \code{boutinit} fits a "broken stick" model to the log frequencies - modelled as a function of \var{x} (well, the midpoints of the binned - data), using chosen value(s) to separate the two or three processes. - - \code{labelBouts} labels each element (or row, if a matrix) of \var{x} - with a sequential number, identifying which bout the reading belongs - to. The \code{bec} argument needs to have the same dimensions as - \code{x} to allow for situations where \code{bec} within \code{x}. - - \code{logit} and \code{unLogit} are useful for reparameterizing the - negative maximum likelihood function, if using Langton et al. (1995). - -} - - -\references{ - - Langton, S.; Collett, D. and Sibly, R. (1995) Splitting behaviour into - bouts; a maximum likelihood approach. Behaviour \bold{132}, 9-10. - - Luque, S.P. and Guinet, C. (2007) A maximum likelihood approach for - identifying dive bouts improves accuracy, precision, and objectivity. - Behaviour, \bold{144}, 1315-1332. - - Mori, Y.; Yoda, K. and Sato, K. (2001) Defining dive bouts using a - sequential differences analysis. Behaviour, 2001 \bold{138}, - 1451-1466. - - Sibly, R.; Nott, H. and Fletcher, D. (1990) Splitting behaviour into - bouts. Animal Behaviour \bold{39}, 63-69. - -} - - -\seealso{\code{\link{bouts2.nls}}, \code{\link{bouts.mle}}. These - include an example for \code{labelBouts}.} - -% ------------------------------------------------------------------------- - -\examples{ - -\donttest{## Too long for checks -## Using the Example from '?diveStats': -utils::example("diveStats", package="diveMove", - ask=FALSE, echo=FALSE, run.donttest=TRUE) -postdives <- tdrX.tab$postdive.dur[tdrX.tab$phase.no == 2] -## Remove isolated dives -postdives <- postdives[postdives < 2000] -lnfreq <- boutfreqs(postdives, bw=0.1, method="seq.diff", plot=FALSE) -boutinit(lnfreq, 50) - -## See ?bouts.mle for labelBouts() example - -} - -} - - -\author{Sebastian P. Luque \email{spluque@gmail.com}} - - -\keyword{misc} -\keyword{manip} diff --git a/man/boutfreqs.Rd b/man/boutfreqs.Rd new file mode 100644 index 0000000..fe2f117 --- /dev/null +++ b/man/boutfreqs.Rd @@ -0,0 +1,38 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/bouts_helpers.R +\name{boutfreqs} +\alias{boutfreqs} +\title{Histogram of log-transformed frequencies} +\usage{ +boutfreqs(x, bw, method = c("standard", "seq.diff"), plot = TRUE, ...) +} +\arguments{ +\item{x}{numeric vector on which bouts will be identified based on +\dQuote{method}. 
For \code{labelBouts} it can also be a matrix with +different variables for which bouts should be identified.} + +\item{bw}{numeric scalar: bin width for the histogram.} + +\item{method}{character: method used for calculating the frequencies: +\dQuote{standard} simply uses x, while \dQuote{seq.diff} uses the +sequential differences method.} + +\item{plot}{logical, whether to plot results or not.} + +\item{...}{For \code{boutfreqs}, arguments passed to hist (must exclude +\code{breaks} and \code{include.lowest})} +} +\value{ +\code{boutfreqs} returns an object of class \code{Bouts}, with slot +\code{lnfreq} consisting of a data frame with components \var{lnfreq} +containing the log frequencies and \var{x}, containing the +corresponding mid points of the histogram. Empty bins are excluded. A +plot (histogram of \emph{input data}) is produced as a side effect if +argument plot is \code{TRUE}. See the Details section. +} +\description{ +Histogram of log-transformed frequencies +} +\author{ +Sebastian P. Luque \email{spluque@gmail.com} +} diff --git a/man/boutinit.Rd b/man/boutinit.Rd new file mode 100644 index 0000000..8496e2d --- /dev/null +++ b/man/boutinit.Rd @@ -0,0 +1,63 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/AllMethod.R +\name{boutinit,data.frame-method} +\alias{boutinit,data.frame-method} +\alias{boutinit} +\alias{boutinit,Bouts-method} +\title{Fit "broken stick" model to log frequency data for identification of +bouts of behaviour} +\usage{ +\S4method{boutinit}{data.frame}(obj, x.break, plot = TRUE, ...) + +\S4method{boutinit}{Bouts}(obj, x.break, plot = TRUE, ...) +} +\arguments{ +\item{obj}{Object of class \code{\link{Bouts}} or +\code{\link{data.frame}}.} + +\item{x.break}{Numeric vector of length 1 or 2 with \code{x} value(s) +defining the break(s) point(s) for broken stick model, such that +\code{x} < \code{x.break}[1] is 1st process, and \code{x} >= +\code{x.break}[1] & \code{x} < \code{x.break}[2] is 2nd one, and +\code{x} >= \code{x.break}[2] is 3rd one.} + +\item{plot}{logical, whether to plot results or not.} + +\item{...}{arguments passed to \code{\link{plot}} (must exclude +\code{type}).} +} +\value{ +(2,N) matrix with as many columns as the number of processes + implied by \code{x.break} (i.e. \code{length(x.break) + 1}). Rows + are named \code{a} and \code{lambda}, corresponding to starting + values derived from broken stick model. A plot is produced as a + side effect if argument \code{plot} is \code{TRUE}. +} +\description{ +Fits "broken stick" model to the log frequencies modelled as a function +of \var{x} (well, the midpoints of the binned data), using chosen +value(s) to separate the two or three processes. +} +\section{Methods (by class)}{ +\itemize{ +\item \code{data.frame}: Fit "broken-stick" model on \code{data.frame} +object + +\item \code{Bouts}: Fit "broken-stick" model on \code{Bouts} object +}} + +\examples{ +## 2-process +utils::example("rmixexp", package="diveMove", ask=FALSE) +## 'rndproc2' is a random sample vector from the example +xbouts2 <- boutfreqs(rndprocs2, 5) # Bouts class result +(startval2 <- boutinit(xbouts2, 80)) + +## 3-process +## 'rndproc3' is a random sample vector from the example +xbouts3 <- boutfreqs(rndprocs3, 5) +(startval3 <- boutinit(xbouts3, c(75, 220))) +} +\author{ +Sebastian P. 
Luque \email{spluque@gmail.com} +} diff --git a/man/bouts-internal.Rd b/man/bouts-internal.Rd new file mode 100644 index 0000000..426a264 --- /dev/null +++ b/man/bouts-internal.Rd @@ -0,0 +1,57 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/bouts_helpers.R +\name{calc.p} +\alias{calc.p} +\alias{build.p.lambda} +\alias{logit} +\alias{unLogit} +\title{Utilities for Poisson mixture analyses} +\usage{ +calc.p(coefs) + +build.p.lambda(x) + +logit(p) + +unLogit(logit) +} +\arguments{ +\item{coefs}{numeric matrix [2,N] of coefficients (\code{a} and +\code{lambda}) in rows for each process of the model in columns. +Columns are assumed to be in decreasing order with respect to +\code{lambda}} + +\item{x}{numeric vector of coefficients} + +\item{p}{numeric vector of proportions (0-1) to transform to the logit +scale.} + +\item{logit}{numeric scalar: logit value to transform back to original +scale.} +} +\value{ +numeric vector with proportion parameters implied by + \code{coefs}. + +named (\code{p}, \code{lambda}) list with parsed coefficients. + +\code{unLogit} and \code{logit} return a numeric vector with + the (un)transformed arguments. +} +\description{ +\code{calc.p} computes \code{p} (proportion) parameter from \code{a} +and \code{lambda} coefficients in a broken stick model. + +\code{build.p.lambda} parses the \code{x} vector, usually returned by +the \code{coef} method, where \eqn{x = +(p_0,\dots,p_n,\lambda_1,\dots,\lambda_{n+1})}{x=(p_0,...,p_n,lambda_0,...,lambda_n+1)}, +and build a named list with \code{p} and \code{lambda} elements to use +in fitting functions. + +\code{logit} and \code{unLogit} are helpful for reparameterizing the +negative maximum likelihood function, if using Langton et al. (1995). +} +\author{ +Sebastian P. Luque \email{spluque@gmail.com} +} +\keyword{internal} diff --git a/man/bouts2MLE.Rd b/man/bouts2MLE.Rd deleted file mode 100644 index 4de636b..0000000 --- a/man/bouts2MLE.Rd +++ /dev/null @@ -1,215 +0,0 @@ -\name{bouts2MLE} - -\alias{bouts2.mleFUN} -\alias{bouts2.ll} -\alias{bouts2.LL} -\alias{bouts.mle} -\alias{bouts2.mleBEC} -\alias{plotBouts2.mle} -\alias{plotBouts2.cdf} - -% ------------------------------------------------------------------------- - -\title{Maximum Likelihood Model of mixture of 2 Poisson Processes} - - -\description{ Functions to model a mixture of 2 random Poisson processes - to identify bouts of behaviour. This follows Langton et al. (1995). } - - -\usage{ -bouts2.mleFUN(x, p, lambda1, lambda2) -bouts2.ll(x) -bouts2.LL(x) -bouts.mle(ll.fun, start, x, \ldots) -bouts2.mleBEC(fit) -plotBouts2.mle(fit, x, xlab="x", ylab="Log Frequency", bec.lty=2, \ldots) -plotBouts2.cdf(fit, x, draw.bec=FALSE, bec.lty=2, \ldots) -} - - -\arguments{ - - \item{x}{numeric vector with values to model.} - - \item{p, lambda1, lambda2}{numeric: parameters of the mixture of - Poisson processes.} - - \item{ll.fun}{function returning the negative of the maximum - likelihood function that should be maximized. This should be a - valid \code{minuslogl} argument to \code{\link[stats4]{mle}}.} - - \item{start, \ldots}{Arguments passed to \code{\link[stats4]{mle}}. - For \code{plotBouts2.cdf}, arguments passed to - \code{\link{plot.ecdf}}. For \code{plotBouts2.mle}, arguments passed - to \code{\link{curve}} (must exclude \code{xaxs}, \code{yaxs}). 
For - \code{plotBouts2.nls}, arguments passed to \code{\link{plot}} (must - exclude \code{type}).} - - \item{fit}{\code{\link[stats4]{mle}} object.} - - \item{xlab, ylab}{character: titles for the x and y axes.} - - \item{bec.lty}{Line type specification for drawing the BEC reference - line.} - - \item{draw.bec}{logical; do we draw the BEC?} - -} - - -\value{ - - \code{bouts.mle} returns an object of class \code{\link[stats4]{mle}}. - - \code{bouts2.mleBEC} and \code{bouts2.mleFUN} return a numeric vector. - - \code{bouts2.LL} and \code{bouts2.ll} return a function. - - \code{plotBouts2.mle} and \code{plotBouts2.cdf} return nothing, but - produce a plot as side effect. - -} - - -\details{ For now only a mixture of 2 Poisson processes is supported. - Even in this relatively simple case, it is very important to provide - good starting values for the parameters. - - One useful strategy to get good starting parameter values is to - proceed in 4 steps. First, fit a broken stick model to the log - frequencies of binned data (see \code{\link{boutinit}}), to obtain - estimates of 4 parameters corresponding to a 2-process model (Sibly et - al. 1990). Second, calculate parameter \var{p} from the 2 alpha - parameters obtained from the broken stick model, to get 3 tentative - initial values for the 2-process model from Langton et al. (1995). - Third, obtain MLE estimates for these 3 parameters, but using a - reparameterized version of the -log L2 function. Lastly, obtain the - final MLE estimates for the 3 parameters by using the estimates from - step 3, un-transformed back to their original scales, maximizing the - original parameterization of the -log L2 function. - - \code{\link{boutinit}} can be used to perform step 1. Calculation of - the mixing parameter \var{p} in step 2 is trivial from these - estimates. Function \code{\link{bouts2.LL}} is a reparameterized - version of the -log L2 function given by Langton et al. (1995), so can - be used for step 3. This uses a logit (see \code{\link{logit}}) - transformation of the mixing parameter \var{p}, and log - transformations for both density parameters \var{lambda1} and - \var{lambda2}. Function \code{\link{bouts2.ll}} is the -log L2 - function corresponding to the un-transformed model, hence can be used - for step 4. - - \code{bouts.mle} is the function performing the main job of maximizing - the -log L2 functions, and is essentially a wrapper around - \code{\link[stats4]{mle}}. It only takes the -log L2 function, a list - of starting values, and the variable to be modelled, all of which are - passed to \code{\link[stats4]{mle}} for optimization. Additionally, - any other arguments are also passed to \code{\link[stats4]{mle}}, - hence great control is provided for fitting any of the -log L2 - functions. - - In practice, step 3 does not pose major problems using the - reparameterized -log L2 function, but it might be useful to use method - \dQuote{L-BFGS-B} with appropriate lower and upper bounds. Step 4 can - be a bit more problematic, because the parameters are usually on very - different scales. Therefore, it is almost always the rule to use - method \dQuote{L-BFGS-B}, again bounding the parameter search, as well - as passing a \code{control} list with proper \code{parscale} for - controlling the optimization. See \code{Note} below for useful - constraints which can be tried. - -} - - -\note{ - - In the case of a mixture of 2 Poisson processes, useful values for - lower bounds for the \code{bouts.LL} reparameterization are - \code{c(-2, -5, -10)}. 
For \code{bouts2.ll}, useful lower bounds are - \code{rep(1e-08, 3)}. A useful parscale argument for the latter is - \code{c(1, 0.1, 0.01)}. However, I have only tested this for cases of - diving behaviour in pinnipeds, so these suggested values may not be - useful in other cases. - - The lambdas can be very small for some data, particularly - \code{lambda2}, so the default \code{ndeps} in \code{\link{optim}} can - be so large as to push the search outside the bounds given. To avoid - this problem, provide a smaller \code{ndeps} value. - -} - - -\references{ - - Langton, S.; Collett, D. and Sibly, R. (1995) Splitting behaviour into - bouts; a maximum likelihood approach. Behaviour \bold{132}, 9-10. - - Luque, S.P. and Guinet, C. (2007) A maximum likelihood approach for - identifying dive bouts improves accuracy, precision, and objectivity. - Behaviour, \bold{144}, 1315-1332. - - Sibly, R.; Nott, H. and Fletcher, D. (1990) Splitting behaviour into - bouts. Animal Behaviour \bold{39}, 63-69. - -} - - -\seealso{ \code{\link[stats4]{mle}}, \code{\link{optim}}, - \code{\link{logit}}, \code{\link{unLogit}} for transforming and - fitting a reparameterized model. } - -% ------------------------------------------------------------------------- - -\examples{ - -\donttest{## Too long for checks -## Using the Example from '?diveStats': -utils::example("diveStats", package="diveMove", - ask=FALSE, echo=FALSE, run.donttest=TRUE) -postdives <- tdrX.tab$postdive.dur[tdrX.tab$phase.no == 2] -postdives.diff <- abs(diff(postdives)) - -## Remove isolated dives -postdives.diff <- postdives.diff[postdives.diff < 2000] -lnfreq <- boutfreqs(postdives.diff, bw=0.1, plot=FALSE) -startval <- boutinit(lnfreq, 50) -p <- startval[[1]]["a"] / (startval[[1]]["a"] + startval[[2]]["a"]) - -## Fit the reparameterized (transformed parameters) model -## Drop names by wrapping around as.vector() -init.parms <- list(p=as.vector(logit(p)), - lambda1=as.vector(log(startval[[1]]["lambda"])), - lambda2=as.vector(log(startval[[2]]["lambda"]))) -bout.fit1 <- bouts.mle(bouts2.LL, start=init.parms, x=postdives.diff, - method="L-BFGS-B", lower=c(-2, -5, -10)) -coefs <- as.vector(coef(bout.fit1)) - -## Un-transform and fit the original parameterization -init.parms <- list(p=unLogit(coefs[1]), lambda1=exp(coefs[2]), - lambda2=exp(coefs[3])) -bout.fit2 <- bouts.mle(bouts2.ll, x=postdives.diff, start=init.parms, - method="L-BFGS-B", lower=rep(1e-08, 3), - control=list(parscale=c(1, 0.1, 0.01))) -plotBouts(bout.fit2, postdives.diff) - -## Plot cumulative frequency distribution -plotBouts2.cdf(bout.fit2, postdives.diff) - -## Estimated BEC -bec <- bec2(bout.fit2) - -## Label bouts -labelBouts(postdives, rep(bec, length(postdives)), - bec.method="seq.diff") - -} - -} - - -\author{Sebastian P. Luque \email{spluque@gmail.com}} - - -\keyword{models} -\keyword{manip} diff --git a/man/bouts2NLS.Rd b/man/bouts2NLS.Rd deleted file mode 100644 index 9274076..0000000 --- a/man/bouts2NLS.Rd +++ /dev/null @@ -1,135 +0,0 @@ -\name{bouts2NLS} - -\alias{bouts2.nlsFUN} -\alias{bouts2.nls} -\alias{bouts2.nlsBEC} -\alias{plotBouts2.nls} - -% ------------------------------------------------------------------------- - -\title{Fit mixture of 2 Poisson Processes to Log Frequency data} - - -\description{ - Functions to model a mixture of 2 random Poisson processes to - histogram-like data of log frequency vs interval mid points. This - follows Sibly et al. (1990) method. 
-} - - -\usage{ -bouts2.nlsFUN(x, a1, lambda1, a2, lambda2) -bouts2.nls(lnfreq, start, maxiter) -bouts2.nlsBEC(fit) -plotBouts2.nls(fit, lnfreq, bec.lty, \ldots) -} - - -\arguments{ - - \item{x}{numeric vector with values to model.} - - \item{a1, lambda1, a2, lambda2}{numeric: parameters from the mixture - of Poisson processes.} - - \item{lnfreq}{\code{\link{data.frame}} with named components - \var{lnfreq} (log frequencies) and corresponding \var{x} (mid points - of histogram bins).} - - \item{start, maxiter}{Arguments passed to \code{\link{nls}}.} - - \item{fit}{nls object.} - - \item{bec.lty}{Line type specification for drawing the BEC reference - line.} - - \item{\ldots}{Arguments passed to \code{\link{plot.default}}.} - -} - - -\value{ - - \code{bouts2.nlsFUN} returns a numeric vector evaluating the mixture of 2 - Poisson process. - - \code{bouts2.nls} returns an nls object resulting from fitting this - model to data. - - \code{bouts2.nlsBEC} returns a number corresponding to the bout ending - criterion derived from the model. - - \code{plotBouts2.nls} plots the fitted model with the corresponding - data. - -} - - -\details{ - \code{bouts2.nlsFUN} is the function object defining the nonlinear - least-squares relationship in the model. It is not meant to be used - directly, but is used internally by \code{bouts2.nls}. - - \code{bouts2.nls} fits the nonlinear least-squares model itself. - - \code{bouts2.nlsBEC} calculates the BEC from a list object, as the one - that is returned by \code{\link{nls}}, representing a fit of the - model. \code{plotBouts2.nls} plots such an object. -} - - -\references{ - - Sibly, R.; Nott, H. and Fletcher, D. (1990) Splitting behaviour into - bouts Animal Behaviour \bold{39}, 63-69. - -} - - -\seealso{ - \code{\link{bouts.mle}} for a better approach; - \code{\link{boutfreqs}}; \code{\link{boutinit}} -} - -% ------------------------------------------------------------------------- - -\examples{ - -\donttest{## Too long for checks -## Using the Example from '?diveStats': -utils::example("diveStats", package="diveMove", - ask=FALSE, echo=FALSE, run.donttest=TRUE) -## Postdive durations -postdives <- tdrX.tab$postdive.dur[tdrX.tab$phase.no == 2] -postdives.diff <- abs(diff(postdives)) -## Remove isolated dives -postdives.diff <- postdives.diff[postdives.diff < 2000] - -## Construct histogram -lnfreq <- boutfreqs(postdives.diff, bw=0.1, plot=FALSE) - -startval <- boutinit(lnfreq, 50) -## Drop names by wrapping around as.vector() -startval.l <- list(a1=as.vector(startval[[1]]["a"]), - lambda1=as.vector(startval[[1]]["lambda"]), - a2=as.vector(startval[[2]]["a"]), - lambda2=as.vector(startval[[2]]["lambda"])) - -## Fit the 2 process model -bout.fit <- bouts2.nls(lnfreq, start=startval.l, maxiter=500) -summary(bout.fit) -plotBouts(bout.fit) - -## Estimated BEC -bec2(bout.fit) - -} - -} - - -\author{Sebastian P. 
Luque \email{spluque@gmail.com}} - - -\keyword{models} -\keyword{manip} diff --git a/man/bouts3NLS.Rd b/man/bouts3NLS.Rd deleted file mode 100644 index ec8a804..0000000 --- a/man/bouts3NLS.Rd +++ /dev/null @@ -1,141 +0,0 @@ -% $Id: bouts3NLS.Rd 464 2011-03-28 07:22:06Z sluque $ -\name{bouts3NLS} - -\alias{bouts3.nlsFUN} -\alias{bouts3.nls} -\alias{bouts3.nlsBEC} -\alias{plotBouts3.nls} - -% ------------------------------------------------------------------------- - -\title{Fit mixture of 3 Poisson Processes to Log Frequency data} - - -\description{ Functions to model a mixture of 3 random Poisson processes - to histogram-like data of log frequency vs interval mid points. This - follows Sibly et al. (1990) method, adapted for a three-process model - by Berdoy (1993). - } - - -\usage{ -bouts3.nlsFUN(x, a1, lambda1, a2, lambda2, a3, lambda3) -bouts3.nls(lnfreq, start, maxiter) -bouts3.nlsBEC(fit) -plotBouts3.nls(fit, lnfreq, bec.lty, \ldots) -} - - -\arguments{ - - \item{x}{numeric vector with values to model.} - - \item{a1, lambda1, a2, lambda2, a3, lambda3}{numeric: parameters from - the mixture of Poisson processes.} - - \item{lnfreq}{\code{\link{data.frame}} with named components - \var{lnfreq} (log frequencies) and corresponding \var{x} (mid points - of histogram bins).} - - \item{start, maxiter}{Arguments passed to \code{\link{nls}}.} - - \item{fit}{nls object.} - - \item{bec.lty}{Line type specification for drawing the BEC reference - line.} - - \item{\ldots}{Arguments passed to \code{\link{plot.default}}.} - -} - - -\value{ - - \code{bouts3.nlsFUN} returns a numeric vector evaluating the mixture of 3 - Poisson process. - - \code{bouts3.nls} returns an nls object resulting from fitting this - model to data. - - \code{bouts3.nlsBEC} returns a number corresponding to the bout ending - criterion derived from the model. - - \code{plotBouts3.nls} plots the fitted model with the corresponding - data. - -} - - -\details{ - \code{bouts3.nlsFUN} is the function object defining the nonlinear - least-squares relationship in the model. It is not meant to be used - directly, but is used internally by \code{bouts3.nls}. - - \code{bouts3.nls} fits the nonlinear least-squares model itself. - - \code{bouts3.nlsBEC} calculates the BEC from a list object, as the one - that is returned by \code{\link{nls}}, representing a fit of the - model. \code{plotBouts3.nls} plots such an object. -} - - -\references{ - - Sibly, R.; Nott, H. and Fletcher, D. (1990) Splitting behaviour into - bouts. Animal Behaviour \bold{39}, 63-69. - - Berdoy, M. (1993) Defining bouts of behaviour: a three-process model. - Animal Behaviour \bold{46}, 387-396. 
- -} - - -\seealso{ - \code{\link{bouts.mle}} for a better approach; - \code{\link{boutfreqs}}; \code{\link{boutinit}} -} - -% ------------------------------------------------------------------------- - -\examples{ - -\donttest{## Too long for checks -## Using the Example from '?diveStats': -utils::example("diveStats", package="diveMove", - ask=FALSE, echo=FALSE, run.donttest=TRUE) -## Postdive durations -postdives <- tdrX.tab$postdive.dur -postdives.diff <- abs(diff(postdives)) -## Remove isolated dives -postdives.diff <- postdives.diff[postdives.diff < 4000] - -## Construct histogram -lnfreq <- boutfreqs(postdives.diff, bw=0.1, plot=FALSE) - -startval <- boutinit(lnfreq, c(50, 400)) -## Drop names by wrapping around as.vector() -startval.l <- list(a1=as.vector(startval[[1]]["a"]), - lambda1=as.vector(startval[[1]]["lambda"]), - a2=as.vector(startval[[2]]["a"]), - lambda2=as.vector(startval[[2]]["lambda"]), - a3=as.vector(startval[[3]]["a"]), - lambda3=as.vector(startval[[3]]["lambda"])) - -## Fit the 3 process model -bout.fit <- bouts3.nls(lnfreq, start=startval.l, maxiter=500) -summary(bout.fit) -plotBouts(bout.fit) - -## Estimated BEC -bec3(bout.fit) - -} - -} - - -\author{Sebastian P. Luque \email{spluque@gmail.com}} - - -\keyword{models} -\keyword{manip} diff --git a/man/boutsBEC.Rd b/man/boutsBEC.Rd new file mode 100644 index 0000000..03bca7a --- /dev/null +++ b/man/boutsBEC.Rd @@ -0,0 +1,34 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/AllMethod.R +\name{bec,nls-method} +\alias{bec,nls-method} +\alias{bec} +\alias{bec,mle-method} +\title{Calculate bout ending criteria from model coefficients} +\usage{ +\S4method{bec}{nls}(fit) + +\S4method{bec}{mle}(fit) +} +\arguments{ +\item{fit}{Object of class \code{nls} or \code{mle}.} +} +\value{ +\code{numeric} vector with the bout ending criterion or + criteria derived from the model. +} +\description{ +Calculate bout ending criteria from model coefficients +} +\section{Functions}{ +\itemize{ +\item \code{bec,nls-method}: Calculate BEC on \code{nls} object + +\item \code{bec,mle-method}: Calculate BEC on \code{mle} object +}} + +\author{ +Sebastian P. Luque \email{spluque@gmail.com} +} +\keyword{manip} +\keyword{models} diff --git a/man/boutsCDF.Rd b/man/boutsCDF.Rd new file mode 100644 index 0000000..7da17a4 --- /dev/null +++ b/man/boutsCDF.Rd @@ -0,0 +1,30 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/bouts_helpers.R +\name{boutsCDF} +\alias{boutsCDF} +\title{Estimated cumulative frequency for two- or three-process Poisson +mixture models} +\usage{ +boutsCDF(x, p, lambdas) +} +\arguments{ +\item{x}{numeric vector described by model.} + +\item{p}{numeric scalar or vector of proportion parameters.} + +\item{lambdas}{numeric vector of rate parameters.} +} +\value{ +numeric vector with cumulative frequency. +} +\description{ +Estimated cumulative frequency for two- or three-process Poisson +mixture models +} +\examples{ +utils::example("rmixexp", package="diveMove", ask=FALSE) +## boutsCDF(rndprocs3, p=p_true, lambdas=lda_true) +} +\author{ +Sebastian P. 
Luque \email{spluque@gmail.com} +} diff --git a/man/boutsMLEll.Rd b/man/boutsMLEll.Rd new file mode 100644 index 0000000..a418eb0 --- /dev/null +++ b/man/boutsMLEll.Rd @@ -0,0 +1,50 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/bouts.R +\name{boutsMLEll.chooser} +\alias{boutsMLEll.chooser} +\alias{.bouts2MLEll} +\alias{.bouts3MLEll} +\title{Log likelihood function of parameters given observed data} +\usage{ +boutsMLEll.chooser(x, x0, transformed = TRUE) + +.bouts2MLEll(x, p, lambda0, lambda1) + +.bouts3MLEll(x, p0, p1, lambda0, lambda1, lambda2) +} +\arguments{ +\item{x}{numeric vector of independent data to be described by the +function.} + +\item{x0}{numerical one-dimensional vector of coefficients.} + +\item{transformed}{logical indicating whether coefficients need to be +transformed back to original scale to compute the negative log +likelihood.} + +\item{p, lambda0, lambda1}{numeric: parameters of the model.} + +\item{p0, p1, lambda2}{numeric: parameters of the model.} +} +\value{ +\code{boutsMLEll.chooser} returns the negative log likelihood function + of the joint distribution. + +numeric vector + +numeric vector +} +\description{ +\code{boutsMLEll.chooser} defines a closure, where \code{x} will be the object +passed to it. +} +\section{Functions}{ +\itemize{ +\item \code{.bouts2MLEll}: Log likelihood function in a 2-process Poisson +mixture + +\item \code{.bouts3MLEll}: Log likelihood function in a 3-process Poisson +mixture +}} + +\keyword{internal} diff --git a/man/boutsNLSll.Rd b/man/boutsNLSll.Rd new file mode 100644 index 0000000..2319513 --- /dev/null +++ b/man/boutsNLSll.Rd @@ -0,0 +1,37 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/AllMethod.R +\name{boutsNLSll,Bouts-method} +\alias{boutsNLSll,Bouts-method} +\alias{boutsNLSll} +\alias{boutsNLSll,numeric-method} +\title{Generalized log likelihood function taking any number of Poisson + processes in a "broken-stick" model} +\usage{ +\S4method{boutsNLSll}{Bouts}(obj, coefs) + +\S4method{boutsNLSll}{numeric}(obj, coefs) +} +\arguments{ +\item{obj}{Object of class \code{\link{Bouts}} or numeric vector of +independent data to be described by the function.} + +\item{coefs}{matrix of coefficients (\code{a} and \code{lambda}) in +rows for each process of the model in columns.} +} +\value{ +numeric vector, as long as the data in \code{obj}, with the evaluated function. +} +\description{ +Generalized log likelihood function taking any number of Poisson + processes in a "broken-stick" model +} +\section{Methods (by class)}{ +\itemize{ +\item \code{Bouts}: Log likelihood \code{Bouts} method + +\item \code{numeric}: Log likelihood function \code{numeric} method +}} + +\author{ +Sebastian P. 
Luque \email{spluque@gmail.com} +} diff --git a/man/calibrateDepth.Rd b/man/calibrateDepth.Rd index a590d39..35c7da8 100644 --- a/man/calibrateDepth.Rd +++ b/man/calibrateDepth.Rd @@ -1,315 +1,281 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/calibrate.R \name{calibrateDepth} \alias{calibrateDepth} - -% ------------------------------------------------------------------------- - \title{Calibrate Depth and Generate a "TDRcalibrate" object} - - -\description{Detect periods of major activities in a \acronym{TDR} - record, calibrate depth readings, and generate a - \code{\link{TDRcalibrate}} object essential for subsequent summaries - of diving behaviour.} - - \usage{ -calibrateDepth(x, dry.thr=70, wet.cond, wet.thr=3610, dive.thr=4, - zoc.method=c("visual", "offset", "filter"), \ldots, - interp.wet=FALSE, - dive.model=c("unimodal", "smooth.spline"), - smooth.par=0.1, knot.factor=3, - descent.crit.q=0, ascent.crit.q=0) +calibrateDepth( + x, + dry.thr = 70, + wet.cond, + wet.thr = 3610, + dive.thr = 4, + zoc.method = c("visual", "offset", "filter"), + ..., + interp.wet = FALSE, + dive.model = c("unimodal", "smooth.spline"), + smooth.par = 0.1, + knot.factor = 3, + descent.crit.q = 0, + ascent.crit.q = 0 +) } - - \arguments{ - - \item{x}{An object of class \code{\link{TDR}} for - \code{\link{calibrateDepth}} or an object of class - \code{\link{TDRcalibrate}} for \code{\link{calibrateSpeed}}.} - - \item{dry.thr}{numeric: dry error threshold in seconds. Dry phases - shorter than this threshold will be considered as wet.} - - \item{wet.cond}{logical: indicates which observations should be - considered wet. If it is not provided, records with non-missing - depth are assumed to correspond to wet conditions (see - \sQuote{Details} and \sQuote{Note} below).} - - \item{wet.thr}{numeric: wet threshold in seconds. At-sea phases - shorter than this threshold will be considered as trivial wet.} - - \item{dive.thr}{numeric: threshold depth below which an underwater - phase should be considered a dive.} - - \item{zoc.method}{character string to indicate the method to use for - zero offset correction. One of \dQuote{visual}, \dQuote{offset}, or - \dQuote{filter} (see \sQuote{Details}).} - - \item{\ldots}{Arguments required for ZOC methods \code{filter} - (\code{k}, \code{probs}, \code{depth.bounds} (defaults to range), - \code{na.rm} (defaults to TRUE)) and \code{offset} (\code{offset}).} - - \item{interp.wet}{logical: if TRUE (default is FALSE), then an - interpolating spline function is used to impute NA depths in wet - periods (\emph{after ZOC}). \emph{Use with caution}: it may only be - useful in cases where the missing data pattern in wet periods is - restricted to shallow depths near the beginning and end of dives. - This pattern is common in some satellite-linked \acronym{TDR}s.} - - \item{dive.model}{character string specifying what model to use for - each dive for the purpose of dive phase identification. One of - \dQuote{smooth.spline} or \dQuote{unimodal}, to choose among - smoothing spline or unimodal regression (see \sQuote{Details}). For - dives with less than five observations, smoothing spline regression - is used regardless (see \sQuote{Details}).} - - \item{smooth.par}{numeric scalar representing amount of smoothing - (argument \code{spar} in \code{\link[stats]{smooth.spline}}) when - \code{dive.model="smooth.spline"}. If it is NULL, then the - smoothing parameter is determined by Generalized Cross-validation - (GCV). 
Ignored with default \code{dive.model="unimodal"}.} - - \item{knot.factor}{numeric scalar that multiplies the number of - samples in the dive. This is used to construct the time predictor - for the derivative.} - - \item{descent.crit.q}{numeric: critical quantile of rates of descent - below which descent is deemed to have ended.} - - \item{ascent.crit.q}{numeric: critical quantile of rates of ascent - above which ascent is deemed to have started.} - +\item{x}{An object of class \code{\link{TDR}} for +\code{\link{calibrateDepth}} or an object of class +\code{\link{TDRcalibrate}} for \code{\link{calibrateSpeed}}.} + +\item{dry.thr}{numeric: dry error threshold in seconds. Dry phases +shorter than this threshold will be considered as wet.} + +\item{wet.cond}{logical: indicates which observations should be +considered wet. If it is not provided, records with non-missing +depth are assumed to correspond to wet conditions (see +\sQuote{Details} and \sQuote{Note} below).} + +\item{wet.thr}{numeric: wet threshold in seconds. At-sea phases shorter +than this threshold will be considered as trivial wet.} + +\item{dive.thr}{numeric: threshold depth below which an underwater +phase should be considered a dive.} + +\item{zoc.method}{character string to indicate the method to use for +zero offset correction. One of \dQuote{visual}, \dQuote{offset}, +or \dQuote{filter} (see \sQuote{Details}).} + +\item{...}{Arguments required for ZOC methods \code{filter} (\code{k}, +\code{probs}, \code{depth.bounds} (defaults to range), \code{na.rm} +(defaults to TRUE)) and \code{offset} (\code{offset}).} + +\item{interp.wet}{logical: if TRUE (default is FALSE), then an +interpolating spline function is used to impute NA depths in wet +periods (\emph{after ZOC}). \emph{Use with caution}: it may only +be useful in cases where the missing data pattern in wet periods is +restricted to shallow depths near the beginning and end of dives. +This pattern is common in some satellite-linked \acronym{TDR}s.} + +\item{dive.model}{character string specifying what model to use for +each dive for the purpose of dive phase identification. One of +\dQuote{smooth.spline} or \dQuote{unimodal}, to choose among +smoothing spline or unimodal regression (see \sQuote{Details}). +For dives with less than five observations, smoothing spline +regression is used regardless (see \sQuote{Details}).} + +\item{smooth.par}{numeric scalar representing amount of smoothing +(argument \code{spar} in \code{\link[stats]{smooth.spline}}) when +\code{dive.model="smooth.spline"}. If it is NULL, then the +smoothing parameter is determined by Generalized Cross-validation +(GCV). Ignored with default \code{dive.model="unimodal"}.} + +\item{knot.factor}{numeric scalar that multiplies the number of samples +in the dive. This is used to construct the time predictor for the +derivative.} + +\item{descent.crit.q}{numeric: critical quantile of rates of descent +below which descent is deemed to have ended.} + +\item{ascent.crit.q}{numeric: critical quantile of rates of ascent +above which ascent is deemed to have started.} } - - -\details{This function is really a wrapper around \code{.detPhase}, - \code{.detDive}, and \code{.zoc} which perform the work on simplified - objects. It performs wet/dry phase detection, zero-offset correction - of depth, and detection of dives, as well as proper labelling of the - latter. 
- - The procedure starts by zero-offset correcting depth (see \sQuote{ZOC} - below), and then a factor is created with value \dQuote{L} (dry) for - rows with NAs for \code{depth} and value \dQuote{W} (wet) otherwise. - This assumes that \acronym{TDR}s were programmed to turn off recording - of depth when instrument is dry (typically by means of a salt-water - switch). If this assumption cannot be made for any reason, then a - logical vector as long as the time series should be supplied as - argument \code{wet.cond} to indicate which observations should be - considered wet. This argument is directly analogous to the - \code{subset} argument in \code{\link{subset.data.frame}}, so it can - refer to any variable in the \code{\link{TDR}} object (see - \sQuote{Note} section below). The duration of each of these phases of - activity is subsequently calculated. If the duration of a dry phase - (\dQuote{L}) is less than \code{dry.thr}, then the values in the - factor for that phase are changed to \dQuote{W} (wet). The duration - of phases is then recalculated, and if the duration of a phase of wet - activity is less than \code{wet.thr}, then the corresponding value for - the factor is changed to \dQuote{Z} (trivial wet). The durations of - all phases are recalculated a third time to provide final phase - durations. - - Some instruments produce a peculiar pattern of missing data near the - surface, at the beginning and/or end of dives. The argument - \code{interp.wet} may help to rectify this problem by using an - interpolating spline function to impute the missing data, constraining - the result to a minimum depth of zero. Please note that this optional - step is performed after ZOC and before identifying dives, so that - interpolation is performed through dry phases coded as wet because - their duration was briefer than \code{dry.thr}. Therefore, - \code{dry.thr} must be chosen carefully to avoid interpolation through - legitimate dry periods. - - The next step is to detect dives whenever the zero-offset corrected - depth in an underwater phase is below the specified dive threshold. A - new factor with finer levels of activity is thus generated, including - \dQuote{U} (underwater), and \dQuote{D} (diving) in addition to the - ones described above. - - Once dives have been detected and assigned to a period of wet - activity, phases within dives are identified using the descent, ascent - and wiggle criteria (see \sQuote{Detection of dive phases} below). - This procedure generates a factor with levels \dQuote{D}, \dQuote{DB}, - \dQuote{B}, \dQuote{BA}, \dQuote{DA}, \dQuote{A}, and \dQuote{X}, - breaking the input into descent, descent/bottom, bottom, - bottom/ascent, ascent, descent/ascent (ocurring when no bottom phase - can be detected) and non-dive (surface), respectively. - +\value{ +An object of class \code{\link{TDRcalibrate}}. } - -\section{ZOC}{This procedure is required to correct drifts in the - pressure transducer of \acronym{TDR} records and noise in depth - measurements. Three methods are available to perform this correction. - - Method \dQuote{visual} calls \code{\link{plotTDR}}, which plots depth - and, optionally, speed vs. time with the ability of zooming in and out - on time, changing maximum depths displayed, and panning through time. - The button to zero-offset correct sections of the record allows for - the collection of \sQuote{x} and \sQuote{y} coordinates for two - points, obtained by clicking on the plot region. 
The first point - clicked represents the offset and beginning time of section to - correct, and the second one represents the ending time of the section - to correct. Multiple sections of the record can be corrected in this - manner, by panning through the time and repeating the procedure. In - case there's overlap between zero offset corrected windows, the last - one prevails. - - Method \dQuote{offset} can be used when the offset is known in - advance, and this value is used to correct the entire time series. - Therefore, offset=0 specifies no correction. - - Method \dQuote{filter} implements a smoothing/filtering mechanism - where running quantiles can be applied to depth measurements in a - recursive manner (Luque and Fried 2011), using \code{.depth.filter}. - The method calculates the first running quantile defined by - \code{probs[1]} on a moving window of size \code{k[1]}. The next - running quantile, defined by \code{probs[2]} and \code{k[2]}, is - applied to the smoothed/filtered depth measurements from the previous - step, and so on. The corrected depth measurements (d) are calculated - as: - - \deqn{d=d_{0} - d_{n}}{d=d[0] - d[n]} - - where \eqn{d_{0}}{d[0]} is original depth and \eqn{d_{n}}{d[n]} is the - last smoothed/filtered depth. This method is under development, but - reasonable results can be achieved by applying two filters (see - \sQuote{Examples}). The default \code{na.rm=TRUE} works well when - there are no level shifts between non-NA phases in the data, but - \code{na.rm=FALSE} is better in the presence of such shifts. In other - words, there is no reason to pollute the moving window with NAs when - non-NA phases can be regarded as a continuum, so splicing non-NA - phases makes sense. Conversely, if there are level shifts between - non-NA phases, then it is better to retain NA phases to help the - algorithm recognize the shifts while sliding the window(s). The - search for the surface can be limited to specified bounds during - smoothing/filtering, so that observations outside these bounds are - interpolated using the bounded smoothed/filtered series. - - Once the whole record has been zero-offset corrected, remaining depths - below zero, are set to zero, as these are assumed to indicate values - at the surface. - +\description{ +Detect periods of major activities in a \acronym{TDR} record, calibrate +depth readings, and generate a \code{\link{TDRcalibrate}} object +essential for subsequent summaries of diving behaviour. } - - -\section{Detection of dive phases}{ - - The process for each dive begins by taking all observations below the - dive detection threshold, and setting the beginning and end depths to - zero, at time steps prior to the first and after the last, - respectively. The latter ensures that descent and ascent derivatives - are non-negative and non-positive, respectively, so that the end and - beginning of these phases are not truncated. The next step is to fit - a model to each dive. Two models can be chosen for this purpose: - \sQuote{unimodal} (default) and \sQuote{smooth.spline}. - - Both models consist of a cubic spline, and its first derivative is - evaluated to investigate changes in vertical rate. Therefore, at - least 4 observations are required for each dive, so the time series is - linearly interpolated at equally spaced time steps if this limit is - not achieved in the current dive. 
Wiggles at the beginning and end of - the dive are assumed to be zero offset correction errors, so depth - observations at these extremes are interpolated between zero and the - next observations when this occurs. - - \subsection{\sQuote{unimodal}}{ - - In this default model, the spline is constrained to be unimodal - (Koellmann et al. 2014), assuming the diver must return to the - surface to breathe. The model is fitted using the uniReg package - (see \code{\link[uniReg]{unireg}}). This model and constraint are - consistent with the definition of dives in air-breathers, so is - certainly appropriate for this group of divers. A major advantage - of this approach over the next one is that the degree of smoothing - is determined via restricted maximum likelihood, and has no - influence on identifying the transition between descent and ascent. - Therefore, unimodal regression splines make the latter transition - clearer compared to using smoothing splines. - - However, note that dives with less than five samples are fit using - smoothing splines (see section below) regardless, as they produce - the same fit as unimodal regression but much faster. Therefore, - ensure that the parameters for that model are appropriate for the - data, although defaults are reasonable. - - } - - \subsection{\sQuote{smooth.spline}}{ - - In this model, specified via \code{dive.model="smooth.spline"}, a - smoothing spline is used to model each dive (see - \code{\link{smooth.spline}}), using the chosen smoothing parameter. - - Dive phases identified via this model, however, are highly sensitive - to the degree of smoothing (\code{smooth.par}) used, thus making it - difficult to determine what amount of smoothing is adequate. - - } - - A comparison between these methods is shown in the Examples section of - \code{\link{diveModel}}. - - The first derivate of the spline is evaluated at a set of knots to - calculate the vertical rate throughout the dive and determine the end - of descent and beginning of ascent. This set of knots is established - using a regular time sequence with beginning and end equal to the - extremes of the input sequence, and with length equal to \eqn{N \times - knot.factor}{N * \code{knot.factor}}. Equivalent procedures are used - for detecting descent and ascent phases. - - Once one of the models above has been fitted to each dive, the - quantile corresponding to (\code{descent.crit.q}) of all the positive - derivatives (rate of descent) at the beginning of the dive is used as - threshold for determining the end of descent. Descent is deemed to - have ended at the \emph{first} minimum derivative, and the nearest - input time observation is considered to indicate the end of descent. - The sign of the comparisons is reversed for detecting the ascent. If - observed depth to the left and right of the derivative defining the - ascent are the same, the right takes precedence. - - The particular dive phase categories are subsequently defined using - simple set operations. - +\details{ +This function is really a wrapper around \code{.detPhase}, +\code{.detDive}, and \code{.zoc} which perform the work on simplified +objects. It performs wet/dry phase detection, zero-offset correction +of depth, and detection of dives, as well as proper labelling of the +latter. + +The procedure starts by zero-offset correcting depth (see \sQuote{ZOC} +below), and then a factor is created with value \dQuote{L} (dry) for +rows with NAs for \code{depth} and value \dQuote{W} (wet) otherwise. 
+This assumes that \acronym{TDR}s were programmed to turn off recording +of depth when the instrument is dry (typically by means of a salt-water +switch). If this assumption cannot be made for any reason, then a +logical vector as long as the time series should be supplied as +argument \code{wet.cond} to indicate which observations should be +considered wet. This argument is directly analogous to the +\code{subset} argument in \code{\link{subset.data.frame}}, so it can +refer to any variable in the \code{\link{TDR}} object (see +\sQuote{Note} section below). The duration of each of these phases of +activity is subsequently calculated. If the duration of a dry phase +(\dQuote{L}) is less than \code{dry.thr}, then the values in the factor +for that phase are changed to \dQuote{W} (wet). The duration of phases +is then recalculated, and if the duration of a phase of wet activity is +less than \code{wet.thr}, then the corresponding value for the factor +is changed to \dQuote{Z} (trivial wet). The durations of all phases +are recalculated a third time to provide final phase durations. + +Some instruments produce a peculiar pattern of missing data near the +surface, at the beginning and/or end of dives. The argument +\code{interp.wet} may help to rectify this problem by using an +interpolating spline function to impute the missing data, constraining +the result to a minimum depth of zero. Please note that this optional +step is performed after ZOC and before identifying dives, so that +interpolation is performed through dry phases coded as wet because +their duration was briefer than \code{dry.thr}. Therefore, +\code{dry.thr} must be chosen carefully to avoid interpolation through +legitimate dry periods. + +The next step is to detect dives whenever the zero-offset corrected +depth in an underwater phase is below the specified dive threshold. A +new factor with finer levels of activity is thus generated, including +\dQuote{U} (underwater), and \dQuote{D} (diving) in addition to the +ones described above. + +Once dives have been detected and assigned to a period of wet activity, +phases within dives are identified using the descent, ascent and wiggle +criteria (see \sQuote{Detection of dive phases} below). This procedure +generates a factor with levels \dQuote{D}, \dQuote{DB}, \dQuote{B}, +\dQuote{BA}, \dQuote{DA}, \dQuote{A}, and \dQuote{X}, breaking the +input into descent, descent/bottom, bottom, bottom/ascent, ascent, +descent/ascent (occurring when no bottom phase can be detected) and +non-dive (surface), respectively. + +## ZOC + +This procedure is required to correct drifts in the pressure transducer +of \acronym{TDR} records and noise in depth measurements. Three +methods are available to perform this correction. + +Method \dQuote{visual} calls \code{\link{plotTDR}}, which plots depth +and, optionally, speed vs. time with the ability of zooming in and out +on time, changing maximum depths displayed, and panning through time. +The button to zero-offset correct sections of the record allows for the +collection of \sQuote{x} and \sQuote{y} coordinates for two points, +obtained by clicking on the plot region. The first point clicked +represents the offset and beginning time of section to correct, and the +second one represents the ending time of the section to correct. +Multiple sections of the record can be corrected in this manner, by +panning through the time and repeating the procedure. In case there's +overlap between zero offset corrected windows, the last one prevails.
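+
+A minimal, non-interactive sketch of the two alternatives described in
+the next paragraphs (the \code{divesTDR} data set bundled with the
+package is assumed, along with the 3 m surface offset and filter
+settings used for it in the \sQuote{Examples} below; both should be
+chosen to suit the record at hand):
+
+\preformatted{
+data(divesTDR)
+## "offset" method: a single, known offset for the whole record
+dcalib <- calibrateDepth(divesTDR, dive.thr=3,
+                         zoc.method="offset", offset=3)
+## "filter" method: recursive running quantiles; window sizes and
+## quantiles here are illustrative and should be tuned to the data
+dcalib.f <- calibrateDepth(divesTDR, dive.thr=3, zoc.method="filter",
+                           k=c(3, 5760), probs=c(0.5, 0.02), na.rm=TRUE)
+}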
+ +Method \dQuote{offset} can be used when the offset is known in advance, +and this value is used to correct the entire time series. Therefore, +\code{offset=0} specifies no correction. + +Method \dQuote{filter} implements a smoothing/filtering mechanism where +running quantiles can be applied to depth measurements in a recursive +manner (Luque and Fried 2011), using \code{.depth.filter}. The method +calculates the first running quantile defined by \code{probs[1]} on a +moving window of size \code{k[1]}. The next running quantile, defined +by \code{probs[2]} and \code{k[2]}, is applied to the smoothed/filtered +depth measurements from the previous step, and so on. The corrected +depth measurements (d) are calculated as: + +\deqn{d=d_{0} - d_{n}}{d=d[0] - d[n]} + +where \eqn{d_{0}}{d[0]} is the original depth and \eqn{d_{n}}{d[n]} is the +last smoothed/filtered depth. This method is under development, but +reasonable results can be achieved by applying two filters (see +\sQuote{Examples}). The default \code{na.rm=TRUE} works well when +there are no level shifts between non-NA phases in the data, but +\code{na.rm=FALSE} is better in the presence of such shifts. In other +words, there is no reason to pollute the moving window with NAs when +non-NA phases can be regarded as a continuum, so splicing non-NA phases +makes sense. Conversely, if there are level shifts between non-NA +phases, then it is better to retain NA phases to help the algorithm +recognize the shifts while sliding the window(s). The search for the +surface can be limited to specified bounds during smoothing/filtering, +so that observations outside these bounds are interpolated using the +bounded smoothed/filtered series. + +Once the whole record has been zero-offset corrected, remaining depths +below zero are set to zero, as these are assumed to indicate values at +the surface. + +## Detection of dive phases + +The process for each dive begins by taking all observations below the +dive detection threshold, and setting the beginning and end depths to +zero, at time steps prior to the first and after the last, +respectively. The latter ensures that descent and ascent derivatives +are non-negative and non-positive, respectively, so that the end and +beginning of these phases are not truncated. The next step is to fit a +model to each dive. Two models can be chosen for this purpose: +\sQuote{unimodal} (default) and \sQuote{smooth.spline}. + +Both models consist of a cubic spline, and its first derivative is +evaluated to investigate changes in vertical rate. Therefore, at least +4 observations are required for each dive, so the time series is +linearly interpolated at equally spaced time steps if this limit is not +achieved in the current dive. Wiggles at the beginning and end of the +dive are assumed to be zero offset correction errors, so depth +observations at these extremes are interpolated between zero and the +next observations when this occurs. + +### \sQuote{unimodal} + +In this default model, the spline is constrained to be unimodal +(Koellmann et al. 2014), assuming the diver must return to the surface +to breathe. The model is fitted using the uniReg package (see +\code{\link[uniReg]{unireg}}). This model and constraint are +consistent with the definition of dives in air-breathers, so it is +certainly appropriate for this group of divers.
A major advantage of +this approach over the next one is that the degree of smoothing is +determined via restricted maximum likelihood, and has no influence on +identifying the transition between descent and ascent. Therefore, +unimodal regression splines make the latter transition clearer compared +to using smoothing splines. + +However, note that dives with less than five samples are fit using +smoothing splines (see section below) regardless, as they produce the +same fit as unimodal regression but much faster. Therefore, ensure +that the parameters for that model are appropriate for the data, +although defaults are reasonable. + +### \sQuote{smooth.spline} + +In this model, specified via \code{dive.model="smooth.spline"}, a +smoothing spline is used to model each dive (see +\code{\link{smooth.spline}}), using the chosen smoothing parameter. + +Dive phases identified via this model, however, are highly sensitive to +the degree of smoothing (\code{smooth.par}) used, thus making it +difficult to determine what amount of smoothing is adequate. + +A comparison of these methods is shown in the Examples section of +\code{\link{diveModel}}. + +The first derivate of the spline is evaluated at a set of knots to +calculate the vertical rate throughout the dive and determine the end +of descent and beginning of ascent. This set of knots is established +using a regular time sequence with beginning and end equal to the +extremes of the input sequence, and with length equal to \eqn{N \times +knot.factor}{N * \code{knot.factor}}. Equivalent procedures are used +for detecting descent and ascent phases. + +Once one of the models above has been fitted to each dive, the quantile +corresponding to (\code{descent.crit.q}) of all the positive +derivatives (rate of descent) at the beginning of the dive is used as +threshold for determining the end of descent. Descent is deemed to +have ended at the \emph{first} minimum derivative, and the nearest +input time observation is considered to indicate the end of descent. +The sign of the comparisons is reversed for detecting the ascent. If +observed depth to the left and right of the derivative defining the +ascent are the same, the right takes precedence. + +The particular dive phase categories are subsequently defined using +simple set operations. } - -\note{Note that the condition implied with argument \code{wet.cond} is - evaluated after the ZOC procedure, so it can refer to corrected depth. - In many cases, not all variables in the \code{\link{TDR}} object are - sampled with the same frequency, so they may need to be interpolated - before using them for this purpose. Note also that any of these - variables may contain similar problems as those dealth with during - ZOC, so programming instruments to record depth only when wet is - likely the best way to ensure proper detection of wet/dry conditions.} - -\references{ - - Koellmann, C., Ickstadt, K. and Fried, R. (2014) Beyond unimodal - regression: modelling multimodality with piecewise unimodal, mixture - or additive regression. Technical Report - 8. \url{https://sfb876.tu-dortmund.de/FORSCHUNG/techreports.html}, SFB - 876, TU Dortmund - - Luque, S.P. and Fried, R. (2011) Recursive filtering for zero offset - correction of diving depth time series. PLoS ONE 6:e15850 - +\note{ +Note that the condition implied with argument \code{wet.cond} is + evaluated after the ZOC procedure, so it can refer to corrected + depth. 
In many cases, not all variables in the \code{\link{TDR}} + object are sampled with the same frequency, so they may need to be + interpolated before using them for this purpose. Note also that + any of these variables may contain similar problems as those dealt + with during ZOC, so programming instruments to record depth only + when wet is likely the best way to ensure proper detection of + wet/dry conditions. } - - -\value{An object of class \code{\link{TDRcalibrate}}.} - - -\seealso{\code{\link{TDRcalibrate}}, \code{\link{.zoc}}, - \code{\link{.depthFilter}}, \code{\link{.detPhase}}, - \code{\link{.detDive}}, \code{\link{plotTDR}}, and - \code{\link{plotZOC}} to visually assess ZOC procedure. See - \code{\link{diveModel}}, \code{\link{smooth.spline}}, - \code{\link{unireg}} for dive models.} - -% ------------------------------------------------------------------------- - \examples{ - data(divesTDR) divesTDR @@ -335,12 +301,27 @@ data(divesTDRzoc) knot.factor=20)) } - } - - -\author{Sebastian P. Luque \email{spluque@gmail.com}} - - -\keyword{manip}% at least one, from doc/KEYWORDS -\keyword{math}% __ONLY ONE__ keyword per line +\references{ +Koellmann, C., Ickstadt, K. and Fried, R. (2014) Beyond unimodal +regression: modelling multimodality with piecewise unimodal, mixture or +additive regression. Technical Report 8. +\url{https://sfb876.tu-dortmund.de/FORSCHUNG/techreports.html}, SFB 876, TU +Dortmund + +Luque, S.P. and Fried, R. (2011) Recursive filtering for zero offset +correction of diving depth time series. PLoS ONE 6:e15850 +} +\seealso{ +\code{\link{TDRcalibrate}}, \code{\link{.zoc}}, + \code{\link{.depthFilter}}, \code{\link{.detPhase}}, + \code{\link{.detDive}}, \code{\link{plotTDR}}, and + \code{\link{plotZOC}} to visually assess ZOC procedure. See + \code{\link{diveModel}}, \code{\link{smooth.spline}}, + \code{\link{unireg}} for dive models. +} +\author{ +Sebastian P. Luque \email{spluque@gmail.com} +} +\keyword{manip} +\keyword{math} diff --git a/man/calibrateSpeed.Rd b/man/calibrateSpeed.Rd index f2ca796..12ebbab 100644 --- a/man/calibrateSpeed.Rd +++ b/man/calibrateSpeed.Rd @@ -1,79 +1,65 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/calibrate.R \name{calibrateSpeed} \alias{calibrateSpeed} - -% ------------------------------------------------------------------------- - \title{Calibrate and build a "TDRcalibrate" object} - - -\description{These functions create a \code{\link{TDRcalibrate}} object - which is necessary to obtain dive summary statistics.} - - \usage{ -calibrateSpeed(x, tau=0.1, contour.level=0.1, z=0, bad=c(0, 0), - main=slot(getTDR(x), "file"), coefs, plot=TRUE, - postscript=FALSE, \ldots) +calibrateSpeed( + x, + tau = 0.1, + contour.level = 0.1, + z = 0, + bad = c(0, 0), + main = slot(getTDR(x), "file"), + coefs, + plot = TRUE, + postscript = FALSE, + ... 
+) } - - \arguments{ +\item{x}{An object of class \code{\link{TDR}} for +\code{\link{calibrateDepth}} or an object of class +\code{\link{TDRcalibrate}} for \code{\link{calibrateSpeed}}.} - \item{x}{An object of class \code{\link{TDR}} for - \code{\link{calibrateDepth}} or an object of class - \code{\link{TDRcalibrate}} for \code{\link{calibrateSpeed}}.} - - \item{tau}{numeric scalar: quantile on which to regress speed on rate - of depth change; passed to \code{\link[quantreg]{rq}}.} +\item{tau}{numeric scalar: quantile on which to regress speed on rate +of depth change; passed to \code{\link[quantreg]{rq}}.} - \item{contour.level}{numeric scalar: the mesh obtained from the - bivariate kernel density estimation corresponding to this contour - will be used for the quantile regression to define the calibration - line.} +\item{contour.level}{numeric scalar: the mesh obtained from the +bivariate kernel density estimation corresponding to this contour +will be used for the quantile regression to define the calibration +line.} - \item{z}{numeric scalar: only changes in depth larger than this value - will be used for calibration.} +\item{z}{numeric scalar: only changes in depth larger than this value +will be used for calibration.} - \item{bad}{numeric vector of length 2 indicating that only rates of - depth change and speed greater than the given value should be used - for calibration, respectively.} +\item{bad}{numeric vector of length 2 indicating that only rates of +depth change and speed greater than the given value should be used +for calibration, respectively.} - \item{coefs}{numeric: known speed calibration coefficients from - quantile regression as a vector of length 2 (intercept, slope). If - provided, these coefficients are used for calibrating speed, - ignoring all other arguments, except \code{x}.} +\item{main, ...}{Arguments passed to \code{\link{rqPlot}}.} - \item{main, \ldots}{Arguments passed to \code{\link{rqPlot}}.} +\item{coefs}{numeric: known speed calibration coefficients from +quantile regression as a vector of length 2 (intercept, slope). If +provided, these coefficients are used for calibrating speed, +ignoring all other arguments, except \code{x}.} - \item{plot}{logical: whether to plot the results.} - - \item{postscript}{logical: whether to produce postscript file output.} +\item{plot}{logical: whether to plot the results.} +\item{postscript}{logical: whether to produce postscript file output.} } - - -\details{This calibrates speed readings following the procedure outlined - in Blackwell et al. (1999).} - - -\value{An object of class \code{\link{TDRcalibrate}}.} - - -\seealso{\code{\link{TDRcalibrate}}} - - -\references{ - - Blackwell S, Haverl C, Le Boeuf B, Costa D (1999). A method for - calibrating swim-speed recorders. Marine Mammal Science - 15(3):894-905. - +\value{ +An object of class \code{\link{TDRcalibrate}}. +} +\description{ +These functions create a \code{\link{TDRcalibrate}} object which is +necessary to obtain dive summary statistics. +} +\details{ +This calibrates speed readings following the procedure outlined in +Blackwell et al. (1999). } - -% ------------------------------------------------------------------------- - \examples{ - \donttest{## Too long for checks ## Continuing the Example from '?calibrateDepth': utils::example("calibrateDepth", package="diveMove", @@ -85,12 +71,16 @@ vcalib <- calibrateSpeed(dcalib, z=2) vcalib } - } - - -\author{Sebastian P. 
Luque \email{spluque@gmail.com}} - - -\keyword{manip}% at least one, from doc/KEYWORDS -\keyword{math}% __ONLY ONE__ keyword per line +\references{ +Blackwell S, Haverl C, Le Boeuf B, Costa D (1999). A method for calibrating +swim-speed recorders. Marine Mammal Science 15(3):894-905. +} +\seealso{ +\code{\link{TDRcalibrate}} +} +\author{ +Sebastian P. Luque \email{spluque@gmail.com} +} +\keyword{manip} +\keyword{math} diff --git a/man/createTDR.Rd b/man/createTDR.Rd new file mode 100644 index 0000000..6be088a --- /dev/null +++ b/man/createTDR.Rd @@ -0,0 +1,125 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/AllMethod.R, R/readTDR.R +\name{createTDR} +\alias{createTDR} +\alias{readTDR} +\title{Read comma-delimited file with "TDR" data} +\usage{ +createTDR( + time, + depth, + concurrentData = data.frame(matrix(ncol = 0, nrow = length(time))), + speed = FALSE, + dtime, + file +) + +readTDR( + file, + dateCol = 1, + timeCol = 2, + depthCol = 3, + speed = FALSE, + subsamp = 5, + concurrentCols = 4:6, + dtformat = "\%d/\%m/\%Y \%H:\%M:\%S", + tz = "GMT", + ... +) +} +\arguments{ +\item{time}{A \code{POSIXct} object with date and time readings for +each reading.} + +\item{depth}{numeric vector with depth readings.} + +\item{concurrentData}{\code{\link{data.frame}} with additional, +concurrent data collected.} + +\item{speed}{logical: whether speed is included in one of the columns +of concurrentCols.} + +\item{dtime}{numeric scalar: sampling interval used in seconds. If +missing, it is calculated from the \code{time} argument.} + +\item{file}{character: a string indicating the path to the file to +read. This can also be a text-mode connection, as allowed in +\code{\link{read.csv}}.} + +\item{dateCol}{integer: column number containing dates, and optionally, +times.} + +\item{timeCol}{integer: column number with times.} + +\item{depthCol}{integer: column number containing depth readings.} + +\item{subsamp}{numeric scalar: subsample rows in \code{file} with +\code{subsamp} interval, in s.} + +\item{concurrentCols}{integer vector of column numbers to include as +concurrent data collected.} + +\item{dtformat}{character: a string specifying the format in which the +date and time columns, when pasted together, should be interpreted +(see \code{\link{strptime}}).} + +\item{tz}{character: a string indicating the time zone assumed for the +date and time readings.} + +\item{...}{Passed to \code{\link{read.csv}}} +} +\value{ +An object of class \code{\link{TDR}} or \code{\link{TDRspeed}}. +} +\description{ +Read a delimited (*.csv) file containing time-depth recorder +(\dfn{TDR}) data from various \acronym{TDR} models. Return a +\code{TDR} or \code{TDRspeed} object. \code{createTDR} creates an +object of one of these classes from other objects. +} +\details{ +The input file is assumed to have a header row identifying each field, +and all rows must be complete (i.e. have the same number of fields). +Field names need not follow any convention. However, depth and speed +are assumed to be in m, and \eqn{m \cdot s^{-1}}{m/s}, respectively, +for further analyses. + +If \var{speed} is TRUE and concurrentCols contains a column named speed +or velocity, then an object of class \code{\link{TDRspeed}} is created, +where speed is considered to be the column matching this name. 
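+
+A minimal sketch of this distinction (assuming the bundled \code{dives}
+example file, located via \code{utils::example} as in the
+\sQuote{Examples} below):
+
+\preformatted{
+utils::example("dives", package="diveMove", ask=FALSE, echo=FALSE)
+## default speed=FALSE yields a plain "TDR" object
+class(readTDR(zz, sep=";", na.strings="", as.is=TRUE))
+## speed=TRUE with a "speed" column present yields a "TDRspeed" object
+class(readTDR(zz, speed=TRUE, sep=";", na.strings="", as.is=TRUE))
+}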
+} +\section{Functions}{ +\itemize{ +\item \code{readTDR}: Create TDR object from file +}} + +\note{ +Although \code{\link{TDR}} and \code{\link{TDRspeed}} classes + check that time stamps are in increasing order, the integrity of + the input must be thoroughly verified for common errors present in + text output from \acronym{TDR} devices such as duplicate records, + missing time stamps and non-numeric characters in numeric fields. + These errors are much more efficiently dealt with outside of + \acronym{GNU} using tools like \code{GNU awk} or \code{GNU sed}, so + \code{\link{diveMove}} does not currently attempt to fix these + errors. +} +\examples{ +## Do example to define object zz with location of dataset +utils::example("dives", package="diveMove", + ask=FALSE, echo=FALSE) +srcfn <- basename(zz) +readTDR(zz, speed=TRUE, sep=";", na.strings="", as.is=TRUE) + +## Or more pedestrian +tdrX <- read.csv(zz, sep=";", na.strings="", as.is=TRUE) +date.time <- paste(tdrX$date, tdrX$time) +tdr.time <- as.POSIXct(strptime(date.time, format="\%d/\%m/\%Y \%H:\%M:\%S"), + tz="GMT") +createTDR(tdr.time, tdrX$depth, concurrentData=data.frame(speed=tdrX$speed), + file=srcfn, speed=TRUE) +} +\author{ +Sebastian P. Luque \email{spluque@gmail.com} +} +\keyword{manip} diff --git a/man/detDive-internal.Rd b/man/detDive-internal.Rd index 58ad2d3..bfaa4cc 100644 --- a/man/detDive-internal.Rd +++ b/man/detDive-internal.Rd @@ -1,56 +1,38 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/detDive.R \name{detDive-internal} \alias{detDive-internal} - \alias{.detDive} - -% ------------------------------------------------------------------------- - \title{Detect dives from depth readings} - - -\description{Identify dives in \acronym{TDR} records based on a dive - threshold.} - - \usage{ .detDive(zdepth, act, dive.thr) } - - \arguments{ +\item{zdepth}{numeric vector of zero-offset corrected depths.} - \item{zdepth}{numeric vector of zero-offset corrected depths.} - - \item{act}{factor as long as \code{depth} coding activity, with levels - specified as in \code{\link{.detPhase}}.} - - \item{dive.thr}{numeric scalar: threshold depth below which an - underwater phase should be considered a dive.} +\item{act}{factor as long as \code{depth} coding activity, with levels +specified as in \code{\link{.detPhase}}.} +\item{dive.thr}{numeric scalar: threshold depth below which an +underwater phase should be considered a dive.} } +\value{ +A \code{\link{data.frame}} with the following elements for + \code{.detDive} +\item{dive.id}{Numeric vector numbering each dive in the record.} -\value{A \code{\link{data.frame}} with the following elements for - \code{.detDive} - - \item{dive.id}{Numeric vector numbering each dive in the record.} - - \item{dive.activity}{Factor with levels \dQuote{L}, \dQuote{W}, - \dQuote{U}, \dQuote{D}, and \dQuote{Z}, see \code{\link{.detPhase}}. - All levels may be represented.} - - \item{postdive.id}{Numeric vector numbering each postdive interval - with the same value as the preceding dive.} +\item{dive.activity}{Factor with levels \dQuote{L}, \dQuote{W}, +\dQuote{U}, \dQuote{D}, and \dQuote{Z}, see \code{\link{.detPhase}}. +All levels may be represented.} +\item{postdive.id}{Numeric vector numbering each postdive interval with +the same value as the preceding dive.} +} +\description{ +Identify dives in \acronym{TDR} records based on a dive threshold. 
} - - -\seealso{\code{\link{.detPhase}}, \code{\link{.zoc}}} - -% ------------------------------------------------------------------------- - \examples{ - \donttest{## Too long for checks ## Continuing the Example from '?calibrateDepth': utils::example("calibrateDepth", package="diveMove", @@ -64,10 +46,11 @@ gross.act <- getGAct(dcalib) detd <- diveMove:::.detDive(getDepth(tdr), gross.act[[2]], 3) } - } - -\author{Sebastian P. Luque \email{spluque@gmail.com}} - - -\keyword{internal}% at least one, from doc/KEYWORDS +\seealso{ +\code{\link{.detPhase}}, \code{\link{.zoc}} +} +\author{ +Sebastian P. Luque \email{spluque@gmail.com} +} +\keyword{internal} diff --git a/man/detPhase-internal.Rd b/man/detPhase-internal.Rd index e5516b7..b430677 100644 --- a/man/detPhase-internal.Rd +++ b/man/detPhase-internal.Rd @@ -1,71 +1,50 @@ -\name{detPhase-internal} -\alias{detPhase-internal} - +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/detPhase.R +\name{.detPhase} \alias{.detPhase} - -% ------------------------------------------------------------------------- - \title{Detect phases of activity from depth readings} - - - -\description{Functions to identify sections of a \acronym{TDR} record - displaying one of three possible activities: dry, wet, and trivial - wet.} - - \usage{ .detPhase(time, depth, dry.thr, wet.cond, wet.thr, interval) } -%- maybe also 'usage' for other objects documented here. - - \arguments{ +\item{time}{\code{POSIXct} object with date and time for all depths.} - \item{time}{\code{POSIXct} object with date and time for all depths.} - - \item{depth}{numeric vector with depth readings.} - - \item{dry.thr, wet.cond, wet.thr}{As passed from - \code{\link{calibrateDepth}}.} +\item{depth}{numeric vector with depth readings.} - \item{interval}{As passed from \code{\link{calibrateDepth}}; sampling - interval in seconds.} +\item{dry.thr, wet.cond, wet.thr}{As passed from +\code{\link{calibrateDepth}}.} +\item{interval}{As passed from \code{\link{calibrateDepth}}; sampling +interval in seconds.} } +\value{ +A list with components: +\item{phase.id}{Numeric vector identifying each activity phase, +starting from 1 for every input record.} -\details{See \code{\link{calibrateDepth}}.} - - -\value{A list with components: - - \item{phase.id}{Numeric vector identifying each activity phase, - starting from 1 for every input record.} +\item{activity}{Factor with levels \dQuote{L} indicating dry, +\dQuote{W} indicating wet, \dQuote{U} for underwater (above dive +criterion), \dQuote{D} for diving, \dQuote{Z} for trivial wet animal +activities. Only \dQuote{L}, \dQuote{W}, and \dQuote{Z} are actually +represented.} - \item{activity}{Factor with levels \dQuote{L} indicating dry, - \dQuote{W} indicating wet, \dQuote{U} for underwater (above dive - criterion), \dQuote{D} for diving, \dQuote{Z} for trivial wet animal - activities. 
Only \dQuote{L}, \dQuote{W}, and \dQuote{Z} are - actually represented.} - - \item{begin}{A \code{\link{POSIXct}} object as long as the number of - unique activity phases identified, indicating the start times for - each activity phase.} - - \item{end}{A \code{\link{POSIXct}} object as long as the number of - unique activity phases identified, indicating the end times for each - activity phase.} +\item{begin}{A \code{\link{POSIXct}} object as long as the number of +unique activity phases identified, indicating the start times for each +activity phase.} +\item{end}{A \code{\link{POSIXct}} object as long as the number of +unique activity phases identified, indicating the end times for each +activity phase.} +} +\description{ +Functions to identify sections of a \acronym{TDR} record displaying one +of three possible activities: dry, wet, and trivial wet. +} +\details{ +See \code{\link{calibrateDepth}}. } - - -\seealso{\code{\link{.detDive}}, \code{\link{calibrateDepth}}} - -% ------------------------------------------------------------------------- - \examples{ - data(divesTDR) depths <- getDepth(divesTDR) times <- getTime(divesTDR) @@ -76,10 +55,11 @@ detp <- diveMove:::.detPhase(times, depths, dry.thr=70, wet.thr=3610, plotTDR(times, depths) rect(xleft=detp$begin, xright=detp$end, ybottom=0, ytop=-4, col=seq_along(detp$begin)) - } - -\author{Sebastian P. Luque \email{spluque@gmail.com} and Andy Liaw.} - - -\keyword{internal}% at least one, from doc/KEYWORDS +\seealso{ +\code{\link{.detDive}}, \code{\link{calibrateDepth}} +} +\author{ +Sebastian P. Luque \email{spluque@gmail.com} and Andy Liaw. +} +\keyword{internal} diff --git a/man/distSpeed.Rd b/man/distSpeed.Rd index a448b23..fd0d36e 100644 --- a/man/distSpeed.Rd +++ b/man/distSpeed.Rd @@ -1,47 +1,34 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/distSpeed.R \name{distSpeed} - \alias{distSpeed} - -% ------------------------------------------------------------------------- - \title{Calculate distance and speed between locations} - - -\description{Calculate distance, time difference, and speed between - pairs of points defined by latitude and longitude, given the time at - which all points were measured.} - - \usage{ -distSpeed(pt1, pt2, method=c("Meeus", "VincentyEllipsoid")) +distSpeed(pt1, pt2, method = c("Meeus", "VincentyEllipsoid")) } -%- maybe also 'usage' for other objects documented here. - - \arguments{ +\item{pt1}{A matrix or \code{\link{data.frame}} with three columns; the +first a \code{POSIXct} object with dates and times for all points, the +second and third numeric vectors of longitude and latitude for all points, +respectively, in decimal degrees.} - \item{pt1}{A matrix or \code{\link{data.frame}} with three columns; - the first a \code{POSIXct} object with dates and times for all - points, the second and third numeric vectors of longitude and - latitude for all points, respectively, in decimal degrees.} - - \item{pt2}{A matrix with the same size and structure as \code{pt1}.} - - \item{method}{character indicating which of the distance algorithms - from \code{\link[geosphere]{geosphere-package}} to use (only default - parameters used). Only \code{Meeus} and \code{VincentyEllipsoid} - are supported for now.} +\item{pt2}{A matrix with the same size and structure as \code{pt1}.} +\item{method}{character indicating which of the distance algorithms from +\code{\link[geosphere]{geosphere-package}} to use (only default parameters +used). 
Only \code{Meeus} and \code{VincentyEllipsoid} are supported for +now.} +} +\value{ +A matrix with three columns: distance (km), time difference (s), +and speed (m/s). +} +\description{ +Calculate distance, time difference, and speed between pairs of points +defined by latitude and longitude, given the time at which all points were +measured. } - - -\value{A matrix with three columns: distance (km), time difference (s), - and speed (m/s).} - -% ------------------------------------------------------------------------- - \examples{ - ## Using the Example from '?readLocs': utils::example("readLocs", package="diveMove", ask=FALSE, echo=FALSE) @@ -71,12 +58,9 @@ pts <- seq(10) locs[pts + 1, 3:5], method="VincentyEllipsoid")) meeus - vincenty - } - - -\author{Sebastian P. Luque \email{spluque@gmail.com}} - - -\keyword{math}% at least one, from doc/KEYWORDS +\author{ +Sebastian P. Luque \email{spluque@gmail.com} +} \keyword{manip} +\keyword{math} diff --git a/man/diveModel-class.Rd b/man/diveModel-class.Rd index 0b1c07d..55b3905 100644 --- a/man/diveModel-class.Rd +++ b/man/diveModel-class.Rd @@ -1,84 +1,61 @@ -% $Id: diveModel-class.Rd 200 2008-11-04 03:06:40Z sluque $ -\name{diveModel-class} -\Rdversion{1.1} +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/AllClass.R \docType{class} - +\name{diveModel-class} \alias{diveModel-class} \alias{diveModel} - -% ------------------------------------------------------------------------- - -\title{Class "diveModel" for representing a model for identifying dive - phases} - - -\description{ Details of model used to identify the different phases of - a dive. } - - -\section{Objects from the Class}{ Objects can be created by calls of the - form \code{new("diveModel", ...)}. - - \sQuote{diveModel} objects contain all relevant details of the process - to identify phases of a dive. Objects of this class are typically - generated during depth calibration, using - \code{\link{calibrateDepth}}, more specifically - \code{\link{.cutDive}}. - +\title{Class "diveModel" for representing a model for identifying dive phases} +\description{ +Details of model used to identify the different phases of a dive. } - - \section{Slots}{ - \describe{ +\describe{ +\item{\code{label.matrix}}{Object of class \code{"matrix"}. A 2-column +character matrix with row numbers matching each observation to the +full \code{\link{TDR}} object, and a vector labelling the phases of +each dive.} - \item{\code{label.matrix}:}{Object of class \code{"matrix"}. A - 2-column character matrix with row numbers matching each - observation to the full \code{\link{TDR}} object, and a vector - labelling the phases of each dive.} +\item{\code{model}}{Object of class \code{"character"}. A string identifying +the specific model fit to dives for the purpose of dive phase +identification. It should be one of \sQuote{smooth.spline} or +\sQuote{unimodal}.} - \item{\code{model}:}{Object of class \code{"character"}. A string - identifying the specific model fit to dives for the purpose of - dive phase identification. It should be one of - \sQuote{smooth.spline} or \sQuote{unimodal}.} +\item{\code{dive.spline}}{Object of class \code{"smooth.spline"}. Details of +cubic smoothing spline fit (see +\code{\link[stats]{smooth.spline}}).} - \item{\code{dive.spline}:}{Object of class \code{"smooth.spline"}. - Details of cubic smoothing spline fit (see - \code{\link[stats]{smooth.spline}}).} +\item{\code{spline.deriv}}{Object of class \code{"list"}. 
A list with the +first derivative of the smoothing spline (see +\code{\link[stats]{predict.smooth.spline}}).} - \item{\code{spline.deriv}:}{Object of class \code{"list"}. A list - with the first derivative of the smoothing spline (see - \code{\link[stats]{predict.smooth.spline}}).} +\item{\code{descent.crit}}{Object of class \code{"numeric"}. The index of the +observation at which the descent was deemed to have ended (from +initial surface observation).} - \item{\code{descent.crit}:}{Object of class \code{"numeric"}. The - index of the observation at which the descent was deemed to have - ended (from initial surface observation).} +\item{\code{ascent.crit}}{Object of class \code{"numeric"}. the index of the +observation at which the ascent was deemed to have ended (from +initial surface observation).} - \item{\code{ascent.crit}:}{Object of class \code{"numeric"}. the - index of the observation at which the ascent was deemed to have - ended (from initial surface observation).} +\item{\code{descent.crit.rate}}{Object of class \code{"numeric"}. The rate of +descent corresponding to the critical quantile used.} - \item{\code{descent.crit.rate}:}{Object of class \code{"numeric"}. - The rate of descent corresponding to the critical quantile - used.} +\item{\code{ascent.crit.rate}}{Object of class \code{"numeric"}. The rate of +ascent corresponding to the critical quantile used.} +}} - \item{\code{ascent.crit.rate}:}{Object of class \code{"numeric"}. - The rate of ascent corresponding to the critical quantile - used.} +\section{Objects from the Class}{ - } +Objects can be created by calls of the form \code{new("diveModel", + ...)}. +\sQuote{diveModel} objects contain all relevant details of the process to +identify phases of a dive. Objects of this class are typically generated +during depth calibration, using \code{\link{calibrateDepth}}, more +specifically \code{\link{.cutDive}}. } - -\seealso{\code{\link{getDiveDeriv}}, \code{\link{plotDiveModel}}} - -% ------------------------------------------------------------------------- - -\author{Sebastian P. Luque \email{spluque@gmail.com}} - \examples{ - showClass("diveModel") \donttest{## Too long for checks @@ -108,8 +85,11 @@ plotDiveModel(phases.spl, plotDiveModel(phases.uni, diveNo=paste(diveNo)) } - } - - +\seealso{ +\code{\link{getDiveDeriv}}, \code{\link{plotDiveModel}} +} +\author{ +Sebastian P. Luque \email{spluque@gmail.com} +} \keyword{classes} diff --git a/man/diveMove-defunct.Rd b/man/diveMove-defunct.Rd new file mode 100644 index 0000000..e068442 --- /dev/null +++ b/man/diveMove-defunct.Rd @@ -0,0 +1,25 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/diveMove-defunct.R +\name{diveMove-defunct} +\alias{diveMove-defunct} +\alias{bouts2.ll} +\alias{bouts2.LL} +\alias{bouts.mle} +\title{Defunct functions in package \sQuote{diveMove}} +\usage{ +bouts2.ll() + +bouts2.LL() + +bouts.mle() +} +\description{ +These functions are defunct and no longer available. 
+} +\section{\code{bouts2.ll} and \code{bouts2.LL}}{ + +These functions have been superseded by the new function generator +\code{\link{boutsMLEll.chooser}} +} + +\keyword{internal} diff --git a/man/diveMove-deprecated.Rd b/man/diveMove-deprecated.Rd new file mode 100644 index 0000000..9abd563 --- /dev/null +++ b/man/diveMove-deprecated.Rd @@ -0,0 +1,71 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/diveMove-deprecated.R +\name{diveMove-deprecated} +\alias{diveMove-deprecated} +\alias{bouts2.nlsFUN} +\alias{bouts2.nls} +\alias{bec2} +\alias{bec3} +\alias{bouts3.nlsFUN} +\alias{bouts3.nls} +\alias{bouts2.mleFUN} +\title{Deprecated functions in diveMove} +\usage{ +bouts2.nlsFUN(x, a1, lambda1, a2, lambda2) + +bouts2.nls(lnfreq, start, maxiter) + +bec2(fit) + +bec3(fit) + +bouts3.nlsFUN(x, a1, lambda1, a2, lambda2, a3, lambda3) + +bouts3.nls(lnfreq, start, maxiter) + +bouts2.mleFUN(x, p, lambda1, lambda2) +} +\description{ +These functions are provided for compatibility with older versions of +\sQuote{diveMove} only, and will be removed (defunct) in the next +release. +} +\section{\code{bouts2.nlsFUN}}{ + +For \code{bouts2.nlsFUN}, use \code{\link{boutsNLSll}}. +} + +\section{\code{bouts2.nls}}{ + +For \code{bouts2.nls}, use \code{\link{fitNLSbouts}}. +} + +\section{\code{bec2}}{ + +For \code{bec2}, use \code{\link{bec}}. +} + +\section{\code{bec3}}{ + +For \code{bec3}, use \code{\link{bec}}. +} + +\section{\code{bouts3.nlsFUN}}{ + +For \code{bouts3.nlsFUN}, use \code{\link{boutsNLSll}}. +} + +\section{\code{bouts3.nls}}{ + +For \code{bouts3.nls}, use \code{\link{fitNLSbouts}}. +} + +\section{\code{bouts2.mleFUN}}{ + +For \code{bouts2.mleFUN}, use \code{\link{.bouts2MLEll}}. +} + +\author{ +Sebastian P. Luque \email{spluque@gmail.com} +} +\keyword{internal} diff --git a/man/diveMove-package.Rd b/man/diveMove-package.Rd index 632041e..4172951 100644 --- a/man/diveMove-package.Rd +++ b/man/diveMove-package.Rd @@ -10,12 +10,12 @@ \title{Dive Analysis and Calibration} -\description{This package is a collection of functions for visualizing, - and analyzing depth and speed data from time-depth recorders - \acronym{TDR}s. These can be used to zero-offset correct depth, - calibrate speed, and divide the record into different phases, or time - budget. Functions are provided for calculating summary dive - statistics for the whole record, or at smaller scales within dives.} +\description{This package is a collection of functions for visualizing and + analyzing depth and speed data from time-depth recorders \acronym{TDR}s. + These can be used to zero-offset correct depth, calibrate speed, and + divide the record into different phases, or time budget. 
Functions are + provided for calculating summary dive statistics for the whole record, or + at smaller scales within dives.} \seealso{A vignette with a guide to this package is available by doing diff --git a/man/diveStats.Rd b/man/diveStats.Rd index 69aa10b..529d0a4 100644 --- a/man/diveStats.Rd +++ b/man/diveStats.Rd @@ -1,136 +1,125 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/diveStats.R, R/oneDiveStats.R, R/stampDive.R \name{diveStats} - \alias{diveStats} \alias{oneDiveStats} \alias{stampDive} - -% ------------------------------------------------------------------------- - \title{Per-dive statistics} - - - -\description{Calculate dive statistics in \acronym{TDR} records.} - - \usage{ -diveStats(x, depth.deriv=TRUE) -oneDiveStats(x, interval, speed=FALSE) -stampDive(x, ignoreZ=TRUE) -} -%- maybe also 'usage' for other objects documented here. +diveStats(x, depth.deriv = TRUE) +oneDiveStats(x, interval, speed = FALSE) +stampDive(x, ignoreZ = TRUE) +} \arguments{ +\item{x}{A \code{\link{TDRcalibrate-class}} object for \code{diveStats} +and \code{stampDive}, and a \code{\link{data.frame}} containing a +single dive's data (a factor identifying the dive phases, a POSIXct +object with the time for each reading, a numeric depth vector, and +a numeric speed vector) for \code{oneDiveStats}.} - \item{x}{A \code{\link{TDRcalibrate-class}} object for - \code{diveStats} and \code{stampDive}, and a - \code{\link{data.frame}} containing a single dive's data (a factor - identifying the dive phases, a POSIXct object with the time for each - reading, a numeric depth vector, and a numeric speed vector) for - \code{oneDiveStats}.} - - \item{depth.deriv}{logical: should depth derivative statistics be - calculated?} - - \item{interval}{numeric scalar: sampling interval for interpreting - \code{x}.} +\item{depth.deriv}{logical: should depth derivative statistics be +calculated?} - \item{speed}{logical: should speed statistics be calculated?} +\item{interval}{numeric scalar: sampling interval for interpreting +\code{x}.} - \item{ignoreZ}{logical: whether phases should be numbered considering - all aquatic activities (\dQuote{W} and \dQuote{Z}) or ignoring - \dQuote{Z} activities.} +\item{speed}{logical: should speed statistics be calculated?} +\item{ignoreZ}{logical: whether phases should be numbered considering +all aquatic activities (\dQuote{W} and \dQuote{Z}) or ignoring +\dQuote{Z} activities.} } +\value{ +A \code{\link{data.frame}} with one row per dive detected + (durations are in s, and linear variables in m): +\item{begdesc}{A \code{POSIXct} object, specifying the start time of +each dive.} -\details{\code{diveStats} calculates various dive statistics based on - time and depth for an entire \acronym{TDR} record. 
- \code{oneDiveStats} obtains these statistics from a single dive, and - \code{stampDive} stamps each dive with associated phase information.} - - -\value{A \code{\link{data.frame}} with one row per dive detected - (durations are in s, and linear variables in m): - - \item{begdesc}{A \code{POSIXct} object, specifying the start time of - each dive.} +\item{enddesc}{A \code{POSIXct} object, as \code{begdesc} indicating +descent's end time.} - \item{enddesc}{A \code{POSIXct} object, as \code{begdesc} indicating - descent's end time.} +\item{begasc}{A \code{POSIXct} object, as \code{begdesc} indicating the +time ascent began.} - \item{begasc}{A \code{POSIXct} object, as \code{begdesc} indicating - the time ascent began.} +\item{desctim}{Descent duration of each dive.} - \item{desctim}{Descent duration of each dive.} +\item{botttim}{Bottom duration of each dive.} - \item{botttim}{Bottom duration of each dive.} +\item{asctim}{Ascent duration of each dive.} - \item{asctim}{Ascent duration of each dive.} +\item{divetim}{Dive duration.} - \item{divetim}{Dive duration.} +\item{descdist}{Numeric vector with last descent depth.} - \item{descdist}{Numeric vector with last descent depth.} +\item{bottdist}{Numeric vector with the sum of absolute depth +differences while at the bottom of each dive; measure of amount of +\dQuote{wiggling} while at bottom.} - \item{bottdist}{Numeric vector with the sum of absolute depth - differences while at the bottom of each dive; measure of amount of - \dQuote{wiggling} while at bottom.} +\item{ascdist}{Numeric vector with first ascent depth.} - \item{ascdist}{Numeric vector with first ascent depth.} +\item{bottdep.mean}{Mean bottom depth.} - \item{bottdep.mean}{Mean bottom depth.} +\item{bottdep.median}{Median bottom depth.} - \item{bottdep.median}{Median bottom depth.} +\item{bottdep.sd}{Standard deviation of bottom depths.} - \item{bottdep.sd}{Standard deviation of bottom depths.} +\item{maxdep}{Numeric vector with maximum depth.} - \item{maxdep}{Numeric vector with maximum depth.} +\item{desc.tdist}{Numeric vector with descent total distance, estimated +from speed.} - \item{desc.tdist}{Numeric vector with descent total distance, - estimated from speed.} +\item{desc.mean.speed}{Numeric vector with descent mean speed.} - \item{desc.mean.speed}{Numeric vector with descent mean speed.} +\item{desc.angle}{Numeric vector with descent angle, from the surface +plane.} - \item{desc.angle}{Numeric vector with descent angle, from the surface - plane.} +\item{bott.tdist}{Numeric vector with bottom total distance, estimated +from speed.} - \item{bott.tdist}{Numeric vector with bottom total distance, estimated - from speed.} +\item{bott.mean.speed}{Numeric vector with bottom mean speed.} - \item{bott.mean.speed}{Numeric vector with bottom mean speed.} +\item{asc.tdist}{Numeric vector with ascent total distance, estimated +from speed.} - \item{asc.tdist}{Numeric vector with ascent total distance, estimated - from speed.} +\item{asc.mean.speed}{Numeric vector with ascent mean speed.} - \item{asc.mean.speed}{Numeric vector with ascent mean speed.} +\item{asc.angle}{Numeric vector with ascent angle, from the bottom plane.} - \item{asc.angle}{Numeric vector with ascent angle, from the bottom - plane.} +\item{postdive.dur}{Postdive duration.} - \item{postdive.dur}{Postdive duration.} +\item{postdive.tdist}{Numeric vector with postdive total distance, +estimated from speed.} - \item{postdive.tdist}{Numeric vector with postdive total distance, - estimated from speed.} 
+\item{postdive.mean.speed}{Numeric vector with postdive mean speed.} - \item{postdive.mean.speed}{Numeric vector with postdive mean speed.} +If \code{depth.deriv=TRUE}, 21 additional columns with the minimum, +first quartile, median, mean, third quartile, maximum, and standard +deviation of the depth derivative for each phase of the dive. The +number of columns also depends on argument \code{speed}. - If \code{depth.deriv=TRUE}, 21 additional columns with the minimum, - first quartile, median, mean, third quartile, maximum, and standard - deviation of the depth derivative for each phase of the dive. The - number of columns also depends on argument \code{speed}. - - \code{stampDive} returns a \code{\link{data.frame}} with phase number, - activity, and start and end times for each dive.} - - -\seealso{\code{\link{calibrateDepth}}, \code{\link{.detPhase}}, - \code{\link{TDRcalibrate-class}}} +\code{stampDive} returns a \code{\link{data.frame}} with phase number, +activity, and start and end times for each dive. +} +\description{ +Calculate dive statistics in \acronym{TDR} records. +} +\details{ +\code{diveStats} calculates various dive statistics based on time and +depth for an entire \acronym{TDR} record. \code{oneDiveStats} obtains +these statistics from a single dive, and \code{stampDive} stamps each +dive with associated phase information. +} +\section{Functions}{ +\itemize{ +\item \code{oneDiveStats}: Calculate dive statistics for a single dive -% ------------------------------------------------------------------------- +\item \code{stampDive}: Stamp dives +}} \examples{ - \donttest{## Too long for checks ## Continuing the Example from '?calibrateDepth': utils::example("calibrateDepth", package="diveMove", @@ -143,12 +132,13 @@ tdrX.tab <- data.frame(stamps, tdrX) summary(tdrX.tab) } - } - - -\author{Sebastian P. Luque \email{spluque@gmail.com}} - - -\keyword{arith}% at least one, from doc/KEYWORDS +\seealso{ +\code{\link{calibrateDepth}}, \code{\link{.detPhase}}, + \code{\link{TDRcalibrate-class}} +} +\author{ +Sebastian P. Luque \email{spluque@gmail.com} +} +\keyword{arith} \keyword{math} diff --git a/man/dot-runquantile.Rd b/man/dot-runquantile.Rd new file mode 100644 index 0000000..27b9b91 --- /dev/null +++ b/man/dot-runquantile.Rd @@ -0,0 +1,208 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/runquantile.R +\name{.runquantile} +\alias{.runquantile} +\title{Quantile of Moving Window} +\usage{ +.runquantile( + x, + k, + probs, + type = 7, + endrule = c("quantile", "NA", "trim", "keep", "constant", "func"), + align = c("center", "left", "right") +) +} +\arguments{ +\item{x}{numeric vector of length n or matrix with n rows. If \code{x} +is a matrix than each column will be processed separately.} + +\item{k}{width of moving window; must be an integer between one and n.} + +\item{probs}{numeric vector of probabilities with values in [0,1] range +used by \code{runquantile}.} + +\item{type}{an integer between 1 and 9 selecting one of the nine +quantile algorithms, same as \code{type} in \code{\link{quantile}} +function. Another even more readable description of nine ways to +calculate quantiles can be found at +\url{http://mathworld.wolfram.com/Quantile.html}.} + +\item{endrule}{character string indicating how the values at the beginning and the +end, of the array, should be treated. Only first and last \code{k2} +values at both ends are affected, where \code{k2} is the half-bandwidth +\code{k2 = k \%/\% 2}. 
+
+* \code{"quantile"} Applies the \code{\link{quantile}} function to
+  smaller and smaller sections of the array. Equivalent to: \code{for(i
+  in 1:k2) out[i]=quantile(x[1:(i+k2)])}.
+* \code{"trim"} Trim the ends; output array length is equal to
+  \code{length(x)-2*k2 (out = out[(k2+1):(n-k2)])}. This option mimics
+  output of \code{\link{apply}} \code{(\link{embed}(x,k),1,FUN)} and
+  other related functions.
+* \code{"keep"} Fill the ends with numbers from the \code{x} vector
+  \code{(out[1:k2] = x[1:k2])}
+* \code{"constant"} Fill the ends with the first and last calculated
+  values in the output array \code{(out[1:k2] = out[k2+1])}
+* \code{"NA"} Fill the ends with NA's \code{(out[1:k2] = NA)}
+* \code{"func"} Same as \code{"quantile"} but implemented in R. This
+  option could be very slow, and is included mostly for testing}
+
+\item{align}{specifies whether the result should be centered (default),
+left-aligned or right-aligned. If \code{endrule}="quantile", then
+setting \code{align} to "left" or "right" will fall back on a slower
+implementation equivalent to \code{endrule}="func".}
+}
+\value{
+If \code{x} is a vector, \code{runquantile} returns a matrix of size
+[n \eqn{\times}{x} \code{\link{length}}(probs)]. If \code{x} is a
+matrix, \code{runquantile} returns an array of size
+[\code{\link{dim}}(x) \eqn{\times}{x} \code{\link{length}}(probs)]. If
+\code{endrule="trim"}, the output will have fewer rows.
+}
+\description{
+Moving (aka running, rolling) window quantile calculated over a vector
+}
+\details{
+Apart from the end values, the result of y = runquantile(x, k) is the
+same as \dQuote{\code{for(j=(1+k2):(n-k2))
+y[j]=quantile(x[(j-k2):(j+k2)],na.rm = TRUE)}}. It can handle
+non-finite numbers like NaN's and Inf's (like \code{\link{quantile}(x,
+na.rm = TRUE)}).
+
+The main incentive to write this set of functions was the relative
+slowness of most moving window functions available in R and its
+packages. All functions listed in the \dQuote{see also} section are
+slower than the very inefficient
+\dQuote{\code{\link{apply}(\link{embed}(x,k),1,FUN)}} approach. The
+relative speed of \code{runquantile} is O(n*k).
+
+Function \code{runquantile} uses insertion sort to sort the moving
+window, but gains speed by remembering results of the previous sort.
+Since only one point changes each time the window is moved, all but one
+of the points in the window are already sorted. Insertion sort can fix
+that in O(k) time.
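As a small sketch of how the 'endrule' choices described above affect the ends of the output (the window width and probabilities are arbitrary illustration values):

  x <- as.numeric(1:10)
  q.def  <- diveMove:::.runquantile(x, k=5, probs=0.5)                  # endrule="quantile"
  q.na   <- diveMove:::.runquantile(x, k=5, probs=0.5, endrule="NA")    # ends set to NA
  q.trim <- diveMove:::.runquantile(x, k=5, probs=0.5, endrule="trim")  # ends dropped
  nrow(q.trim)  # length(x) - 2 * (5 %/% 2) = 6 rows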
+} +\examples{ +## show plot using runquantile +k <- 31; n <- 200 +x <- rnorm(n, sd=30) + abs(seq(n)-n/4) +y <- diveMove:::.runquantile(x, k, probs=c(0.05, 0.25, 0.5, 0.75, 0.95)) +col <- c("black", "red", "green", "blue", "magenta", "cyan") +plot(x, col=col[1], main="Moving Window Quantiles") +lines(y[,1], col=col[2]) +lines(y[,2], col=col[3]) +lines(y[,3], col=col[4]) +lines(y[,4], col=col[5]) +lines(y[,5], col=col[6]) +lab=c("data", "runquantile(.05)", "runquantile(.25)", "runquantile(0.5)", + "runquantile(.75)", "runquantile(.95)") +legend(0,230, lab, col=col, lty=1) + +## basic tests against apply/embed +a <- diveMove:::.runquantile(x, k, c(0.3, 0.7), endrule="trim") +b <- t(apply(embed(x, k), 1, quantile, probs=c(0.3, 0.7))) +eps <- .Machine$double.eps ^ 0.5 +stopifnot(all(abs(a - b) < eps)) + +## Test against loop approach + +## This test works fine at the R prompt but fails during package check - +## need to investigate +k <- 25; n <- 200 +x <- rnorm(n, sd=30) + abs(seq(n) - n / 4) # create random data +x[seq(1, n, 11)] <- NaN; # add NANs +k2 <- k \%/\% 2 +k1 <- k - k2 - 1 +a <- diveMove:::.runquantile(x, k, probs=c(0.3, 0.8)) +b <- matrix(0, n, 2) +for(j in 1:n) { + lo <- max(1, j - k1) + hi <- min(n, j + k2) + b[j, ] <- quantile(x[lo:hi], probs=c(0.3, 0.8), na.rm=TRUE) +} +## stopifnot(all(abs(a-b) nls object}, or numeric vector (valid +when \code{fit -> mle object}.} + +\item{bec.lty}{Line type specification for drawing the BEC reference +line.} + +\item{...}{Arguments passed to \code{\link{plot.default}}.} + +\item{xlab, ylab}{Label for x and y axis, respectively.} +} +\description{ +Plot fitted Poisson mixture model and data +} +\section{Methods (by class)}{ +\itemize{ +\item \code{fit = nls,obj = data.frame}: Plot fitted \code{nls} model on \code{data.frame} +object + +\item \code{fit = nls,obj = Bouts}: Plot fitted \code{nls} model on \code{Bouts} +object + +\item \code{fit = mle,obj = numeric}: Plot fitted \code{mle} model on \code{numeric} +object + +\item \code{fit = mle,obj = Bouts}: Plot fitted \code{mle} model on \code{Bouts} +object +}} + +\seealso{ +\code{\link{boutfreqs}}, \code{\link{fitNLSbouts}}, + \code{\link{fitMLEbouts}} +} +\author{ +Sebastian P. Luque \email{spluque@gmail.com} +} +\keyword{methods} +\keyword{models} +\keyword{plot} diff --git a/man/plotBoutsCDF.Rd b/man/plotBoutsCDF.Rd new file mode 100644 index 0000000..5551f58 --- /dev/null +++ b/man/plotBoutsCDF.Rd @@ -0,0 +1,57 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/AllMethod.R +\name{plotBoutsCDF,nls,numeric-method} +\alias{plotBoutsCDF,nls,numeric-method} +\alias{plotBoutsCDF} +\alias{plotBoutsCDF,nls,Bouts-method} +\alias{plotBoutsCDF,mle,numeric-method} +\alias{plotBoutsCDF,mle,Bouts-method} +\title{Plot empirical and deterministic cumulative frequency distribution +Poisson mixture data and model} +\usage{ +\S4method{plotBoutsCDF}{nls,numeric}(fit, obj, xlim, draw.bec = FALSE, bec.lty = 2, ...) + +\S4method{plotBoutsCDF}{nls,Bouts}(fit, obj, xlim, draw.bec = FALSE, bec.lty = 2, ...) + +\S4method{plotBoutsCDF}{mle,numeric}(fit, obj, xlim, draw.bec = FALSE, bec.lty = 2, ...) + +\S4method{plotBoutsCDF}{mle,Bouts}(fit, obj, xlim, draw.bec = FALSE, bec.lty = 2, ...) +} +\arguments{ +\item{fit}{Object of class \code{nls} or \code{mle}.} + +\item{obj}{Object of class \code{\link{Bouts}}.} + +\item{xlim}{2-length vector with limits for the x axis. 
If omitted, a +sensible default is calculated.} + +\item{draw.bec}{logical; whether to draw the BECs} + +\item{bec.lty}{Line type specification for drawing the BEC reference +line.} + +\item{...}{Arguments passed to \code{\link{plot.default}}.} +} +\description{ +Plot empirical and deterministic cumulative frequency distribution +Poisson mixture data and model +} +\section{Methods (by class)}{ +\itemize{ +\item \code{fit = nls,obj = numeric}: Plot (E)CDF on \code{\link{nls}} fit object +and numeric vector + +\item \code{fit = nls,obj = Bouts}: Plot (E)CDF on \code{\link{nls}} fit object +and \code{\link{Bouts}} object + +\item \code{fit = mle,obj = numeric}: Plot (E)CDF on numeric vector + +\item \code{fit = mle,obj = Bouts}: Plot (E)CDF on \code{\link{mle}} fit object +}} + +\author{ +Sebastian P. Luque \email{spluque@gmail.com} +} +\keyword{methods} +\keyword{models} +\keyword{plot} diff --git a/man/plotDiveModel-methods.Rd b/man/plotDiveModel-methods.Rd deleted file mode 100644 index efbea9a..0000000 --- a/man/plotDiveModel-methods.Rd +++ /dev/null @@ -1,123 +0,0 @@ -% $Id: plotDiveModel-methods.Rd 200 2008-11-04 03:06:40Z sluque $ -\name{plotDiveModel-methods} -\docType{methods} -\alias{plotDiveModel-methods} - -\alias{plotDiveModel} -\alias{plotDiveModel,diveModel,missing-method} -\alias{plotDiveModel,numeric,numeric-method} -\alias{plotDiveModel,TDRcalibrate,missing-method} - -% ------------------------------------------------------------------------- - -\title{ Methods for plotting models of dive phases } - - -\description{ Methods for function \code{plotDiveModel}. } - - -\usage{ -\S4method{plotDiveModel}{diveModel,missing}(x, diveNo) - -\S4method{plotDiveModel}{numeric,numeric}(x, y, times.s, depths.s, d.crit, a.crit, - diveNo=1, times.deriv, depths.deriv, - d.crit.rate, a.crit.rate) - -\S4method{plotDiveModel}{TDRcalibrate,missing}(x, diveNo) -} - - -\arguments{ - - \item{x}{A \code{\link{diveModel}} (diveMode,missing method), - \code{\link{numeric}} vector of time step observations - (numeric,numeric method), or \code{\link{TDRcalibrate}} object - (TDRcalibrate,numeric method).} - - \item{diveNo}{integer representing the dive number selected for - plotting.} - - \item{y}{numeric vector with depth observations at each time step.} - - \item{times.s}{numeric vector with time steps used to generate the - smoothing spline (i.e. the knots, see \code{\link{diveModel}}).} - - \item{depths.s}{numeric vector with smoothed depth (see - \code{\link{diveModel}}).} - - \item{d.crit}{integer denoting the index where descent ends in the - observed time series (see \code{\link{diveModel}}).} - - \item{a.crit}{integer denoting the index where ascent begins in the - observed time series (see \code{\link{diveModel}}).} - - \item{times.deriv}{numeric vector representing the time steps where - the derivative of the smoothing spline was evaluated (see - \code{\link{diveModel}}).} - - \item{depths.deriv}{numeric vector representing the derivative of the - smoothing spline evaluated at \code{times.deriv} (see - \code{\link{diveModel}}).} - - \item{d.crit.rate}{numeric scalar: vertical rate of descent - corresponding to the quantile used (see \code{\link{diveModel}}).} - - \item{a.crit.rate}{numeric scalar: vertical rate of ascent - corresponding to the quantile used (see \code{\link{diveModel}}).} - -} - - -\section{Methods}{ - - All methods produce a double panel plot. 
The top panel shows the - depth against time, the cubic spline smoother, the identified descent - and ascent phases (which form the basis for identifying the rest of - the dive phases), while the bottom panel shows the first derivative of - the smooth trace. - - \describe{ - - \item{\code{signature(x = "diveModel", y = "missing")}}{ Given a - \code{\link{diveModel}} object and (possibly) the dive number that - it corresponds to, the plot shows the model data. } - - \item{\code{signature(x = "numeric", y = "numeric")}}{ This is the - main method, which requires all aspects of the model to be - provided. } - - \item{\code{signature(x = "TDRcalibrate", y = "missing")}}{ Given a - \code{\link{TDRcalibrate}} object and a dive number to extract - from it, this method plots the observed data and the model. The - intended use of this method is through \code{\link{plotTDR}} when - \code{what="dive.model"}. } - -}} - - -\seealso{ \code{\link{diveModel}} } - -% ------------------------------------------------------------------------- - -\examples{ - -\donttest{## Too long for checks -## Continuing the Example from '?calibrateDepth': -utils::example("calibrateDepth", package="diveMove", - ask=FALSE, echo=FALSE, run.donttest=TRUE) - -## 'diveModel' method -dm <- getDiveModel(dcalib, 100) -plotDiveModel(dm, diveNo=100) - -## 'TDRcalibrate' method -plotDiveModel(dcalib, diveNo=100) - -} - -} - - -\author{Sebastian P. Luque \email{spluque@gmail.com}} - -\keyword{methods} diff --git a/man/plotDiveModel.Rd b/man/plotDiveModel.Rd new file mode 100644 index 0000000..e1e0de7 --- /dev/null +++ b/man/plotDiveModel.Rd @@ -0,0 +1,109 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/AllMethod.R +\name{plotDiveModel,diveModel,missing-method} +\alias{plotDiveModel,diveModel,missing-method} +\alias{plotDiveModel} +\alias{plotDiveModel,TDRcalibrate,missing-method} +\alias{plotDiveModel,numeric,numeric-method} +\title{Methods for plotting models of dive phases} +\usage{ +\S4method{plotDiveModel}{diveModel,missing}(x, diveNo) + +\S4method{plotDiveModel}{TDRcalibrate,missing}(x, diveNo) + +\S4method{plotDiveModel}{numeric,numeric}( + x, + y, + times.s, + depths.s, + d.crit, + a.crit, + diveNo = 1, + times.deriv, + depths.deriv, + d.crit.rate, + a.crit.rate +) +} +\arguments{ +\item{x}{A \code{\link{diveModel}} (diveModel,missing method), +\code{\link{numeric}} vector of time step observations +(numeric,numeric method), or \code{\link{TDRcalibrate}} object +(TDRcalibrate,numeric method).} + +\item{diveNo}{integer representing the dive number selected for +plotting.} + +\item{y}{numeric vector with depth observations at each time step.} + +\item{times.s}{numeric vector with time steps used to generate the +smoothing spline (i.e. 
the knots, see \code{\link{diveModel}}).} + +\item{depths.s}{numeric vector with smoothed depth (see +\code{\link{diveModel}}).} + +\item{d.crit}{integer denoting the index where descent ends in the +observed time series (see \code{\link{diveModel}}).} + +\item{a.crit}{integer denoting the index where ascent begins in the +observed time series (see \code{\link{diveModel}}).} + +\item{times.deriv}{numeric vector representing the time steps where the +derivative of the smoothing spline was evaluated (see +\code{\link{diveModel}}).} + +\item{depths.deriv}{numeric vector representing the derivative of the +smoothing spline evaluated at \code{times.deriv} (see +\code{\link{diveModel}}).} + +\item{d.crit.rate}{numeric scalar: vertical rate of descent +corresponding to the quantile used (see \code{\link{diveModel}}).} + +\item{a.crit.rate}{numeric scalar: vertical rate of ascent +corresponding to the quantile used (see \code{\link{diveModel}}).} +} +\description{ +All methods produce a double panel plot. The top panel shows the depth +against time, the cubic spline smoother, the identified descent and +ascent phases (which form the basis for identifying the rest of the +dive phases), while the bottom panel shows the first derivative of the +smooth trace. +} +\section{Methods (by class)}{ +\itemize{ +\item \code{x = diveModel,y = missing}: Given a \code{\link{diveModel}} object and +(possibly) the dive number that it corresponds to, the plot shows +the model data. + +\item \code{x = TDRcalibrate,y = missing}: Given a \code{\link{TDRcalibrate}} object and +a dive number to extract from it, this method plots the observed +data and the model. The intended use of this method is through +\code{\link{plotTDR}} when \code{what="dive.model"}. + +\item \code{x = numeric,y = numeric}: Base method, requiring all aspects of the +model to be provided. +}} + +\examples{ +\donttest{## Too long for checks + +## Continuing the Example from '?calibrateDepth': +utils::example("calibrateDepth", package="diveMove", + ask=FALSE, echo=FALSE, run.donttest=TRUE) + +## 'diveModel' method +dm <- getDiveModel(dcalib, 100) +plotDiveModel(dm, diveNo=100) + +## 'TDRcalibrate' method +plotDiveModel(dcalib, diveNo=100) + +} +} +\seealso{ +\code{\link{diveModel}} +} +\author{ +Sebastian P. Luque \email{spluque@gmail.com} +} +\keyword{methods} diff --git a/man/plotTDR-methods.Rd b/man/plotTDR-methods.Rd deleted file mode 100644 index 4c166b4..0000000 --- a/man/plotTDR-methods.Rd +++ /dev/null @@ -1,174 +0,0 @@ -\name{plotTDR-methods} -\docType{methods} -\alias{plotTDR-methods} - -\alias{plotTDR} - -\alias{plotTDR,POSIXt,numeric-method} -\alias{plotTDR,TDR,missing-method} -\alias{plotTDR,TDRcalibrate,missing-method} - -% ------------------------------------------------------------------------- - -\title{Methods for plotting objects of class "TDR" and "TDRcalibrate"} - - -\description{ Main plotting method for objects of these classes. Plot - and optionally set zero-offset correction windows in \acronym{TDR} - records, with the aid of a graphical user interface (GUI), allowing - for dynamic selection of offset and multiple time windows to perform - the adjustment. 
} - - -\usage{ - -\S4method{plotTDR}{POSIXt,numeric}(x, y, concurVars=NULL, xlim=NULL, depth.lim=NULL, - ylab.depth="depth (m)", concurVarTitles=deparse(substitute(concurVars)), - sunrise.time="06:00:00", sunset.time="18:00:00", night.col="gray60", - dry.time=NULL, phase.factor=NULL) -\S4method{plotTDR}{TDR,missing}(x, y, concurVars, concurVarTitles, \ldots) -\S4method{plotTDR}{TDRcalibrate,missing}(x, y, what=c("phases", "dive.model"), - diveNo=seq(max(getDAct(x, "dive.id"))), \ldots) - -} - - -\arguments{ - - \item{x}{\code{POSIXct} object with date and time, \code{\link{TDR}}, - or \code{\link{TDRcalibrate}} object.} - - \item{y}{numeric vector with depth in m.} - - \item{concurVars}{matrix with additional variables in each column to - plot concurrently with depth. For the (\code{TDR},\code{missing}) - and (\code{TDRcalibrate},\code{missing}) methods, a - \code{\link{character}} vector naming additional variables from the - \code{concurrentData} slot to plot, if any.} - - \item{xlim}{\code{POSIXct} or numeric vector of length 2, with lower - and upper limits of time to be plotted.} - - \item{depth.lim}{numeric vector of length 2, with the lower and upper - limits of depth to be plotted.} - - \item{ylab.depth}{character string to label the corresponding - y-axes.} - - \item{concurVarTitles}{character vector of titles to label each new - variable given in \var{concurVars}.} - - \item{sunrise.time, sunset.time}{character string with time of sunrise - and sunset, respectively, in 24 hr format. This is used for shading - night time.} - - \item{night.col}{color for shading night time.} - - \item{dry.time}{subset of time corresponding to observations - considered to be dry.} - - \item{phase.factor}{factor dividing rows into sections.} - - \item{\ldots}{Arguments for the \code{(POSIXt,numeric)} method. For - \code{(TDRcalibrate,missing)}, these are arguments for the appropriate - methods.} - - \item{diveNo}{numeric vector or scalar with dive numbers to plot.} - - \item{what}{character: what aspect of the \code{\link{TDRcalibrate}} - to plot, which selects the method to use for plotting.} - -} - - -\details{ - - This function is used primarily to correct drifts in the - pressure transducer of \acronym{TDR} records and noise in depth - measurements via method=\dQuote{visual} in - \code{\link{calibrateDepth}}. - -} - - -\section{Methods}{ - \describe{ - - \item{plotTDR}{\code{signature(x="TDR", y="numeric")}: interactive - graphical display of time-depth data, with zooming and panning - capabilities.} - - \item{plotTDR}{\code{signature(x="TDR", y="missing")}: As method - above.} - - \item{plotTDR}{\code{signature(x="TDRcalibrate", y="missing")}: plot - selected aspects of \code{\link{TDRcalibrate}} object. Currently, - two aspects have plotting methods: - - \itemize{ - - \item{ \code{phases} (Optional arguments: \code{concurVars}, - \code{surface}) Plots all dives, labelled by the activity - phase they belong to. It produces a plot consisting of one or - more panels; the first panel shows depth against time, and - additional panels show other concurrent data in the object. - Optional argument \code{concurVars} is a character vector - indicating which additional components from the - \code{concurrentData} slot to plot, if any. Optional argument - \code{surface} is a logical: whether to plot surface - readings. } - - \item{\code{dive.model} Plots the dive model for the selected - dive number (\code{diveNo} argument). 
} - - } - } - - } - -} - - -\value{ If called with the \code{interact} argument set to \code{TRUE}, - returns a list (invisibly) with as many components as sections of the - record that were zero-offset corrected, each consisting of two further - lists with the same components as those returned by - \code{\link{locator}}. } - - -\seealso{ \code{\link{calibrateDepth}}, \code{\link{.zoc}} } - -% ------------------------------------------------------------------------- - -\examples{ - -\donttest{## Too long for checks -## Continuing the Example from '?calibrateDepth': -utils::example("calibrateDepth", package="diveMove", - ask=FALSE, echo=FALSE, run.donttest=TRUE) -## Use interact=TRUE (default) to set an offset interactively -## Plot the 'TDR' object -plotTDR(getTime(divesTDR), getDepth(divesTDR)) -plotTDR(divesTDR) - -## Plot different aspects of the 'TDRcalibrate' object -plotTDR(dcalib) -plotTDR(dcalib, diveNo=19:25) -plotTDR(dcalib, what="dive.model", diveNo=25) -if (dev.interactive(orNone=TRUE)) { - ## Add surface observations and interact - plotTDR(dcalib, surface=TRUE) - ## Plot one dive - plotTDR(dcalib, diveNo=200) -} - -} - -} - - -\author{Sebastian P. Luque \email{spluque@gmail.com}, - with many ideas from CRAN package sfsmisc.} - -\keyword{methods} -\keyword{iplot} diff --git a/man/plotTDR.Rd b/man/plotTDR.Rd new file mode 100644 index 0000000..0fa4b0d --- /dev/null +++ b/man/plotTDR.Rd @@ -0,0 +1,147 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/AllMethod.R +\name{plotTDR,POSIXt,numeric-method} +\alias{plotTDR,POSIXt,numeric-method} +\alias{plotTDR} +\alias{plotTDR,TDR,missing-method} +\alias{plotTDR,TDRcalibrate,missing-method} +\title{Methods for plotting objects of class "TDR" and "TDRcalibrate"} +\usage{ +\S4method{plotTDR}{POSIXt,numeric}( + x, + y, + concurVars = NULL, + xlim = NULL, + depth.lim = NULL, + ylab.depth = "depth (m)", + concurVarTitles = deparse(substitute(concurVars)), + sunrise.time = "06:00:00", + sunset.time = "18:00:00", + night.col = "gray60", + dry.time = NULL, + phase.factor = NULL +) + +\S4method{plotTDR}{TDR,missing}(x, y, concurVars, concurVarTitles, ...) + +\S4method{plotTDR}{TDRcalibrate,missing}( + x, + y, + what = c("phases", "dive.model"), + diveNo = seq(max(getDAct(x, "dive.id"))), + ... +) +} +\arguments{ +\item{x}{\code{POSIXct} object with date and time, \code{\link{TDR}}, +or \code{\link{TDRcalibrate}} object.} + +\item{y}{numeric vector with depth in m.} + +\item{concurVars}{matrix with additional variables in each column to +plot concurrently with depth. For the (\code{TDR},\code{missing}) +and (\code{TDRcalibrate},\code{missing}) methods, a +\code{\link{character}} vector naming additional variables from the +\code{concurrentData} slot to plot, if any.} + +\item{xlim}{\code{POSIXct} or numeric vector of length 2, with lower +and upper limits of time to be plotted.} + +\item{depth.lim}{numeric vector of length 2, with the lower and upper +limits of depth to be plotted.} + +\item{ylab.depth}{character string to label the corresponding y-axes.} + +\item{concurVarTitles}{character vector of titles to label each new +variable given in \var{concurVars}.} + +\item{sunrise.time, sunset.time}{character string with time of sunrise +and sunset, respectively, in 24 hr format. 
This is used for +shading night time.} + +\item{night.col}{color for shading night time.} + +\item{dry.time}{subset of time corresponding to observations considered +to be dry.} + +\item{phase.factor}{factor dividing rows into sections.} + +\item{...}{Arguments for the \code{(POSIXt,numeric)} method. For +\code{(TDRcalibrate,missing)}, these are arguments for the +appropriate methods.} + +\item{what}{character: what aspect of the \code{\link{TDRcalibrate}} to +plot, which selects the method to use for plotting.} + +\item{diveNo}{numeric vector or scalar with dive numbers to plot.} +} +\value{ +If called with the \code{interact} argument set to \code{TRUE}, + returns a list (invisibly) with as many components as sections of + the record that were zero-offset corrected, each consisting of two + further lists with the same components as those returned by + \code{\link{locator}}. +} +\description{ +Main plotting method for objects of these classes. Plot and optionally +set zero-offset correction windows in \acronym{TDR} records, with the +aid of a graphical user interface (GUI), allowing for dynamic selection +of offset and multiple time windows to perform the adjustment. +} +\section{Methods (by class)}{ +\itemize{ +\item \code{x = POSIXt,y = numeric}: Base method plotting numeric vector against POSIXt +object + +\item \code{x = TDR,y = missing}: Interactive graphical display of time-depth data, +with zooming and panning capabilities. + +\item \code{x = TDRcalibrate,y = missing}: plot selected aspects of \code{\link{TDRcalibrate}} + object. Currently, two aspects have plotting methods: + +* \code{phases} (Optional arguments: \code{concurVars}, \code{surface}) + Plots all dives, labelled by the activity phase they belong to. It + produces a plot consisting of one or more panels; the first panel + shows depth against time, and additional panels show other concurrent + data in the object. Optional argument \code{concurVars} is a + character vector indicating which additional components from the + \code{concurrentData} slot to plot, if any. Optional argument + \code{surface} is a logical: whether to plot surface readings. + +* \code{dive.model} Plots the dive model for the selected dive number + (\code{diveNo} argument). +}} + +\examples{ +\donttest{## Too long for checks + +## Continuing the Example from '?calibrateDepth': +utils::example("calibrateDepth", package="diveMove", + ask=FALSE, echo=FALSE, run.donttest=TRUE) +## Use interact=TRUE (default) to set an offset interactively +## Plot the 'TDR' object +plotTDR(getTime(divesTDR), getDepth(divesTDR)) +plotTDR(divesTDR) + +## Plot different aspects of the 'TDRcalibrate' object +plotTDR(dcalib) +plotTDR(dcalib, diveNo=19:25) +plotTDR(dcalib, what="dive.model", diveNo=25) +if (dev.interactive(orNone=TRUE)) { + ## Add surface observations and interact + plotTDR(dcalib, surface=TRUE) + ## Plot one dive + plotTDR(dcalib, diveNo=200) +} + +} +} +\seealso{ +\code{\link{calibrateDepth}}, \code{\link{.zoc}} +} +\author{ +Sebastian P. Luque \email{spluque@gmail.com}, with many ideas + from CRAN package sfsmisc. 
+} +\keyword{iplot} +\keyword{methods} diff --git a/man/plotZOC-methods.Rd b/man/plotZOC-methods.Rd deleted file mode 100644 index df00cf5..0000000 --- a/man/plotZOC-methods.Rd +++ /dev/null @@ -1,137 +0,0 @@ -% $Id: $ -\name{plotZOC-methods} -\docType{methods} -\alias{plotZOC-methods} - -\alias{plotZOC} - -\alias{plotZOC,TDR,matrix-method} -\alias{plotZOC,TDR,TDRcalibrate-method} - -% ------------------------------------------------------------------------- - -\title{Methods for visually assessing results of ZOC procedure} - - -\description{ Plots for comparing the zero-offset corrected depth from a - \code{\link{TDRcalibrate}} object with the uncorrected data in a - \code{\link{TDR}} object, or the progress in each of the filters - during recursive filtering for ZOC (\code{\link{calibrateDepth}}). } - - -\usage{ - -\S4method{plotZOC}{TDR,matrix}(x, y, xlim, ylim, ylab="Depth (m)", \ldots) -\S4method{plotZOC}{TDR,TDRcalibrate}(x, y, xlim, ylim, ylab="Depth (m)", \ldots) - -} - - -\arguments{ - - \item{x}{\code{TDR} object.} - - \item{y}{matrix with the same number of rows as there are observations - in \code{x}, or a \code{TDRcalibrate} object.} - - \item{xlim}{\code{POSIXct} or numeric vector of length 2, with lower - and upper limits of time to be plotted. Defaults to time range of - input.} - - \item{ylim}{numeric vector of length 2 (upper, lower) with axis - limits. Defaults to range of input.} - - \item{ylab}{character strings to label the corresponding y-axis.} - - \item{\ldots}{Arguments passed to \code{\link{legend}}.} - -} - - -\details{ - - The \code{TDR},\code{matrix} method produces a plot like those shown - in Luque and Fried (2011). - - The \code{TDR},\code{TDRcalibrate} method overlays the corrected depth - from the second argument over that from the first. - -} - - -\references{ - - Luque, S.P. and Fried, R. (2011) Recursive filtering for zero offset - correction of diving depth time series. PLoS ONE 6:e15850 - -} - - -\section{Methods}{ - \describe{ - - \item{plotTDR}{\code{signature(x="TDR", y="matrix")}: This plot - helps in finding appropriate parameters for - \code{diveMove:::.depthFilter}, and consists of three panels. The - upper panel shows the original data, the middle panel shows the - filters, and the last panel shows the corrected - data. method=\dQuote{visual} in \code{\link{calibrateDepth}}.} - - \item{plotTDR}{\code{signature(x="TDR", y="TDRcalibrate")}: This - plots depth from the \code{TDRcalibrate} object over the one from - the \code{TDR} object.} - - } - -} - - -\value{ Nothing; a plot as side effect. 
} - - -\seealso{ \code{\link{calibrateDepth}}, \code{\link{.zoc}} } - -% ------------------------------------------------------------------------- - -\examples{ - -## Using the Example from '?diveStats': - -\donttest{## Too long for checks -utils::example("diveStats", package="diveMove", - ask=FALSE, echo=FALSE, run.donttest=TRUE) - -## Plot filters for ZOC -## Work on first phase (trip) subset, to save processing time, since -## there's no drift nor shifts between trips -tdr <- divesTDR[1:15000] -## Try window widths (K), quantiles (P) and bound the search (db) -K <- c(3, 360); P <- c(0.5, 0.02); db <- c(0, 5) -d.filter <- diveMove:::.depthFilter(depth=getDepth(tdr), - k=K, probs=P, depth.bounds=db, - na.rm=TRUE) -old.par <- par(no.readonly=TRUE) -plotZOC(tdr, d.filter, ylim=c(0, 6)) -par(old.par) - -## Plot corrected and uncorrected depth, regardless of method -## Look at three different scales -xlim1 <- c(getTime(divesTDR)[7100], getTime(divesTDR)[11700]) -xlim2 <- c(getTime(divesTDR)[7100], getTime(divesTDR)[7400]) -xlim3 <- c(getTime(divesTDR)[7100], getTime(divesTDR)[7200]) -par(mar=c(3, 4, 0, 1) + 0.1, cex=1.1, las=1) -layout(seq(3)) -plotZOC(divesTDR, dcalib, xlim=xlim1, ylim=c(0, 6)) -plotZOC(divesTDR, dcalib, xlim=xlim2, ylim=c(0, 70)) -plotZOC(divesTDR, dcalib, xlim=xlim3, ylim=c(0, 70)) -par(old.par) - -} - -} - - -\author{Sebastian P. Luque \email{spluque@gmail.com}} - -\keyword{methods} -\keyword{iplot} diff --git a/man/plotZOC.Rd b/man/plotZOC.Rd new file mode 100644 index 0000000..3664f43 --- /dev/null +++ b/man/plotZOC.Rd @@ -0,0 +1,103 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/AllMethod.R +\name{plotZOC,TDR,matrix-method} +\alias{plotZOC,TDR,matrix-method} +\alias{plotZOC} +\alias{plotZOC,TDR,TDRcalibrate-method} +\title{Methods for visually assessing results of ZOC procedure} +\usage{ +\S4method{plotZOC}{TDR,matrix}(x, y, xlim, ylim, ylab = "Depth (m)", ...) + +\S4method{plotZOC}{TDR,TDRcalibrate}(x, y, xlim, ylim, ylab = "Depth (m)", ...) +} +\arguments{ +\item{x}{\code{TDR} object.} + +\item{y}{matrix with the same number of rows as there are observations +in \code{x}, or a \code{TDRcalibrate} object.} + +\item{xlim}{\code{POSIXct} or numeric vector of length 2, with lower +and upper limits of time to be plotted. Defaults to time range of +input.} + +\item{ylim}{numeric vector of length 2 (upper, lower) with axis limits. +Defaults to range of input.} + +\item{ylab}{character strings to label the corresponding y-axis.} + +\item{...}{Arguments passed to \code{\link{legend}}.} +} +\value{ +Nothing; a plot as side effect. +} +\description{ +Plots for comparing the zero-offset corrected depth from a +\code{\link{TDRcalibrate}} object with the uncorrected data in a +\code{\link{TDR}} object, or the progress in each of the filters during +recursive filtering for ZOC (\code{\link{calibrateDepth}}). +} +\details{ +The \code{TDR},\code{matrix} method produces a plot like those shown in +Luque and Fried (2011). + +The \code{TDR},\code{TDRcalibrate} method overlays the corrected depth +from the second argument over that from the first. +} +\section{Methods (by class)}{ +\itemize{ +\item \code{x = TDR,y = matrix}: This plot helps in finding appropriate parameters +for \code{diveMove:::.depthFilter}, and consists of three panels. +The upper panel shows the original data, the middle panel shows the +filters, and the last panel shows the corrected +data. method=\dQuote{visual} in \code{\link{calibrateDepth}}. 
+ +\item \code{x = TDR,y = TDRcalibrate}: This plots depth from the \code{TDRcalibrate} +object over the one from the \code{TDR} object. +}} + +\examples{ +## Using the Example from '?diveStats': +\donttest{## Too long for checks + +utils::example("diveStats", package="diveMove", + ask=FALSE, echo=FALSE, run.donttest=TRUE) + +## Plot filters for ZOC +## Work on first phase (trip) subset, to save processing time, since +## there's no drift nor shifts between trips +tdr <- divesTDR[1:15000] +## Try window widths (K), quantiles (P) and bound the search (db) +K <- c(3, 360); P <- c(0.5, 0.02); db <- c(0, 5) +d.filter <- diveMove:::.depthFilter(depth=getDepth(tdr), + k=K, probs=P, depth.bounds=db, + na.rm=TRUE) +old.par <- par(no.readonly=TRUE) +plotZOC(tdr, d.filter, ylim=c(0, 6)) +par(old.par) + +## Plot corrected and uncorrected depth, regardless of method +## Look at three different scales +xlim1 <- c(getTime(divesTDR)[7100], getTime(divesTDR)[11700]) +xlim2 <- c(getTime(divesTDR)[7100], getTime(divesTDR)[7400]) +xlim3 <- c(getTime(divesTDR)[7100], getTime(divesTDR)[7200]) +par(mar=c(3, 4, 0, 1) + 0.1, cex=1.1, las=1) +layout(seq(3)) +plotZOC(divesTDR, dcalib, xlim=xlim1, ylim=c(0, 6)) +plotZOC(divesTDR, dcalib, xlim=xlim2, ylim=c(0, 70)) +plotZOC(divesTDR, dcalib, xlim=xlim3, ylim=c(0, 70)) +par(old.par) + +} +} +\references{ +Luque, S.P. and Fried, R. (2011) Recursive filtering for zero offset +correction of diving depth time series. PLoS ONE 6:e15850 +} +\seealso{ +\code{\link{calibrateDepth}}, \code{\link{.zoc}} +} +\author{ +Sebastian P. Luque \email{spluque@gmail.com} +} +\keyword{iplot} +\keyword{methods} diff --git a/man/readLocs.Rd b/man/readLocs.Rd index a12eac3..13044f8 100644 --- a/man/readLocs.Rd +++ b/man/readLocs.Rd @@ -1,80 +1,80 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/readLocs.R \name{readLocs} \alias{readLocs} - -% ------------------------------------------------------------------------- - \title{Read comma-delimited file with location data} - - -\description{Read a delimited (*.csv) file with (at least) time, - latitude, longitude readings.} - - \usage{ -readLocs(locations, loc.idCol, idCol, dateCol, timeCol=NULL, - dtformat="\%m/\%d/\%Y \%H:\%M:\%S", tz="GMT", - classCol, lonCol, latCol, alt.lonCol=NULL, alt.latCol=NULL, \ldots) +readLocs( + locations, + loc.idCol, + idCol, + dateCol, + timeCol = NULL, + dtformat = "\%m/\%d/\%Y \%H:\%M:\%S", + tz = "GMT", + classCol, + lonCol, + latCol, + alt.lonCol = NULL, + alt.latCol = NULL, + ... +) } -%- maybe also 'usage' for other objects documented here. - - \arguments{ +\item{locations}{character: a string indicating the path to the file to +read, or a \code{\link{data.frame}} available in the search +list. Provide the entire path if the file is not on the current +directory. This can also be a text-mode connection, as allowed in +\code{\link{read.csv}}.} - \item{locations}{character: a string indicating the path to the file - to read, or a \code{\link{data.frame}} available in the search - list. Provide the entire path if the file is not on the current - directory. This can also be a text-mode connection, as allowed in - \code{\link{read.csv}}.} - - \item{loc.idCol}{integer: column number containing location ID. If - missing, a \code{loc.id} column is generated with sequential - integers as long as the input.} - - \item{idCol}{integer: column number containing an identifier for - locations belonging to different groups. 
If missing, an id column - is generated with number one repeated as many times as the input.} +\item{loc.idCol}{integer: column number containing location ID. If +missing, a \code{loc.id} column is generated with sequential +integers as long as the input.} - \item{dateCol}{integer: column number containing dates, and, - optionally, times.} +\item{idCol}{integer: column number containing an identifier for +locations belonging to different groups. If missing, an id column +is generated with number one repeated as many times as the input.} - \item{timeCol}{integer: column number containing times.} +\item{dateCol}{integer: column number containing dates, and, +optionally, times.} - \item{dtformat}{character: a string specifying the format in which the - date and time columns, when pasted together, should be interpreted - (see \code{\link{strptime}}) in \code{file}.} +\item{timeCol}{integer: column number containing times.} - \item{tz}{character: a string indicating the time zone for the date - and time readings.} +\item{dtformat}{character: a string specifying the format in which the +date and time columns, when pasted together, should be interpreted +(see \code{\link{strptime}}) in \code{file}.} - \item{lonCol}{integer: column number containing longitude readings.} +\item{tz}{character: a string indicating the time zone for the date and +time readings.} - \item{latCol}{integer: column number containing latitude readings.} +\item{classCol}{integer: column number containing the ARGOS rating for +each location.} - \item{classCol}{integer: column number containing the ARGOS rating for - each location.} +\item{lonCol}{integer: column number containing longitude readings.} - \item{alt.lonCol}{integer: column number containing alternative - longitude readings.} +\item{latCol}{integer: column number containing latitude readings.} - \item{alt.latCol}{integer: Column number containing alternative - latitude readings.} +\item{alt.lonCol}{integer: column number containing alternative +longitude readings.} - \item{\ldots}{Passed to \code{\link{read.csv}}} +\item{alt.latCol}{integer: Column number containing alternative +latitude readings.} +\item{...}{Passed to \code{\link{read.csv}}} +} +\value{ +A data frame. +} +\description{ +Read a delimited (*.csv) file with (at least) time, latitude, longitude +readings. +} +\details{ +The file must have a header row identifying each field, and all rows +must be complete (i.e. have the same number of fields). Field names +need not follow any convention. } - - -\details{The file must have a header row identifying each field, and all - rows must be complete (i.e. have the same number of fields). Field - names need not follow any convention.} - - -\value{A data frame.} - -% ------------------------------------------------------------------------- - \examples{ - ## Do example to define object zz with location of dataset utils::example("sealLocs", package="diveMove", ask=FALSE, echo=FALSE) @@ -83,11 +83,8 @@ locs <- readLocs(zz, idCol=1, dateCol=2, lonCol=4, latCol=5, sep=";") summary(locs) - } - - -\author{Sebastian P. Luque \email{spluque@gmail.com}} - - +\author{ +Sebastian P. 
Luque \email{spluque@gmail.com} +} \keyword{manip} diff --git a/man/readTDR.Rd b/man/readTDR.Rd deleted file mode 100644 index 19c637d..0000000 --- a/man/readTDR.Rd +++ /dev/null @@ -1,122 +0,0 @@ -\name{readTDR} - -\alias{readTDR} -\alias{createTDR} - -% ------------------------------------------------------------------------- - -\title{Read comma-delimited file with "TDR" data} - - - -\description{Read a delimited (*.csv) file containing time-depth - recorder (\dfn{TDR}) data from various \acronym{TDR} models. Return a - \code{TDR} or \code{TDRspeed} object. \code{createTDR} creates an - object of one of these classes from other objects.} - - -\usage{ -readTDR(file, dateCol=1, timeCol=2, depthCol=3, speed=FALSE, - subsamp=5, concurrentCols=4:6, - dtformat="\%d/\%m/\%Y \%H:\%M:\%S", tz="GMT", \ldots) -createTDR(time, depth, - concurrentData=data.frame(matrix(ncol=0, nrow=length(time))), - speed=FALSE, dtime, file) -} - - -\arguments{ - - \item{file}{character: a string indicating the path to the file to - read. This can also be a text-mode connection, as allowed in - \code{\link{read.csv}}.} - - \item{dateCol}{integer: column number containing dates, and - optionally, times.} - - \item{timeCol}{integer: column number with times.} - - \item{depthCol}{integer: column number containing depth readings.} - - \item{speed}{logical: whether speed is included in one of the columns - of concurrentCols.} - - \item{subsamp}{numeric scalar: subsample rows in \code{file} with - \code{subsamp} interval, in s.} - - \item{concurrentCols}{integer vector of column numbers to include as - concurrent data collected.} - - \item{dtformat}{character: a string specifying the format in which the - date and time columns, when pasted together, should be interpreted - (see \code{\link{strptime}}).} - - \item{tz}{character: a string indicating the time zone assumed for the - date and time readings.} - - \item{\ldots}{Passed to \code{\link{read.csv}}} - - \item{time}{A \code{POSIXct} object with date and time readings for - each reading.} - - \item{depth}{numeric vector with depth readings.} - - \item{concurrentData}{\code{\link{data.frame}} with additional, - concurrent data collected.} - - \item{dtime}{numeric scalar: sampling interval used in seconds. If - missing, it is calculated from the \code{time} argument.} - -} - - -\details{The input file is assumed to have a header row identifying each - field, and all rows must be complete (i.e. have the same number of - fields). Field names need not follow any convention. However, depth - and speed are assumed to be in m, and \eqn{m \cdot s^{-1}}{m/s}, - respectively, for further analyses. - - If \var{speed} is TRUE and concurrentCols contains a column named - speed or velocity, then an object of class \code{\link{TDRspeed}} is - created, where speed is considered to be the column matching this - name.} - - -\note{Although \code{\link{TDR}} and \code{\link{TDRspeed}} classes - check that time stamps are in increasing order, the integrity of the - input must be thoroughly verified for common errors present in text - output from \acronym{TDR} devices such as duplicate records, missing - time stamps and non-numeric characters in numeric fields. 
These - errors are much more efficiently dealt with outside of \acronym{GNU} - \R using tools like \code{GNU awk} or \code{GNU sed}, so - \code{\link{diveMove}} does not currently attempt to fix these - errors.} - - -\value{An object of class \code{\link{TDR}} or \code{\link{TDRspeed}}.} - -% ------------------------------------------------------------------------- - -\examples{ - -## Do example to define object zz with location of dataset -utils::example("dives", package="diveMove", - ask=FALSE, echo=FALSE) -srcfn <- basename(zz) -readTDR(zz, speed=TRUE, sep=";", na.strings="", as.is=TRUE) - -## Or more pedestrian -tdrX <- read.csv(zz, sep=";", na.strings="", as.is=TRUE) -date.time <- paste(tdrX$date, tdrX$time) -tdr.time <- as.POSIXct(strptime(date.time, format="\%d/\%m/\%Y \%H:\%M:\%S"), - tz="GMT") -createTDR(tdr.time, tdrX$depth, concurrentData=data.frame(speed=tdrX$speed), - file=srcfn, speed=TRUE) - -} - - -\author{Sebastian P. Luque \email{spluque@gmail.com}} - - -\keyword{manip}% at least one, from doc/KEYWORDS diff --git a/man/rmixexp.Rd b/man/rmixexp.Rd new file mode 100644 index 0000000..e3cb53e --- /dev/null +++ b/man/rmixexp.Rd @@ -0,0 +1,43 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/bouts_helpers.R +\name{rmixexp} +\alias{rmixexp} +\title{Generate samples from a mixture of exponential distributions} +\usage{ +rmixexp(n, p, lambdas) +} +\arguments{ +\item{n}{integer output sample size.} + +\item{p}{numeric probabilities for processes generating the output +mixture sample.} + +\item{lambdas}{numeric \code{lambda} (rate) for each process.} +} +\value{ +vector of samples. +} +\description{ +\code{rmixexp} uses a special definition for the probabilities +\code{p_i} to generate random samples from a mixed Poisson distribution +with known parameters for each process. In the two-process case, +\code{p} represents the proportion of "fast" to "slow" events in the +mixture. In the three-process case, \code{p_0} represents the +proportion of "fast" to "slow" events, and \code{p_1} represents the +proportion of "slow" to "slow" *and* "very slow" events. +} +\examples{ +## Draw samples from a mixture where the first process occurs with +## p < 0.7, and the second process occurs with the remaining +## probability. 
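+## (Aside: a minimal sketch of what this parameterisation implies, not
+## necessarily how rmixexp() is implemented internally: pick a generating
+## process with the stated probabilities, then draw from that process's
+## exponential. 'procs.byhand' and 'x.byhand' are illustrative names only.
+## Under the same reading, a 3-process mixture with (p_0, p_1) would use
+## prob=c(p_0, (1 - p_0) * p_1, (1 - p_0) * (1 - p_1)).)
+procs.byhand <- sample(2, size=1000, replace=TRUE, prob=c(0.7, 0.3))
+x.byhand <- rexp(1000, rate=c(0.05, 0.005)[procs.byhand])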
+p <- 0.7
+lda <- c(0.05, 0.005)
+(rndprocs2 <- rmixexp(1000, p, lda))
+
+## 3-process
+p_f <- 0.6 # fast to slow
+p_svs <- 0.7 # prop of slow to (slow + very slow) procs
+p_true <- c(p_f, p_svs)
+lda_true <- c(0.05, 0.01, 8e-4)
+(rndprocs3 <- rmixexp(1000, p_true, lda_true))
+} diff --git a/man/rqPlot.Rd b/man/rqPlot.Rd index c0e2317..8902581 100644 --- a/man/rqPlot.Rd +++ b/man/rqPlot.Rd @@ -1,72 +1,67 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/calibrate.R \name{rqPlot} - \alias{rqPlot} - -% ------------------------------------------------------------------------- - \title{Plot of quantile regression for speed calibrations} - - - -\description{Plot of quantile regression for assessing quality of speed - calibrations} - - \usage{ -rqPlot(rddepth, speed, z, contours, rqFit, main="qtRegression", - xlab="rate of depth change (m/s)", ylab="speed (m/s)", - colramp=colorRampPalette(c("white", "darkblue")), - col.line="red", cex.pts=1) +rqPlot( + rddepth, + speed, + z, + contours, + rqFit, + main = "qtRegression", + xlab = "rate of depth change (m/s)", + ylab = "speed (m/s)", + colramp = colorRampPalette(c("white", "darkblue")), + col.line = "red", + cex.pts = 1 +) } -%- maybe also 'usage' for other objects documented here. - - \arguments{ +\item{rddepth}{numeric vector with rate of depth change.} - \item{speed}{numeric vector with speed in m/s.} - - \item{rddepth}{numeric vector with rate of depth change.} - - \item{z}{list with the bivariate kernel density estimates (1st - component the x points of the mesh, 2nd the y points, and 3rd the - matrix of densities).} +\item{speed}{numeric vector with speed in m/s.} - \item{contours}{list with components: \code{pts} which should be a - matrix with columns named \code{x} and \code{y}, \code{level} a - number indicating the contour level the points in \code{pts} - correspond to.} +\item{z}{list with the bivariate kernel density estimates (1st +component the x points of the mesh, 2nd the y points, and 3rd the +matrix of densities).} - \item{rqFit}{object of class \dQuote{rq} representing a quantile - regression fit of rate of depth change on mean speed.} +\item{contours}{list with components: \code{pts} which should be a +matrix with columns named \code{x} and \code{y}, \code{level} a +number indicating the contour level the points in \code{pts} +correspond to.} - \item{main}{character: string with title prefix to include in ouput - plot.} +\item{rqFit}{object of class \dQuote{rq} representing a quantile +regression fit of rate of depth change on mean speed.} - \item{xlab, ylab}{character vectors with axis labels.} +\item{main}{character: string with title prefix to include in output +plot.} - \item{colramp}{function taking an integer n as an argument and - returning n colors.} +\item{xlab, ylab}{character vectors with axis labels.} - \item{col.line}{color to use for the regression line.} +\item{colramp}{function taking an integer n as an argument and +returning n colors.} - \item{cex.pts}{numeric: value specifying the amount by which to - enlarge the size of points.} +\item{col.line}{color to use for the regression line.} +\item{cex.pts}{numeric: value specifying the amount by which to enlarge +the size of points.} +} +\description{ +Plot of quantile regression for assessing quality of speed calibrations +} +\details{ +The dashed line in the plot represents a reference indicating a one to +one relationship between speed and rate of depth change. The other +line represents the quantile regression fit. 
+} +\seealso{ +\code{\link{diveStats}} +} +\author{ +Sebastian P. Luque \email{spluque@gmail.com} } - - -\details{The dashed line in the plot represents a reference indicating a - one to one relationship between speed and rate of depth change. The - other line represent the quantile regression fit.} - - -\seealso{\code{\link{diveStats}}} - -% ------------------------------------------------------------------------- - -\author{Sebastian P. Luque \email{spluque@gmail.com}} - - -\keyword{manip}% at least one, from doc/KEYWORDS \keyword{arith} \keyword{hplot} +\keyword{manip} diff --git a/man/runquantile-internal.Rd b/man/runquantile-internal.Rd deleted file mode 100644 index e58fda4..0000000 --- a/man/runquantile-internal.Rd +++ /dev/null @@ -1,245 +0,0 @@ -\name{runquantile-internal} -\alias{runquantile-internal} - -\alias{.runquantile} - -% ------------------------------------------------------------------------- - -\title{Quantile of Moving Window} - - -\description{ - Moving (aka running, rolling) Window Quantile calculated over a vector -} - - -\usage{ -.runquantile(x, k, probs, type=7, - endrule=c("quantile", "NA", "trim", "keep", "constant", "func"), - align = c("center", "left", "right")) -} - - -\arguments{ - - \item{x}{numeric vector of length n or matrix with n rows. If \code{x} - is a matrix than each column will be processed separately.} - - \item{k}{width of moving window; must be an integer between one and - n.} - - \item{endrule}{character string indicating how the values at the - beginning and the end, of the array, should be treated. Only first - and last \code{k2} values at both ends are affected, where \code{k2} - is the half-bandwidth \code{k2 = k \%/\% 2}. - - \itemize{ - - \item \code{"quantile"} Applies the \code{\link{quantile}} - function to smaller and smaller sections of the - array. Equivalent to: \code{for(i in 1:k2) - out[i]=quantile(x[1:(i+k2)])}. - - \item \code{"trim"} Trim the ends; output array length is equal - to \code{length(x)-2*k2 (out = out[(k2+1):(n-k2)])}. This - option mimics output of \code{\link{apply}} - \code{(\link{embed}(x,k),1,FUN)} and other related functions. - - \item \code{"keep"} Fill the ends with numbers from \code{x} - vector \code{(out[1:k2] = x[1:k2])} - - \item \code{"constant"} Fill the ends with first and last - calculated value in output array \code{(out[1:k2] = out[k2+1])} - - \item \code{"NA"} Fill the ends with NA's \code{(out[1:k2] = NA)} - - \item \code{"func"} Same as \code{"quantile"} but implimented in - R. This option could be very slow, and is included mostly for - testing - - } - - } - - \item{probs}{numeric vector of probabilities with values in [0,1] - range used by \code{runquantile}.} - - \item{type}{an integer between 1 and 9 selecting one of the nine - quantile algorithms, same as \code{type} in \code{\link{quantile}} - function. Another even more readable description of nine ways to - calculate quantiles can be found at - \url{http://mathworld.wolfram.com/Quantile.html}.} - - \item{align}{specifies whether result should be centered (default), - left-aligned or right-aligned. If \code{endrule}="quantile" then - setting \code{align} to "left" or "right" will fall back on slower - implementation equivalent to \code{endrule}="func". } - -} - -\details{Apart from the end values, the result of y = runquantile(x, k) - is the same as \dQuote{\code{for(j=(1+k2):(n-k2)) - y[j]=quintile(x[(j-k2):(j+k2)],na.rm = TRUE)}}. 
It can handle - non-finite numbers like NaN's and Inf's (like \code{\link{quantile}(x, - na.rm = TRUE)}). - - The main incentive to write this set of functions was relative - slowness of majority of moving window functions available in R and its - packages. All functions listed in "see also" section are slower than - very inefficient \dQuote{\code{\link{apply}(\link{embed}(x,k),1,FUN)}} - approach. Relative speeds of \code{runquantile} is O(n*k) - - Function \code{runquantile} uses insertion sort to sort the moving - window, but gain speed by remembering results of the previous - sort. Since each time the window is moved, only one point changes, all - but one points in the window are already sorted. Insertion sort can - fix that in O(k) time. - -} - -\value{ - If \code{x} is a matrix than function \code{runquantile} returns a - matrix of size [n \eqn{\times}{x} \code{\link{length}}(probs)]. If - \code{x} is vactor a than function \code{runquantile} returns a matrix - of size [\code{\link{dim}}(x) \eqn{\times}{x} - \code{\link{length}}(probs)]. If \code{endrule="trim"} the output - will have fewer rows. -} - -\references{ - - \itemize{ - - \item About quantiles: Hyndman, R. J. and Fan, Y. (1996) - \emph{Sample quantiles in statistical packages, American - Statistician}, 50, 361. - - \item About quantiles: Eric W. Weisstein. \emph{Quantile}. From - MathWorld-- A Wolfram Web - Resource. \url{http://mathworld.wolfram.com/Quantile.html} - - \item About insertion sort used in \code{runmad} and - \code{runquantile}: R. Sedgewick (1988): - \emph{Algorithms}. Addison-Wesley (page 99) - - } - - } - -% ------------------------------------------------------------------------- - -\examples{ - ## show plot using runquantile - k <- 31; n <- 200 - x <- rnorm(n, sd=30) + abs(seq(n)-n/4) - y <- diveMove:::.runquantile(x, k, probs=c(0.05, 0.25, 0.5, 0.75, 0.95)) - col <- c("black", "red", "green", "blue", "magenta", "cyan") - plot(x, col=col[1], main="Moving Window Quantiles") - lines(y[,1], col=col[2]) - lines(y[,2], col=col[3]) - lines(y[,3], col=col[4]) - lines(y[,4], col=col[5]) - lines(y[,5], col=col[6]) - lab=c("data", "runquantile(.05)", "runquantile(.25)", "runquantile(0.5)", - "runquantile(.75)", "runquantile(.95)") - legend(0,230, lab, col=col, lty=1) - - ## basic tests against apply/embed - a <- diveMove:::.runquantile(x, k, c(0.3, 0.7), endrule="trim") - b <- t(apply(embed(x, k), 1, quantile, probs=c(0.3, 0.7))) - eps <- .Machine$double.eps ^ 0.5 - stopifnot(all(abs(a - b) < eps)) - - ## Test against loop approach - - ## This test works fine at the R prompt but fails during package check - - ## need to investigate - k <- 25; n <- 200 - x <- rnorm(n, sd=30) + abs(seq(n) - n / 4) # create random data - x[seq(1, n, 11)] <- NaN; # add NANs - k2 <- k %/% 2 - k1 <- k - k2 - 1 - a <- diveMove:::.runquantile(x, k, probs=c(0.3, 0.8)) - b <- matrix(0, n, 2) - for(j in 1:n) { - lo <- max(1, j - k1) - hi <- min(n, j + k2) - b[j, ] <- quantile(x[lo:hi], probs=c(0.3, 0.8), na.rm=TRUE) - } - ## stopifnot(all(abs(a-b)