Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Bundle of small fixes and feats #882

Merged
merged 12 commits into from
Dec 15, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion R/DESCRIPTION
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
Package: Robyn
Type: Package
Title: Semi-Automated Marketing Mix Modeling (MMM) from Meta Marketing Science
Version: 3.10.5.9008
Version: 3.10.5.9009
Authors@R: c(
person("Gufeng", "Zhou", , "gufeng@meta.com", c("cre","aut")),
person("Leonel", "Sentana", , "leonelsentana@meta.com", c("aut")),
Expand Down
2 changes: 1 addition & 1 deletion R/R/allocator.R
Original file line number Diff line number Diff line change
Expand Up @@ -212,7 +212,7 @@ robyn_allocator <- function(robyn_object = NULL,
# Spend values based on date range set
window_loc <- InputCollect$rollingWindowStartWhich:InputCollect$rollingWindowEndWhich
dt_optimCost <- slice(InputCollect$dt_mod, window_loc)
new_date_range <- check_metric_dates(date_range, dt_optimCost$ds, InputCollect$dayInterval, quiet = FALSE, is_allocator = TRUE)
new_date_range <- check_metric_dates(date_range, dt_optimCost$ds, InputCollect$dayInterval, quiet = quiet, is_allocator = TRUE)
date_min <- head(new_date_range$date_range_updated, 1)
date_max <- tail(new_date_range$date_range_updated, 1)
check_daterange(date_min, date_max, dt_optimCost$ds)
Expand Down
18 changes: 18 additions & 0 deletions R/R/auxiliary.R
Original file line number Diff line number Diff line change
Expand Up @@ -71,3 +71,21 @@ robyn_update <- function(dev = TRUE, ...) {
utils::install.packages("Robyn", ...)
}
}

# Merge baseline variables based on baseline_level param input
# Collect the set of variable names to be aggregated as "baseline",
# cumulatively by baseline_level:
#   0 = none, 1 = intercept, 2 = + trend, 3 = + all prophet vars (deduped),
#   4 = + context vars, 5 = + organic vars.
# Note: the result always carries a leading "" placeholder element; callers
# match with %in%, so the empty string is harmless by construction.
baseline_vars <- function(InputCollect, baseline_level) {
  stopifnot(length(baseline_level) == 1)
  stopifnot(baseline_level %in% 0:5)
  # Per-level additions, applied cumulatively up to baseline_level
  additions <- list(
    "(Intercept)",
    "trend",
    InputCollect$prophet_vars,
    InputCollect$context_vars,
    InputCollect$organic_vars
  )
  out <- ""
  for (lvl in seq_len(baseline_level)) {
    out <- c(out, additions[[lvl]])
    # Level 3 dedupes, since prophet_vars may repeat "trend" added at level 2
    if (lvl == 3) {
      out <- unique(out)
    }
  }
  return(out)
}
29 changes: 17 additions & 12 deletions R/R/clusters.R
Original file line number Diff line number Diff line change
Expand Up @@ -155,10 +155,11 @@ robyn_clusters <- function(input, dep_var_type,
get_height <- ceiling(k / 2) / 2
db <- (output$plot_clusters_ci / (output$plot_models_rois + output$plot_models_errors)) +
patchwork::plot_layout(heights = c(get_height, 1), guides = "collect")
# Suppressing "Picking joint bandwidth of x" messages
suppressMessages(ggsave(paste0(path, "pareto_clusters_detail.png"),
# Suppressing "Picking joint bandwidth of x" messages +
# In min(data$x, na.rm = TRUE) : no non-missing arguments to min; returning Inf warnings
suppressMessages(suppressWarnings(ggsave(paste0(path, "pareto_clusters_detail.png"),
plot = db, dpi = 500, width = 12, height = 4 + length(all_paid) * 2, limitsize = FALSE
))
)))
}

return(output)
Expand Down Expand Up @@ -198,9 +199,9 @@ confidence_calcs <- function(
v_samp <- df_chn$roi_total
}
boot_res <- .bootci(samp = v_samp, boot_n = boot_n)
boot_mean <- mean(boot_res$boot_means)
boot_mean <- mean(boot_res$boot_means, na.rm = TRUE)
boot_se <- boot_res$se
ci_low <- ifelse(boot_res$ci[1] < 0, 0, boot_res$ci[1])
ci_low <- ifelse(boot_res$ci[1] <= 0, 0, boot_res$ci[1])
ci_up <- boot_res$ci[2]

# Collect loop results
Expand All @@ -218,7 +219,7 @@ confidence_calcs <- function(
rn = i,
n = length(v_samp),
boot_mean = boot_mean,
x_sim = rnorm(sim_n, mean = boot_mean, sd = boot_se)
x_sim = suppressWarnings(rnorm(sim_n, mean = boot_mean, sd = boot_se))
) %>%
mutate(y_sim = dnorm(.data$x_sim, mean = boot_mean, sd = boot_se))
}
Expand Down Expand Up @@ -331,12 +332,17 @@ errors_scores <- function(df, balance = rep(1, 3), ts_validation = TRUE, ...) {

# Rescale a numeric vector linearly into [min, max].
# Non-finite values (NA, NaN, Inf, -Inf) are dropped first, so the output may
# be shorter than the input. Degenerate inputs are returned as-is after
# filtering: length <= 1 (nothing to scale) or a constant vector (zero range,
# which would divide by zero).
# NOTE(review): the parameters `min`/`max` shadow base::min/base::max by name;
# R still resolves `min(x, ...)` to the base function (function-call lookup
# skips non-function bindings), but the shadowing is fragile — consider
# renaming the parameters in a future breaking release.
.min_max_norm <- function(x, min = 0, max = 1) {
  x <- x[is.finite(x)]
  # is.finite() already drops NA/NaN; kept as an explicit, cheap safety net
  x <- x[!is.na(x)]
  if (length(x) <= 1) {
    return(x)
  } # return((max - min) / 2)
  a <- min(x, na.rm = TRUE)
  b <- max(x, na.rm = TRUE)
  if (b - a != 0) {
    return((max - min) * (x - a) / (b - a) + min)
  } else {
    # Constant vector: zero range, rescaling undefined — return unchanged
    return(x)
  }
}

.clusters_df <- function(df, all_paid, balance = rep(1, 3), limit = 1, ts_validation = TRUE, ...) {
Expand Down Expand Up @@ -430,8 +436,7 @@ errors_scores <- function(df, balance = rep(1, 3), ts_validation = TRUE, ...) {

.bootci <- function(samp, boot_n, seed = 1, ...) {
set.seed(seed)

if (length(samp) > 1) {
if (length(samp[!is.na(samp)]) > 1) {
samp_n <- length(samp)
samp_mean <- mean(samp, na.rm = TRUE)
boot_sample <- matrix(
Expand All @@ -451,6 +456,6 @@ errors_scores <- function(df, balance = rep(1, 3), ts_validation = TRUE, ...) {

return(list(boot_means = boot_means, ci = ci, se = se))
} else {
return(list(boot_means = samp, ci = c(NA, NA), se = NA))
return(list(boot_means = samp, ci = c(samp, samp), se = 0))
}
}
5 changes: 4 additions & 1 deletion R/R/inputs.R
Original file line number Diff line number Diff line change
Expand Up @@ -177,7 +177,10 @@ robyn_inputs <- function(dt_input = NULL,
### Use case 3: running robyn_inputs() with json_file
if (!is.null(json_file)) {
json <- robyn_read(json_file, step = 1, ...)
if (is.null(dt_input) || is.null(dt_holidays)) stop("Provide 'dt_input' and 'dt_holidays'")
if (is.null(dt_input)) stop("Must provide 'dt_input' input; 'dt_holidays' input optional")
if (!is.null(hyperparameters)) {
warning("Replaced hyperparameters input with json_file's fixed hyperparameters values")
}
for (i in seq_along(json$InputCollect)) {
assign(names(json$InputCollect)[i], json$InputCollect[[i]])
}
Expand Down
8 changes: 6 additions & 2 deletions R/R/json.R
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@
#' into the JSON file?
#' @param dir Character. Existing directory to export JSON file to.
#' @param pareto_df Dataframe. Save all pareto solutions to json file.
#' @param ... Additional parameters.
#' @param ... Additional parameters to export into a custom Extras element.
#' @examples
#' \dontrun{
#' InputCollectJSON <- robyn_inputs(
Expand All @@ -44,7 +44,6 @@ robyn_write <- function(InputCollect,
stopifnot(inherits(InputCollect, "robyn_inputs"))
if (!is.null(OutputCollect)) {
stopifnot(inherits(OutputCollect, "robyn_outputs"))
stopifnot(select_model %in% OutputCollect$allSolutions)
if (is.null(select_model) && length(OutputCollect$allSolutions == 1)) {
select_model <- OutputCollect$allSolutions
}
Expand Down Expand Up @@ -85,6 +84,7 @@ robyn_write <- function(InputCollect,

# Model associated data
if (length(select_model) == 1) {
stopifnot(select_model %in% OutputCollect$allSolutions)
outputs <- list()
outputs$select_model <- select_model
outputs$summary <- filter(OutputCollect$xDecompAgg, .data$solID == select_model) %>%
Expand Down Expand Up @@ -114,6 +114,10 @@ robyn_write <- function(InputCollect,
select_model <- "inputs"
}

if (length(list(...)) > 0) {
ret[["Extras"]] <- list(...)
}

if (!dir.exists(dir) & export) dir.create(dir, recursive = TRUE)
filename <- sprintf("%s/RobynModel-%s.json", dir, select_model)
filename <- gsub("//", "/", filename)
Expand Down
28 changes: 26 additions & 2 deletions R/R/plots.R
Original file line number Diff line number Diff line change
Expand Up @@ -242,12 +242,18 @@ robyn_plots <- function(
#' Generate and Export Robyn One-Pager Plots
#'
#' @rdname robyn_outputs
#' @param baseline_level Integer, from 0 to 5. Aggregate baseline variables,
#' depending on the level of aggregation you need. Default is 0 for no
#' aggregation. 1 for Intercept only. 2 adding trend. 3 adding all prophet
#' decomposition variables. 4. Adding contextual variables. 5 Adding organic
#' variables. Results will be reflected on the waterfall chart.
#' @return Invisible list with \code{patchwork} plot(s).
#' @export
robyn_onepagers <- function(
InputCollect, OutputCollect,
select_model = NULL, quiet = FALSE,
export = TRUE, plot_folder = OutputCollect$plot_folder, ...) {
export = TRUE, plot_folder = OutputCollect$plot_folder,
baseline_level = 0, ...) {
check_class("robyn_outputs", OutputCollect)
if (TRUE) {
pareto_fronts <- OutputCollect$pareto_fronts
Expand All @@ -266,6 +272,9 @@ robyn_onepagers <- function(
}
}

# Baseline variables
bvars <- baseline_vars(InputCollect, baseline_level)

# Prepare for parallel plotting
if (check_parallel_plot() && OutputCollect$cores > 1) registerDoParallel(OutputCollect$cores) else registerDoSEQ()
if (!hyper_fixed) {
Expand Down Expand Up @@ -381,7 +390,22 @@ robyn_onepagers <- function(
)

## 2. Waterfall
plotWaterfallLoop <- temp[[sid]]$plot2data$plotWaterfallLoop
plotWaterfallLoop <- temp[[sid]]$plot2data$plotWaterfallLoop %>%
mutate(rn = ifelse(
.data$rn %in% bvars, paste0("Baseline_L", baseline_level), as.character(.data$rn))) %>%
group_by(.data$rn) %>%
summarise(xDecompAgg = sum(.data$xDecompAgg, na.rm = TRUE),
xDecompPerc = sum(.data$xDecompPerc, na.rm = TRUE)) %>%
arrange(.data$xDecompPerc) %>%
mutate(
end = 1 - cumsum(.data$xDecompPerc),
start = lag(.data$end),
start = ifelse(is.na(.data$start), 1, .data$start),
id = row_number(),
rn = as.factor(as.character(.data$rn)),
sign = as.factor(ifelse(.data$xDecompPerc >= 0, "Positive", "Negative"))
)

p2 <- suppressWarnings(
ggplot(plotWaterfallLoop, aes(x = .data$id, fill = .data$sign)) +
geom_rect(aes(
Expand Down
7 changes: 7 additions & 0 deletions R/man/robyn_outputs.Rd

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

2 changes: 1 addition & 1 deletion R/man/robyn_write.Rd

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.