Swaggerfile support for array parameters, object, files #532

Closed
wants to merge 42 commits into from

42 commits
499bc1d
work on support for [] array syntax and object type
Apr 26, 2020
556695e
Added tests, finish adding features
Apr 27, 2020
22e0766
its the news
Apr 27, 2020
52d27bc
change minItems to match required
Apr 27, 2020
0047692
Added plumberExpression args metadata detection, support for file upl…
meztez Apr 29, 2020
5fa61bc
fix for linux
meztez Apr 29, 2020
877819f
generalize body parsing, big chunk
May 4, 2020
c1644e7
few tweaks, early morning, late night polish
May 4, 2020
574221e
rds parser and filename extension detection
meztez May 4, 2020
37a70db
perf improvement, recursive parseRaw for multipart
meztez May 5, 2020
5c6036e
change check location for to account for multipart
meztez May 8, 2020
3f5e02f
pr review changes
Jun 11, 2020
e2d1b82
merge master
Jun 11, 2020
4c3ee8e
as discussed during pr review call
Jun 11, 2020
aac97b9
Merge branch 'master' into list_plumber_type
Jun 11, 2020
fb864e2
come on tests, check
Jun 11, 2020
7983f0b
hopefully fix oldrel
Jun 11, 2020
d00ffd4
Use explicit type variable name
schloerke Jun 12, 2020
a5ebe78
Update R/swagger.R
meztez Jun 12, 2020
3f47a76
Update R/swagger.R
meztez Jun 12, 2020
63aa448
Update R/swagger.R
meztez Jun 12, 2020
ebf9627
Clarify priorize + add tests
Jun 12, 2020
d19b778
Update R/parse-block.R
meztez Jun 12, 2020
7dcd495
Merge branch 'list_plumber_type' of github.com:meztez/plumber into li…
Jun 12, 2020
32cff2c
type differenciation
Jun 12, 2020
2feda37
Fix bug
cpsievert Jun 12, 2020
0de5f2c
Update R/post-body.R
meztez Jun 12, 2020
e93d9b5
pr review
Jun 12, 2020
6d24651
more pr review
Jun 12, 2020
7a5f256
Update R/post-parsers.R
meztez Jun 12, 2020
8703658
Update R/post-parsers.R
meztez Jun 12, 2020
85881c5
Update R/query-string.R
meztez Jun 12, 2020
4f9e9af
Update R/query-string.R
meztez Jun 12, 2020
3b0871d
Update R/query-string.R
meztez Jun 12, 2020
939e400
pr reviwew more
Jun 12, 2020
36e8d6c
revert rlang, cause import problems, (chr both in crayon and rlang) p…
Jun 12, 2020
5ed196c
eval in plumberExpression env
Jun 12, 2020
b4911de
add attribute
Jun 12, 2020
fa3f66d
Update R/query-string.R
meztez Jun 12, 2020
0f769eb
pr review
Jun 13, 2020
21c9d8f
structure, max file size, parsers tests
Jun 15, 2020
304f0cd
switched rds to version 2 for r < 3.5 support
Jun 15, 2020
2 changes: 2 additions & 0 deletions DESCRIPTION
@@ -24,6 +24,7 @@ Imports:
R6 (>= 2.0.0),
stringi (>= 0.3.0),
jsonlite (>= 0.9.16),
webutils (>= 1.1),
httpuv (>= 1.5.0),
crayon,
promises (>= 1.1.0),
@@ -64,6 +65,7 @@ Collate:
'paths.R'
'plumber-static.R'
'plumber-step.R'
'post-parsers.R'
'response.R'
'serializer-content-type.R'
'serializer-html.R'
12 changes: 12 additions & 0 deletions NAMESPACE
@@ -2,6 +2,7 @@

export(PlumberEndpoint)
export(PlumberStatic)
export(addParser)
export(addSerializer)
export(do_configure_https)
export(do_deploy_api)
@@ -10,10 +11,17 @@ export(do_provision)
export(do_remove_api)
export(do_remove_forward)
export(forward)
export(getCharacterSet)
export(include_file)
export(include_html)
export(include_md)
export(include_rmd)
export(parser_json)
export(parser_multi)
export(parser_octet)
export(parser_query)
export(parser_rds)
export(parser_text)
export(plumb)
export(plumber)
export(randomCookieKey)
@@ -32,5 +40,9 @@ importFrom(grDevices,dev.off)
importFrom(grDevices,jpeg)
importFrom(grDevices,png)
importFrom(httpuv,runServer)
importFrom(jsonlite,fromJSON)
importFrom(jsonlite,toJSON)
importFrom(jsonlite,validate)
importFrom(stats,runif)
importFrom(stringi,stri_match_first_regex)
importFrom(webutils,parse_multipart)
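
The exports above add a user-facing parser API: addParser() for registration, alongside the built-in parser_json, parser_multi, parser_octet, parser_query, parser_rds, and parser_text. A hedged sketch of how registration could look — the exact addParser() signature is not shown in this diff, so the pattern argument and factory shape below are assumptions inferred from the post-body.R changes further down:

# Hypothetical CSV body parser; the addParser() signature is assumed, not confirmed by this diff.
# The registered value is a factory returning the actual parser, matching how parseRaw()
# below invokes do.call(parser(), toparse).
addParser("csv", function(...) {
  function(value, ...) {
    read.csv(text = rawToChar(value), stringsAsFactors = FALSE)
  }
}, pattern = "text/csv")
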
2 changes: 2 additions & 0 deletions NEWS.md
@@ -33,6 +33,8 @@ plumber 0.5.0

### New features

* Added Swagger support for array parameters using the syntax `name:[type]`, and a new type `list` (synonyms: `df`, `data.frame`). (@meztez)

* Added support for promises in endpoints, filters, and hooks. ([#248](https://github.com/rstudio/plumber/pull/248))

* Added support to a router's run method to allow the `swagger` parameter to be a function that
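
A minimal sketch of the new annotation syntax, assuming the `name:[type]` form with a trailing `*` for required parameters as described above and implemented in the parse-block.R changes below (the endpoint and parameter names are hypothetical):

#* Echo the requested ids
#* @param ids:[int]* One or more integer ids, sent as an array
#* @get /echo
function(ids) {
  list(ids = as.integer(ids))
}
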
28 changes: 10 additions & 18 deletions R/content-types.R
@@ -41,7 +41,8 @@ knownContentTypes <- list(
docx='application/vnd.openxmlformats-officedocument.wordprocessingml.document',
dotx='application/vnd.openxmlformats-officedocument.wordprocessingml.template',
xlam='application/vnd.ms-excel.addin.macroEnabled.12',
xlsb='application/vnd.ms-excel.sheet.binary.macroEnabled.12')
xlsb='application/vnd.ms-excel.sheet.binary.macroEnabled.12',
rds='application/rds')

getContentType <- function(ext, defaultType='application/octet-stream') {
ct <- knownContentTypes[[tolower(ext)]]
@@ -51,21 +52,12 @@ getContentType <- function(ext, defaultType='application/octet-stream') {
return(ct)
}

getCharacterSet <- function(contentType){
default <- "UTF-8"
if (is.null(contentType)) {
return(default)
}
charsetStart <- attr(
gregexpr(".*charset=(.*)", contentType, perl = T)[[1]],
"capture.start"
)
charsetStart <- as.integer(charsetStart)
as.character(
ifelse(
charsetStart > -1,
substr(contentType, charsetStart, nchar(contentType)),
default
)
)
# 4x perf improvement when contentType is set
#' Request character set
#' @param contentType Request Content-Type header
#' @return Defaults to `UTF-8` when no `charset` is given; otherwise returns the `charset` defined in the request header.
#' @export
getCharacterSet <- function(contentType = NULL){
  if (is.null(contentType)) return("UTF-8")
  stri_match_first_regex(paste(contentType, "; charset=UTF-8"), "charset=([^;\\s]*)")[,2]
}
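
Expected behavior of the rewritten getCharacterSet(), given the definition above — the appended "; charset=UTF-8" only wins when the incoming header carries no charset of its own:

getCharacterSet("application/json; charset=latin1")  # "latin1"
getCharacterSet("text/plain")                        # "UTF-8" (appended default)
getCharacterSet(NULL)                                # "UTF-8"
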
1 change: 1 addition & 0 deletions R/globals.R
@@ -1,3 +1,4 @@
.globals <- new.env()
.globals$serializers <- list()
.globals$processors <- new.env()
.globals$parsers <- list(func = list(), pattern = list())
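
The new registry keeps two parallel named lists that parserPicker() in post-body.R indexes by the same parser name; illustratively:

# Illustrative shape once parsers have registered themselves (values are examples):
# .globals$parsers$func$json    — a parser factory
# .globals$parsers$pattern$json — the content-type prefix it matches, e.g. "application/json"
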
9 changes: 5 additions & 4 deletions R/json.R
@@ -1,7 +1,8 @@
safeFromJSON <- function(txt, ...) {
if (!jsonlite::validate(txt)) {
#' @importFrom jsonlite validate fromJSON toJSON
#' @noRd
safeFromJSON <- function(txt) {
if (!validate(txt)) {
stop("Argument 'txt' is not a valid JSON string.")
}

jsonlite::fromJSON(txt, ...)
fromJSON(txt)
}
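
For example, with the stricter wrapper above:

safeFromJSON('{"a": 1}')  # list(a = 1)
safeFromJSON("not json")  # Error: Argument 'txt' is not a valid JSON string.
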
54 changes: 23 additions & 31 deletions R/parse-block.R
@@ -28,7 +28,7 @@ parseBlock <- function(lineNum, file){

line <- file[lineNum]

epMat <- stringi::stri_match(line, regex="^#['\\*]\\s*@(get|put|post|use|delete|head|options|patch)(\\s+(.*)$)?")
epMat <- stri_match(line, regex="^#['\\*]\\s*@(get|put|post|use|delete|head|options|patch)(\\s+(.*)$)?")
if (!is.na(epMat[1,2])){
p <- stri_trim_both(epMat[1,4])

@@ -39,10 +39,11 @@
if (is.null(paths)){
paths <- list()
}

paths[[length(paths)+1]] <- list(verb = enumerateVerbs(epMat[1,2]), path = p)
}

filterMat <- stringi::stri_match(line, regex="^#['\\*]\\s*@filter(\\s+(.*)$)?")
filterMat <- stri_match(line, regex="^#['\\*]\\s*@filter(\\s+(.*)$)?")
if (!is.na(filterMat[1,1])){
f <- stri_trim_both(filterMat[1,3])

@@ -58,7 +59,7 @@
filter <- f
}

preemptMat <- stringi::stri_match(line, regex="^#['\\*]\\s*@preempt(\\s+(.*)\\s*$)?")
preemptMat <- stri_match(line, regex="^#['\\*]\\s*@preempt(\\s+(.*)\\s*$)?")
if (!is.na(preemptMat[1,1])){
p <- stri_trim_both(preemptMat[1,3])
if (is.na(p) || p == ""){
@@ -71,7 +72,7 @@
preempt <- p
}

assetsMat <- stringi::stri_match(line, regex="^#['\\*]\\s*@assets(\\s+(\\S*)(\\s+(\\S+))?\\s*)?$")
assetsMat <- stri_match(line, regex="^#['\\*]\\s*@assets(\\s+(\\S*)(\\s+(\\S+))?\\s*)?$")
if (!is.na(assetsMat[1,1])){
dir <- stri_trim_both(assetsMat[1,3])
if (is.na(dir) || dir == ""){
@@ -88,7 +89,7 @@
assets <- list(dir=dir, path=prefixPath)
}

serMat <- stringi::stri_match(line, regex="^#['\\*]\\s*@serializer(\\s+([^\\s]+)\\s*(.*)\\s*$)?")
serMat <- stri_match(line, regex="^#['\\*]\\s*@serializer(\\s+([^\\s]+)\\s*(.*)\\s*$)?")
if (!is.na(serMat[1,1])){
s <- stri_trim_both(serMat[1,3])
if (is.na(s) || s == ""){
@@ -119,7 +120,7 @@

}

shortSerMat <- stringi::stri_match(line, regex="^#['\\*]\\s*@(json|html)(.*)$")
shortSerMat <- stri_match(line, regex="^#['\\*]\\s*@(json|html)(.*)$")
if (!is.na(shortSerMat[1,2])) {
s <- stri_trim_both(shortSerMat[1,2])
if (!is.null(serializer)){
@@ -149,7 +150,7 @@

}

imageMat <- stringi::stri_match(line, regex="^#['\\*]\\s*@(jpeg|png)([\\s\\(].*)?\\s*$")
imageMat <- stri_match(line, regex="^#['\\*]\\s*@(jpeg|png)([\\s\\(].*)?\\s*$")
if (!is.na(imageMat[1,1])){
if (!is.null(image)){
# Must have already assigned.
@@ -166,39 +167,30 @@
}
}

responseMat <- stringi::stri_match(line, regex="^#['\\*]\\s*@response\\s+(\\w+)\\s+(\\S.+)\\s*$")
responseMat <- stri_match(line, regex="^#['\\*]\\s*@response\\s+(\\w+)\\s+(\\S.+)\\s*$")
if (!is.na(responseMat[1,1])){
resp <- list()
resp[[responseMat[1,2]]] <- list(description=responseMat[1,3])
responses <- c(responses, resp)
}

paramMat <- stringi::stri_match(line, regex="^#['\\*]\\s*@param(\\s+([^\\s]+)(\\s+(.*))?\\s*$)?")
paramMat <- stri_match(line, regex="^#['\\*]\\s*@param(\\s+([^\\s:]+):?([^\\s*]+)?(\\*)?(?:\\s+(.*))?\\s*$)?")
if (!is.na(paramMat[1,2])){
p <- stri_trim_both(paramMat[1,3])
if (is.na(p) || p == ""){
stopOnLine(lineNum, line, "No parameter specified.")
}

name <- paramMat[1,3]
type <- NA

nameType <- stringi::stri_match(name, regex="^([^\\s]+):(\\w+)(\\*?)$")
if (!is.na(nameType[1,1])){
name <- nameType[1,2]
type <- plumberToSwaggerType(nameType[1,3])
#stopOnLine(lineNum, line, "No parameter type specified")
}


reqd <- FALSE
if (!is.na(nameType[1,4])){
reqd <- nameType[1,4] == "*"
if (is.na(name)){
stopOnLine(lineNum, line, "No parameter specified.")
}
params[[name]] <- list(desc=paramMat[1,5], type=type, required=reqd)
type <- stri_replace_all(paramMat[1,4], "$1", regex = "^\\[([^\\]]*)\\]$")
type <- plumberToSwaggerType(type)
isArray <- stri_detect_regex(paramMat[1,4], "^\\[[^\\]]*\\]$")
isArray <- isArray && supportsArray(type)
isArray[is.na(isArray)] <- defaultSwaggerIsArray
required <- identical(paramMat[1,5], "*")

params[[name]] <- list(desc=paramMat[1,6], type=type, required=required, isArray=isArray)
}

tagMat <- stringi::stri_match(line, regex="^#['\\*]\\s*@tag\\s+(\\S.+)\\s*")
tagMat <- stri_match(line, regex="^#['\\*]\\s*@tag\\s+(\\S.+)\\s*")
if (!is.na(tagMat[1,1])){
t <- stri_trim_both(tagMat[1,2])
if (is.na(t) || t == ""){
Expand All @@ -210,7 +202,7 @@ parseBlock <- function(lineNum, file){
tags <- c(tags, t)
}

commentMat <- stringi::stri_match(line, regex="^#['\\*]\\s*([^@\\s].*$)")
commentMat <- stri_match(line, regex="^#['\\*]\\s*([^@\\s].*$)")
if (!is.na(commentMat[1,2])){
comments <- paste(comments, commentMat[1,2])
}
@@ -226,7 +218,7 @@
imageAttr = imageAttr,
serializer = serializer,
assets = assets,
params = params,
params = rev(params),
comments = comments,
responses = responses,
tags = tags
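
To make the reworked @param grammar concrete, here are annotation forms the new regex is meant to accept in a single pass (hypothetical names; the `[]` form is only treated as an array when supportsArray(type) holds):

#* @param name          Untyped, optional parameter
#* @param count:int     Typed scalar
#* @param ids:[int]     Integer array, the new [] syntax
#* @param debug:bool*   Trailing * marks the parameter as required
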
2 changes: 1 addition & 1 deletion R/parse-globals.R
@@ -56,7 +56,7 @@ parseOneGlobal <- function(fields, argument){
fields$produces <- strsplit(def, split="\\s+")[[1]]
},
apiTag={
tagMat <- stringi::stri_match(def, regex="^\\s*(\\w+)\\s+(\\S.+)\\s*$")
tagMat <- stri_match(def, regex="^\\s*(\\w+)\\s+(\\S.+)\\s*$")
name <- tagMat[1,2]
description <- tagMat[1,3]
if(!is.null(fields$tags) && name %in% fields$tags$name) {
13 changes: 10 additions & 3 deletions R/plumber-step.R
@@ -150,7 +150,10 @@ PlumberEndpoint <- R6Class(
responses = NA,
#' @description retrieve endpoint typed parameters
getTypedParams = function(){
data.frame(name=private$regex$names, type=private$regex$types, stringsAsFactors = FALSE)
data.frame(name = private$regex$names,
type = private$regex$types,
isArray = private$regex$areArrays,
stringsAsFactors = FALSE)
},
#' @field params endpoint parameters
params = NA,
@@ -180,8 +183,6 @@
self$verbs <- verbs
self$path <- path

private$regex <- createPathRegex(path)

private$expr <- expr
if (is.expression(expr)){
private$func <- eval(expr, envir)
Expand All @@ -190,6 +191,8 @@ PlumberEndpoint <- R6Class(
}
private$envir <- envir

private$regex <- createPathRegex(path, self$getFuncParams())

if (!missing(serializer) && !is.null(serializer)){
self$serializer <- serializer
}
@@ -216,6 +219,10 @@
#' @param path endpoint path
getPathParams = function(path){
extractPathParams(private$regex, path)
},
#' @description retrieve endpoint expression parameters
getFuncParams = function() {
getArgsMetadata(private$func)
}
),
private = list(
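
The reordering above is deliberate: createPathRegex() now runs after private$func is resolved so it can consume getFuncParams(). A rough, hypothetical illustration of the intent, assuming getArgsMetadata() (defined elsewhere in this PR) derives type hints from a function's formals:

# Hypothetical endpoint implementation: the integer default on `id` can hint its type,
# so a path like /user/<id> may be typed without spelling out /user/<id:int>.
ep_fn <- function(id = 0L) { list(id = id) }
# createPathRegex("/user/<id>", getArgsMetadata(ep_fn))
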
61 changes: 40 additions & 21 deletions R/post-body.R
@@ -1,34 +1,53 @@
postBodyFilter <- function(req){
handled <- req$.internal$postBodyHandled
if (is.null(handled) || handled != TRUE){
body <- paste0(req$rook.input$read_lines(), collapse = "\n")
charset <- getCharacterSet(req$HTTP_CONTENT_TYPE)
args <- parseBody(body, charset)
req$postBody <- body
if (is.null(handled) || handled != TRUE) {
# This will return raw bytes
body <- req$rook.input$read()
type <- req$HTTP_CONTENT_TYPE
args <- parseBody(body, type)
req$args <- c(req$args, args)
req$postBodyRaw <- body
if (isTRUE(getOption("plumber.postBody", TRUE))) {
req$rook.input$rewind()
req$postBody <- paste0(req$rook.input$read_lines(), collapse = "\n")
}
req$.internal$postBodyHandled <- TRUE
}
forward()
}

#' @noRd
parseBody <- function(body, charset = "UTF-8"){
# The body in a curl call can also include querystring formatted data
# Is there data in the request?
if (is.null(body) || length(body) == 0 || body == "") {
return(list())
}
parseBody <- function(body, content_type = NULL) {
if (!is.raw(body)) {body <- charToRaw(body)}
toparse <- list(value = body, content_type = content_type)
parseRaw(toparse)
}

if (is.character(body)) {
Encoding(body) <- charset
}
parseRaw <- function(toparse) {
if (length(toparse$value) == 0L) return(list())
parser <- parserPicker(toparse$content_type, toparse$value[1], toparse$filename)
do.call(parser(), toparse)
}

# Is it JSON data?
if (stri_startswith_fixed(body, "{")) {
ret <- safeFromJSON(body)
parserPicker <- function(content_type, first_byte, filename = NULL) {
# Fast default to json when the first byte is 0x7b (ASCII "{")
if (first_byte == as.raw(123L)) {
return(.globals$parsers$func[["json"]])
}
if (is.null(content_type)) {
return(.globals$parsers$func[["query"]])
}
# else try to find a match
patterns <- .globals$parsers$pattern
parser <- .globals$parsers$func[stri_startswith_fixed(content_type, patterns)]
# Should we warn when multiple parsers match?
# warning("Multiple body parsers matches for content-type : ", toparse$content_type, ". Parser ", names(parser)[1L], " used.")
if (length(parser) == 0L) {
if (is.null(filename)) {
return(.globals$parsers$func[["query"]])
} else {
return(.globals$parsers$func[["octet"]])
}
} else {
# If not handle it as a query string
ret <- parseQS(body)
return(parser[[1L]])
}
ret
}
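
How the picker above is expected to resolve, assuming the built-in parsers are registered under the names it references (json, query, octet); each call returns the registered parser factory:

parserPicker("application/json", charToRaw("{")[1])         # json: first byte is 0x7b
parserPicker(NULL, charToRaw("a")[1])                       # query: no Content-Type header
parserPicker("text/plain", charToRaw("h")[1], "notes.txt")  # prefix match if a matching parser
                                                            # is registered; else octet, because
                                                            # a filename is present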