From b5eb2866cfceb69b0d4dd4948273d679a884fbb2 Mon Sep 17 00:00:00 2001 From: Paul Zabelin Date: Sun, 17 Apr 2016 03:22:31 -0700 Subject: [PATCH] add Go dependencies by godep see https://github.com/tools/godep --- Godeps/Godeps.json | 48 + Godeps/Readme | 5 + .../sabhiram/go-git-ignore/.gitignore | 28 + .../sabhiram/go-git-ignore/.travis.yml | 18 + .../github.com/sabhiram/go-git-ignore/LICENSE | 22 + .../sabhiram/go-git-ignore/README.md | 17 + .../sabhiram/go-git-ignore/ignore.go | 200 + vendor/github.com/soniakeys/graph/.gitignore | 2 + vendor/github.com/soniakeys/graph/.travis.yml | 8 + vendor/github.com/soniakeys/graph/adj.go | 325 + vendor/github.com/soniakeys/graph/adj_RO.go | 387 + vendor/github.com/soniakeys/graph/adj_cg.go | 387 + vendor/github.com/soniakeys/graph/bits.go | 207 + vendor/github.com/soniakeys/graph/bits32.go | 23 + vendor/github.com/soniakeys/graph/bits64.go | 22 + vendor/github.com/soniakeys/graph/dir.go | 538 ++ vendor/github.com/soniakeys/graph/dir_RO.go | 395 + vendor/github.com/soniakeys/graph/dir_cg.go | 395 + vendor/github.com/soniakeys/graph/doc.go | 128 + vendor/github.com/soniakeys/graph/fromlist.go | 418 ++ vendor/github.com/soniakeys/graph/graph.go | 181 + vendor/github.com/soniakeys/graph/hacking.md | 37 + vendor/github.com/soniakeys/graph/mst.go | 244 + vendor/github.com/soniakeys/graph/random.go | 325 + vendor/github.com/soniakeys/graph/readme.md | 38 + vendor/github.com/soniakeys/graph/sssp.go | 881 +++ vendor/github.com/soniakeys/graph/travis.sh | 11 + vendor/github.com/soniakeys/graph/undir.go | 321 + vendor/github.com/soniakeys/graph/undir_RO.go | 659 ++ vendor/github.com/soniakeys/graph/undir_cg.go | 659 ++ vendor/golang.org/x/net/LICENSE | 27 + vendor/golang.org/x/net/PATENTS | 22 + vendor/golang.org/x/net/context/context.go | 156 + .../x/net/context/ctxhttp/cancelreq.go | 19 + .../x/net/context/ctxhttp/cancelreq_go14.go | 23 + .../x/net/context/ctxhttp/ctxhttp.go | 145 + vendor/golang.org/x/net/context/go17.go | 72 + 
vendor/golang.org/x/net/context/pre_go17.go | 300 + vendor/golang.org/x/oauth2/.travis.yml | 14 + vendor/golang.org/x/oauth2/AUTHORS | 3 + vendor/golang.org/x/oauth2/CONTRIBUTING.md | 31 + vendor/golang.org/x/oauth2/CONTRIBUTORS | 3 + vendor/golang.org/x/oauth2/LICENSE | 27 + vendor/golang.org/x/oauth2/README.md | 64 + .../golang.org/x/oauth2/client_appengine.go | 25 + vendor/golang.org/x/oauth2/internal/oauth2.go | 76 + vendor/golang.org/x/oauth2/internal/token.go | 225 + .../golang.org/x/oauth2/internal/transport.go | 69 + vendor/golang.org/x/oauth2/oauth2.go | 337 + vendor/golang.org/x/oauth2/token.go | 158 + vendor/golang.org/x/oauth2/transport.go | 132 + vendor/google.golang.org/api/LICENSE | 27 + .../api/drive/v3/drive-api.json | 2410 ++++++ .../api/drive/v3/drive-gen.go | 6434 +++++++++++++++++ .../api/gensupport/backoff.go | 46 + .../api/gensupport/buffer.go | 77 + .../google.golang.org/api/gensupport/doc.go | 10 + .../google.golang.org/api/gensupport/json.go | 172 + .../google.golang.org/api/gensupport/media.go | 200 + .../api/gensupport/params.go | 50 + .../api/gensupport/resumable.go | 198 + .../google.golang.org/api/gensupport/retry.go | 77 + .../api/googleapi/googleapi.go | 432 ++ .../googleapi/internal/uritemplates/LICENSE | 18 + .../internal/uritemplates/uritemplates.go | 220 + .../googleapi/internal/uritemplates/utils.go | 13 + .../google.golang.org/api/googleapi/types.go | 182 + 67 files changed, 19423 insertions(+) create mode 100644 Godeps/Godeps.json create mode 100644 Godeps/Readme create mode 100644 vendor/github.com/sabhiram/go-git-ignore/.gitignore create mode 100644 vendor/github.com/sabhiram/go-git-ignore/.travis.yml create mode 100644 vendor/github.com/sabhiram/go-git-ignore/LICENSE create mode 100644 vendor/github.com/sabhiram/go-git-ignore/README.md create mode 100644 vendor/github.com/sabhiram/go-git-ignore/ignore.go create mode 100644 vendor/github.com/soniakeys/graph/.gitignore create mode 100644 
vendor/github.com/soniakeys/graph/.travis.yml create mode 100644 vendor/github.com/soniakeys/graph/adj.go create mode 100644 vendor/github.com/soniakeys/graph/adj_RO.go create mode 100644 vendor/github.com/soniakeys/graph/adj_cg.go create mode 100644 vendor/github.com/soniakeys/graph/bits.go create mode 100644 vendor/github.com/soniakeys/graph/bits32.go create mode 100644 vendor/github.com/soniakeys/graph/bits64.go create mode 100644 vendor/github.com/soniakeys/graph/dir.go create mode 100644 vendor/github.com/soniakeys/graph/dir_RO.go create mode 100644 vendor/github.com/soniakeys/graph/dir_cg.go create mode 100644 vendor/github.com/soniakeys/graph/doc.go create mode 100644 vendor/github.com/soniakeys/graph/fromlist.go create mode 100644 vendor/github.com/soniakeys/graph/graph.go create mode 100644 vendor/github.com/soniakeys/graph/hacking.md create mode 100644 vendor/github.com/soniakeys/graph/mst.go create mode 100644 vendor/github.com/soniakeys/graph/random.go create mode 100644 vendor/github.com/soniakeys/graph/readme.md create mode 100644 vendor/github.com/soniakeys/graph/sssp.go create mode 100644 vendor/github.com/soniakeys/graph/travis.sh create mode 100644 vendor/github.com/soniakeys/graph/undir.go create mode 100644 vendor/github.com/soniakeys/graph/undir_RO.go create mode 100644 vendor/github.com/soniakeys/graph/undir_cg.go create mode 100644 vendor/golang.org/x/net/LICENSE create mode 100644 vendor/golang.org/x/net/PATENTS create mode 100644 vendor/golang.org/x/net/context/context.go create mode 100644 vendor/golang.org/x/net/context/ctxhttp/cancelreq.go create mode 100644 vendor/golang.org/x/net/context/ctxhttp/cancelreq_go14.go create mode 100644 vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go create mode 100644 vendor/golang.org/x/net/context/go17.go create mode 100644 vendor/golang.org/x/net/context/pre_go17.go create mode 100644 vendor/golang.org/x/oauth2/.travis.yml create mode 100644 vendor/golang.org/x/oauth2/AUTHORS create mode 100644 
vendor/golang.org/x/oauth2/CONTRIBUTING.md create mode 100644 vendor/golang.org/x/oauth2/CONTRIBUTORS create mode 100644 vendor/golang.org/x/oauth2/LICENSE create mode 100644 vendor/golang.org/x/oauth2/README.md create mode 100644 vendor/golang.org/x/oauth2/client_appengine.go create mode 100644 vendor/golang.org/x/oauth2/internal/oauth2.go create mode 100644 vendor/golang.org/x/oauth2/internal/token.go create mode 100644 vendor/golang.org/x/oauth2/internal/transport.go create mode 100644 vendor/golang.org/x/oauth2/oauth2.go create mode 100644 vendor/golang.org/x/oauth2/token.go create mode 100644 vendor/golang.org/x/oauth2/transport.go create mode 100644 vendor/google.golang.org/api/LICENSE create mode 100644 vendor/google.golang.org/api/drive/v3/drive-api.json create mode 100644 vendor/google.golang.org/api/drive/v3/drive-gen.go create mode 100644 vendor/google.golang.org/api/gensupport/backoff.go create mode 100644 vendor/google.golang.org/api/gensupport/buffer.go create mode 100644 vendor/google.golang.org/api/gensupport/doc.go create mode 100644 vendor/google.golang.org/api/gensupport/json.go create mode 100644 vendor/google.golang.org/api/gensupport/media.go create mode 100644 vendor/google.golang.org/api/gensupport/params.go create mode 100644 vendor/google.golang.org/api/gensupport/resumable.go create mode 100644 vendor/google.golang.org/api/gensupport/retry.go create mode 100644 vendor/google.golang.org/api/googleapi/googleapi.go create mode 100644 vendor/google.golang.org/api/googleapi/internal/uritemplates/LICENSE create mode 100644 vendor/google.golang.org/api/googleapi/internal/uritemplates/uritemplates.go create mode 100644 vendor/google.golang.org/api/googleapi/internal/uritemplates/utils.go create mode 100644 vendor/google.golang.org/api/googleapi/types.go diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json new file mode 100644 index 00000000..e6f80325 --- /dev/null +++ b/Godeps/Godeps.json @@ -0,0 +1,48 @@ +{ + "ImportPath": 
"github.com/prasmussen/gdrive", + "GoVersion": "go1.6", + "GodepVersion": "v61", + "Deps": [ + { + "ImportPath": "github.com/sabhiram/go-git-ignore", + "Rev": "228fcfa2a06e870a3ef238d54c45ea847f492a37" + }, + { + "ImportPath": "github.com/soniakeys/graph", + "Comment": "svg-v0-58-gc265d96", + "Rev": "c265d9676750b13b9520ba4ad4f8359fa1aed9fd" + }, + { + "ImportPath": "golang.org/x/net/context", + "Rev": "fb93926129b8ec0056f2f458b1f519654814edf0" + }, + { + "ImportPath": "golang.org/x/net/context/ctxhttp", + "Rev": "fb93926129b8ec0056f2f458b1f519654814edf0" + }, + { + "ImportPath": "golang.org/x/oauth2", + "Rev": "7e9cd5d59563851383f8f81a7fbb01213709387c" + }, + { + "ImportPath": "golang.org/x/oauth2/internal", + "Rev": "7e9cd5d59563851383f8f81a7fbb01213709387c" + }, + { + "ImportPath": "google.golang.org/api/drive/v3", + "Rev": "9737cc9e103c00d06a8f3993361dec083df3d252" + }, + { + "ImportPath": "google.golang.org/api/gensupport", + "Rev": "9737cc9e103c00d06a8f3993361dec083df3d252" + }, + { + "ImportPath": "google.golang.org/api/googleapi", + "Rev": "9737cc9e103c00d06a8f3993361dec083df3d252" + }, + { + "ImportPath": "google.golang.org/api/googleapi/internal/uritemplates", + "Rev": "9737cc9e103c00d06a8f3993361dec083df3d252" + } + ] +} diff --git a/Godeps/Readme b/Godeps/Readme new file mode 100644 index 00000000..4cdaa53d --- /dev/null +++ b/Godeps/Readme @@ -0,0 +1,5 @@ +This directory tree is generated automatically by godep. + +Please do not edit. + +See https://github.com/tools/godep for more information. 
diff --git a/vendor/github.com/sabhiram/go-git-ignore/.gitignore b/vendor/github.com/sabhiram/go-git-ignore/.gitignore new file mode 100644 index 00000000..0e919aff --- /dev/null +++ b/vendor/github.com/sabhiram/go-git-ignore/.gitignore @@ -0,0 +1,28 @@ +# Package test fixtures +test_fixtures + +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof + diff --git a/vendor/github.com/sabhiram/go-git-ignore/.travis.yml b/vendor/github.com/sabhiram/go-git-ignore/.travis.yml new file mode 100644 index 00000000..24ddadf1 --- /dev/null +++ b/vendor/github.com/sabhiram/go-git-ignore/.travis.yml @@ -0,0 +1,18 @@ +language: go + +go: + - 1.3 + - tip + +env: + - "PATH=$HOME/gopath/bin:$PATH" + +before_install: + - go get github.com/stretchr/testify/assert + - go get github.com/axw/gocov/gocov + - go get github.com/mattn/goveralls + - if ! 
go get code.google.com/p/go.tools/cmd/cover; then go get golang.org/x/tools/cmd/cover; fi + +script: + - go test -v -covermode=count -coverprofile=coverage.out + - goveralls -coverprofile=coverage.out -service travis-ci -repotoken $COVERALLS_TOKEN diff --git a/vendor/github.com/sabhiram/go-git-ignore/LICENSE b/vendor/github.com/sabhiram/go-git-ignore/LICENSE new file mode 100644 index 00000000..c606f49e --- /dev/null +++ b/vendor/github.com/sabhiram/go-git-ignore/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2015 Shaba Abhiram + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
+ diff --git a/vendor/github.com/sabhiram/go-git-ignore/README.md b/vendor/github.com/sabhiram/go-git-ignore/README.md new file mode 100644 index 00000000..fbbb3761 --- /dev/null +++ b/vendor/github.com/sabhiram/go-git-ignore/README.md @@ -0,0 +1,17 @@ +# go-git-ignore + +[![Build Status](https://travis-ci.org/sabhiram/go-git-ignore.svg)](https://travis-ci.org/sabhiram/go-git-ignore) [![Coverage Status](https://coveralls.io/repos/sabhiram/go-git-ignore/badge.png?branch=master)](https://coveralls.io/r/sabhiram/go-git-ignore?branch=master) + +A gitignore parser for `Go` + +## Install + +```shell +go get github.com/sabhiram/go-git-ignore +``` + +## Usage + +```shell +TODO +``` diff --git a/vendor/github.com/sabhiram/go-git-ignore/ignore.go b/vendor/github.com/sabhiram/go-git-ignore/ignore.go new file mode 100644 index 00000000..e3241b2c --- /dev/null +++ b/vendor/github.com/sabhiram/go-git-ignore/ignore.go @@ -0,0 +1,200 @@ +/* +ignore is a library which returns a new ignorer object which can +test against various paths. This is particularly useful when trying +to filter files based on a .gitignore document + +The rules for parsing the input file are the same as the ones listed +in the Git docs here: http://git-scm.com/docs/gitignore + +The summarized version of the same has been copied here: + + 1. A blank line matches no files, so it can serve as a separator + for readability. + 2. A line starting with # serves as a comment. Put a backslash ("\") + in front of the first hash for patterns that begin with a hash. + 3. Trailing spaces are ignored unless they are quoted with backslash ("\"). + 4. An optional prefix "!" which negates the pattern; any matching file + excluded by a previous pattern will become included again. It is not + possible to re-include a file if a parent directory of that file is + excluded. Git doesn’t list excluded directories for performance reasons, + so any patterns on contained files have no effect, no matter where they + are defined. 
Put a backslash ("\") in front of the first "!" for + patterns that begin with a literal "!", for example, "\!important!.txt". + 5. If the pattern ends with a slash, it is removed for the purpose of the + following description, but it would only find a match with a directory. + In other words, foo/ will match a directory foo and paths underneath it, + but will not match a regular file or a symbolic link foo (this is + consistent with the way how pathspec works in general in Git). + 6. If the pattern does not contain a slash /, Git treats it as a shell glob + pattern and checks for a match against the pathname relative to the + location of the .gitignore file (relative to the toplevel of the work + tree if not from a .gitignore file). + 7. Otherwise, Git treats the pattern as a shell glob suitable for + consumption by fnmatch(3) with the FNM_PATHNAME flag: wildcards in the + pattern will not match a / in the pathname. For example, + "Documentation/*.html" matches "Documentation/git.html" but not + "Documentation/ppc/ppc.html" or "tools/perf/Documentation/perf.html". + 8. A leading slash matches the beginning of the pathname. For example, + "/*.c" matches "cat-file.c" but not "mozilla-sha1/sha1.c". + 9. Two consecutive asterisks ("**") in patterns matched against full + pathname may have special meaning: + i. A leading "**" followed by a slash means match in all directories. + For example, "** /foo" matches file or directory "foo" anywhere, + the same as pattern "foo". "** /foo/bar" matches file or directory + "bar" anywhere that is directly under directory "foo". + ii. A trailing "/**" matches everything inside. For example, "abc/**" + matches all files inside directory "abc", relative to the location + of the .gitignore file, with infinite depth. + iii. A slash followed by two consecutive asterisks then a slash matches + zero or more directories. For example, "a/** /b" matches "a/b", + "a/x/b", "a/x/y/b" and so on. + iv. 
Other consecutive asterisks are considered invalid. */ +package ignore + +import ( + "io/ioutil" + "os" + "regexp" + "strings" +) + +// An IgnoreParser is an interface which exposes two methods: +// MatchesPath() - Returns true if the path is targeted by the patterns compiled in the GitIgnore structure +type IgnoreParser interface { + IncludesPath(f string) bool + IgnoresPath(f string) bool + MatchesPath(f string) bool +} + +// GitIgnore is a struct which contains a slice of regexp.Regexp +// patterns +type GitIgnore struct { + patterns []*regexp.Regexp // List of regexp patterns which this ignore file applies + negate []bool // List of booleans which determine if the pattern is negated +} + +// This function pretty much attempts to mimic the parsing rules +// listed above at the start of this file +func getPatternFromLine(line string) (*regexp.Regexp, bool) { + // Trim OS-specific carriage returns. + line = strings.TrimRight(line, "\r") + + // Strip comments [Rule 2] + if strings.HasPrefix(line, `#`) { + return nil, false + } + + // Trim string [Rule 3] + // TODO: Handle [Rule 3], when the " " is escaped with a \ + line = strings.Trim(line, " ") + + // Exit for no-ops and return nil which will prevent us from + // appending a pattern against this line + if line == "" { + return nil, false + } + + // TODO: Handle [Rule 4] which negates the match for patterns leading with "!" + negatePattern := false + if line[0] == '!' { + negatePattern = true + line = line[1:] + } + + // Handle [Rule 2, 4], when # or ! is escaped with a \ + // Handle [Rule 4] once we tag negatePattern, strip the leading ! char + if regexp.MustCompile(`^(\#|\!)`).MatchString(line) { + line = line[1:] + } + + // If we encounter a foo/*.blah in a folder, prepend the / char + if regexp.MustCompile(`([^\/+])/.*\*\.`).MatchString(line) && line[0] != '/' { + line = "/" + line + } + + // Handle escaping the "." 
char + line = regexp.MustCompile(`\.`).ReplaceAllString(line, `\.`) + + magicStar := "#$~" + + // Handle "/**/" usage + if strings.HasPrefix(line, "/**/") { + line = line[1:] + } + line = regexp.MustCompile(`/\*\*/`).ReplaceAllString(line, `(/|/.+/)`) + line = regexp.MustCompile(`\*\*/`).ReplaceAllString(line, `(|.`+magicStar+`/)`) + line = regexp.MustCompile(`/\*\*`).ReplaceAllString(line, `(|/.`+magicStar+`)`) + + // Handle escaping the "*" char + line = regexp.MustCompile(`\\\*`).ReplaceAllString(line, `\`+magicStar) + line = regexp.MustCompile(`\*`).ReplaceAllString(line, `([^/]*)`) + + // Handle escaping the "?" char + line = strings.Replace(line, "?", `\?`, -1) + + line = strings.Replace(line, magicStar, "*", -1) + + // Temporary regex + var expr = "" + if strings.HasSuffix(line, "/") { + expr = line + "(|.*)$" + } else { + expr = line + "(|/.*)$" + } + if strings.HasPrefix(expr, "/") { + expr = "^(|/)" + expr[1:] + } else { + expr = "^(|.*/)" + expr + } + pattern, _ := regexp.Compile(expr) + + return pattern, negatePattern +} + +// Accepts a variadic set of strings, and returns a GitIgnore object which +// converts and appends the lines in the input to regexp.Regexp patterns +// held within the GitIgnore objects "patterns" field +func CompileIgnoreLines(lines ...string) (*GitIgnore, error) { + g := new(GitIgnore) + for _, line := range lines { + pattern, negatePattern := getPatternFromLine(line) + if pattern != nil { + g.patterns = append(g.patterns, pattern) + g.negate = append(g.negate, negatePattern) + } + } + return g, nil +} + +// Accepts a ignore file as the input, parses the lines out of the file +// and invokes the CompileIgnoreLines method +func CompileIgnoreFile(fpath string) (*GitIgnore, error) { + buffer, error := ioutil.ReadFile(fpath) + if error == nil { + s := strings.Split(string(buffer), "\n") + return CompileIgnoreLines(s...) + } + return nil, error +} + +// MatchesPath is an interface function for the IgnoreParser interface. 
+// It returns true if the given GitIgnore structure would target a given +// path string "f" +func (g GitIgnore) MatchesPath(f string) bool { + // Replace OS-specific path separator. + f = strings.Replace(f, string(os.PathSeparator), "/", -1) + + matchesPath := false + for idx, pattern := range g.patterns { + if pattern.MatchString(f) { + // If this is a regular target (not negated with a gitignore exclude "!" etc) + if !g.negate[idx] { + matchesPath = true + // Negated pattern, and matchesPath is already set + } else if matchesPath { + matchesPath = false + } + } + } + return matchesPath +} diff --git a/vendor/github.com/soniakeys/graph/.gitignore b/vendor/github.com/soniakeys/graph/.gitignore new file mode 100644 index 00000000..3be61584 --- /dev/null +++ b/vendor/github.com/soniakeys/graph/.gitignore @@ -0,0 +1,2 @@ +*.dot + diff --git a/vendor/github.com/soniakeys/graph/.travis.yml b/vendor/github.com/soniakeys/graph/.travis.yml new file mode 100644 index 00000000..bcc4f9fe --- /dev/null +++ b/vendor/github.com/soniakeys/graph/.travis.yml @@ -0,0 +1,8 @@ +sudo: false +language: go +# update travis.sh when changing version number here +go: + - 1.2.1 + - 1.6 +install: go get -t ./... +script: ./travis.sh diff --git a/vendor/github.com/soniakeys/graph/adj.go b/vendor/github.com/soniakeys/graph/adj.go new file mode 100644 index 00000000..165f365b --- /dev/null +++ b/vendor/github.com/soniakeys/graph/adj.go @@ -0,0 +1,325 @@ +// Copyright 2014 Sonia Keys +// License MIT: https://opensource.org/licenses/MIT + +package graph + +// adj.go contains methods on AdjacencyList and LabeledAdjacencyList. +// +// AdjacencyList methods are placed first and are alphabetized. +// LabeledAdjacencyList methods follow, also alphabetized. +// Only exported methods need be alphabetized; non-exported methods can +// be left near their use. 
+ +import ( + "math" + "sort" +) + +// HasParallelSort identifies if a graph contains parallel arcs, multiple arcs +// that lead from a node to the same node. +// +// If the graph has parallel arcs, the results fr and to represent an example +// where there are parallel arcs from node fr to node to. +// +// If there are no parallel arcs, the method returns false -1 -1. +// +// Multiple loops on a node count as parallel arcs. +// +// "Sort" in the method name indicates that sorting is used to detect parallel +// arcs. Compared to method HasParallelMap, this may give better performance +// for small or sparse graphs but will have asymtotically worse performance for +// large dense graphs. +func (g AdjacencyList) HasParallelSort() (has bool, fr, to NI) { + var t NodeList + for n, to := range g { + if len(to) == 0 { + continue + } + // different code in the labeled version, so no code gen. + t = append(t[:0], to...) + sort.Sort(t) + t0 := t[0] + for _, to := range t[1:] { + if to == t0 { + return true, NI(n), t0 + } + t0 = to + } + } + return false, -1, -1 +} + +// IsUndirected returns true if g represents an undirected graph. +// +// Returns true when all non-loop arcs are paired in reciprocal pairs. +// Otherwise returns false and an example unpaired arc. 
+func (g AdjacencyList) IsUndirected() (u bool, from, to NI) { + // similar code in dot/writeUndirected + unpaired := make(AdjacencyList, len(g)) + for fr, to := range g { + arc: // for each arc in g + for _, to := range to { + if to == NI(fr) { + continue // loop + } + // search unpaired arcs + ut := unpaired[to] + for i, u := range ut { + if u == NI(fr) { // found reciprocal + last := len(ut) - 1 + ut[i] = ut[last] + unpaired[to] = ut[:last] + continue arc + } + } + // reciprocal not found + unpaired[fr] = append(unpaired[fr], to) + } + } + for fr, to := range unpaired { + if len(to) > 0 { + return false, NI(fr), to[0] + } + } + return true, -1, -1 +} + +// Edgelist constructs the edge list rerpresentation of a graph. +// +// An edge is returned for each arc of the graph. For undirected graphs +// this includes reciprocal edges. +// +// See also WeightedEdgeList method. +func (g LabeledAdjacencyList) EdgeList() (el []LabeledEdge) { + for fr, to := range g { + for _, to := range to { + el = append(el, LabeledEdge{Edge{NI(fr), to.To}, to.Label}) + } + } + return +} + +// FloydWarshall finds all pairs shortest distances for a simple weighted +// graph without negative cycles. +// +// In result array d, d[i][j] will be the shortest distance from node i +// to node j. Any diagonal element < 0 indicates a negative cycle exists. +// +// If g is an undirected graph with no negative edge weights, the result +// array will be a distance matrix, for example as used by package +// github.com/soniakeys/cluster. +func (g LabeledAdjacencyList) FloydWarshall(w WeightFunc) (d [][]float64) { + d = newFWd(len(g)) + for fr, to := range g { + for _, to := range to { + d[fr][to.To] = w(to.Label) + } + } + solveFW(d) + return +} + +// little helper function, makes a blank matrix for FloydWarshall. 
+func newFWd(n int) [][]float64 { + d := make([][]float64, n) + for i := range d { + di := make([]float64, n) + for j := range di { + if j != i { + di[j] = math.Inf(1) + } + } + d[i] = di + } + return d +} + +// Floyd Warshall solver, once the matrix d is initialized by arc weights. +func solveFW(d [][]float64) { + for k, dk := range d { + for _, di := range d { + dik := di[k] + for j := range d { + if d2 := dik + dk[j]; d2 < di[j] { + di[j] = d2 + } + } + } + } +} + +// HasArcLabel returns true if g has any arc from node fr to node to +// with label l. +// +// Also returned is the index within the slice of arcs from node fr. +// If no arc from fr to to is present, HasArcLabel returns false, -1. +func (g LabeledAdjacencyList) HasArcLabel(fr, to NI, l LI) (bool, int) { + t := Half{to, l} + for x, h := range g[fr] { + if h == t { + return true, x + } + } + return false, -1 +} + +// HasParallelSort identifies if a graph contains parallel arcs, multiple arcs +// that lead from a node to the same node. +// +// If the graph has parallel arcs, the results fr and to represent an example +// where there are parallel arcs from node fr to node to. +// +// If there are no parallel arcs, the method returns -1 -1. +// +// Multiple loops on a node count as parallel arcs. +// +// "Sort" in the method name indicates that sorting is used to detect parallel +// arcs. Compared to method HasParallelMap, this may give better performance +// for small or sparse graphs but will have asymtotically worse performance for +// large dense graphs. 
+func (g LabeledAdjacencyList) HasParallelSort() (has bool, fr, to NI) { + var t NodeList + for n, to := range g { + if len(to) == 0 { + continue + } + // slightly different code needed here compared to AdjacencyList + t = t[:0] + for _, to := range to { + t = append(t, to.To) + } + sort.Sort(t) + t0 := t[0] + for _, to := range t[1:] { + if to == t0 { + return true, NI(n), t0 + } + t0 = to + } + } + return false, -1, -1 +} + +// IsUndirected returns true if g represents an undirected graph. +// +// Returns true when all non-loop arcs are paired in reciprocal pairs with +// matching labels. Otherwise returns false and an example unpaired arc. +// +// Note the requirement that reciprocal pairs have matching labels is +// an additional test not present in the otherwise equivalent unlabeled version +// of IsUndirected. +func (g LabeledAdjacencyList) IsUndirected() (u bool, from NI, to Half) { + unpaired := make(LabeledAdjacencyList, len(g)) + for fr, to := range g { + arc: // for each arc in g + for _, to := range to { + if to.To == NI(fr) { + continue // loop + } + // search unpaired arcs + ut := unpaired[to.To] + for i, u := range ut { + if u.To == NI(fr) && u.Label == to.Label { // found reciprocal + last := len(ut) - 1 + ut[i] = ut[last] + unpaired[to.To] = ut[:last] + continue arc + } + } + // reciprocal not found + unpaired[fr] = append(unpaired[fr], to) + } + } + for fr, to := range unpaired { + if len(to) > 0 { + return false, NI(fr), to[0] + } + } + return true, -1, to +} + +// NegativeArc returns true if the receiver graph contains a negative arc. +func (g LabeledAdjacencyList) NegativeArc(w WeightFunc) bool { + for _, nbs := range g { + for _, nb := range nbs { + if w(nb.Label) < 0 { + return true + } + } + } + return false +} + +// Unlabeled constructs the unlabeled graph corresponding to g. 
+func (g LabeledAdjacencyList) Unlabeled() AdjacencyList { + a := make(AdjacencyList, len(g)) + for n, nbs := range g { + to := make([]NI, len(nbs)) + for i, nb := range nbs { + to[i] = nb.To + } + a[n] = to + } + return a +} + +// WeightedEdgeList constructs a WeightedEdgeList object from a +// LabeledAdjacencyList. +// +// Internally it calls g.EdgeList() to obtain the Edges member. +// See LabeledAdjacencyList.EdgeList(). +func (g LabeledAdjacencyList) WeightedEdgeList(w WeightFunc) *WeightedEdgeList { + return &WeightedEdgeList{ + Order: len(g), + WeightFunc: w, + Edges: g.EdgeList(), + } +} + +// WeightedInDegree computes the weighted in-degree of each node in g +// for a given weight function w. +// +// The weighted in-degree of a node is the sum of weights of arcs going to +// the node. +// +// A weighted degree of a node is often termed the "strength" of a node. +// +// See note for undirected graphs at LabeledAdjacencyList.WeightedOutDegree. +func (g LabeledAdjacencyList) WeightedInDegree(w WeightFunc) []float64 { + ind := make([]float64, len(g)) + for _, to := range g { + for _, to := range to { + ind[to.To] += w(to.Label) + } + } + return ind +} + +// WeightedOutDegree computes the weighted out-degree of the specified node +// for a given weight function w. +// +// The weighted out-degree of a node is the sum of weights of arcs going from +// the node. +// +// A weighted degree of a node is often termed the "strength" of a node. +// +// Note for undirected graphs, the WeightedOutDegree result for a node will +// equal the WeightedInDegree for the node. You can use WeightedInDegree if +// you have need for the weighted degrees of all nodes or use WeightedOutDegree +// to compute the weighted degrees of individual nodes. In either case loops +// are counted just once, unlike the (unweighted) UndirectedDegree methods. 
+func (g LabeledAdjacencyList) WeightedOutDegree(n NI, w WeightFunc) (d float64) { + for _, to := range g[n] { + d += w(to.Label) + } + return +} + +// More about loops and strength: I didn't see consensus on this especially +// in the case of undirected graphs. Some sources said to add in-degree and +// out-degree, which would seemingly double both loops and non-loops. +// Some said to double loops. Some said sum the edge weights and had no +// comment on loops. R of course makes everything an option. The meaning +// of "strength" where loops exist is unclear. So while I could write an +// UndirectedWeighted degree function that doubles loops but not edges, +// I'm going to just leave this for now. diff --git a/vendor/github.com/soniakeys/graph/adj_RO.go b/vendor/github.com/soniakeys/graph/adj_RO.go new file mode 100644 index 00000000..1d37d14e --- /dev/null +++ b/vendor/github.com/soniakeys/graph/adj_RO.go @@ -0,0 +1,387 @@ +// Copyright 2014 Sonia Keys +// License MIT: http://opensource.org/licenses/MIT + +package graph + +// adj_RO.go is code generated from adj_cg.go by directives in graph.go. +// Editing adj_cg.go is okay. +// DO NOT EDIT adj_RO.go. The RO is for Read Only. + +import ( + "math/rand" + "time" +) + +// ArcSize returns the number of arcs in g. +// +// Note that for an undirected graph without loops, the number of undirected +// edges -- the traditional meaning of graph size -- will be ArcSize()/2. +// On the other hand, if g is an undirected graph that has or may have loops, +// g.ArcSize()/2 is not a meaningful quantity. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g AdjacencyList) ArcSize() int { + m := 0 + for _, to := range g { + m += len(to) + } + return m +} + +// BoundsOk validates that all arcs in g stay within the slice bounds of g. +// +// BoundsOk returns true when no arcs point outside the bounds of g. +// Otherwise it returns false and an example arc that points outside of g. 
+// +// Most methods of this package assume the BoundsOk condition and may +// panic when they encounter an arc pointing outside of the graph. This +// function can be used to validate a graph when the BoundsOk condition +// is unknown. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g AdjacencyList) BoundsOk() (ok bool, fr NI, to NI) { + for fr, to := range g { + for _, to := range to { + if to < 0 || to >= NI(len(g)) { + return false, NI(fr), to + } + } + } + return true, -1, to +} + +// BreadthFirst traverses a directed or undirected graph in breadth first order. +// +// Argument start is the start node for the traversal. If r is nil, nodes are +// visited in deterministic order. If a random number generator is supplied, +// nodes at each level are visited in random order. +// +// Argument f can be nil if you have no interest in the FromList path result. +// If FromList f is non-nil, the method populates f.Paths and sets f.MaxLen. +// It does not set f.Leaves. For convenience argument f can be a zero value +// FromList. If f.Paths is nil, the FromList is initialized first. If f.Paths +// is non-nil however, the FromList is used as is. The method uses a value of +// PathEnd.Len == 0 to indentify unvisited nodes. Existing non-zero values +// will limit the traversal. +// +// Traversal calls the visitor function v for each node starting with node +// start. If v returns true, traversal continues. If v returns false, the +// traversal terminates immediately. PathEnd Len and From values are updated +// before calling the visitor function. +// +// On return f.Paths and f.MaxLen are set but not f.Leaves. +// +// Returned is the number of nodes visited and ok = true if the traversal +// ran to completion or ok = false if it was terminated by the visitor +// function returning false. +// +// There are equivalent labeled and unlabeled versions of this method. 
+func (g AdjacencyList) BreadthFirst(start NI, r *rand.Rand, f *FromList, v OkNodeVisitor) (visited int, ok bool) { + switch { + case f == nil: + e := NewFromList(len(g)) + f = &e + case f.Paths == nil: + *f = NewFromList(len(g)) + } + rp := f.Paths + // the frontier consists of nodes all at the same level + frontier := []NI{start} + level := 1 + // assign path when node is put on frontier, + rp[start] = PathEnd{Len: level, From: -1} + for { + f.MaxLen = level + level++ + var next []NI + if r == nil { + for _, n := range frontier { + visited++ + if !v(n) { // visit nodes as they come off frontier + return + } + for _, nb := range g[n] { + if rp[nb].Len == 0 { + next = append(next, nb) + rp[nb] = PathEnd{From: n, Len: level} + } + } + } + } else { // take nodes off frontier at random + for _, i := range r.Perm(len(frontier)) { + n := frontier[i] + // remainder of block same as above + visited++ + if !v(n) { + return + } + for _, nb := range g[n] { + if rp[nb].Len == 0 { + next = append(next, nb) + rp[nb] = PathEnd{From: n, Len: level} + } + } + } + } + if len(next) == 0 { + break + } + frontier = next + } + return visited, true +} + +// BreadthFirstPath finds a single path from start to end with a minimum +// number of nodes. +// +// Returned is the path as list of nodes. +// The result is nil if no path was found. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g AdjacencyList) BreadthFirstPath(start, end NI) []NI { + var f FromList + g.BreadthFirst(start, nil, &f, func(n NI) bool { return n != end }) + return f.PathTo(end, nil) +} + +// Copy makes a deep copy of g. +// Copy also computes the arc size ma, the number of arcs. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g AdjacencyList) Copy() (c AdjacencyList, ma int) { + c = make(AdjacencyList, len(g)) + for n, to := range g { + c[n] = append([]NI{}, to...) + ma += len(to) + } + return +} + +// DepthFirst traverses a graph depth first. 
+// +// As it traverses it calls visitor function v for each node. If v returns +// false at any point, the traversal is terminated immediately and DepthFirst +// returns false. Otherwise DepthFirst returns true. +// +// DepthFirst uses argument bm is used as a bitmap to guide the traversal. +// For a complete traversal, bm should be 0 initially. During the +// traversal, bits are set corresponding to each node visited. +// The bit is set before calling the visitor function. +// +// Argument bm can be nil if you have no need for it. +// In this case a bitmap is created internally for one-time use. +// +// Alternatively v can be nil. In this case traversal still proceeds and +// updates the bitmap, which can be a useful result. +// DepthFirst always returns true in this case. +// +// It makes no sense for both bm and v to be nil. In this case DepthFirst +// returns false immediately. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g AdjacencyList) DepthFirst(start NI, bm *Bits, v OkNodeVisitor) (ok bool) { + if bm == nil { + if v == nil { + return false + } + bm = &Bits{} + } + var df func(n NI) bool + df = func(n NI) bool { + if bm.Bit(n) == 1 { + return true + } + bm.SetBit(n, 1) + if v != nil && !v(n) { + return false + } + for _, nb := range g[n] { + if !df(nb) { + return false + } + } + return true + } + return df(start) +} + +// DepthFirstRandom traverses a graph depth first, but following arcs in +// random order among arcs from a single node. +// +// If Rand r is nil, the method creates a new source and generator for +// one-time use. +// +// Usage is otherwise like the DepthFirst method. See DepthFirst. +// +// There are equivalent labeled and unlabeled versions of this method. 
+func (g AdjacencyList) DepthFirstRandom(start NI, bm *Bits, v OkNodeVisitor, r *rand.Rand) (ok bool) { + if bm == nil { + if v == nil { + return false + } + bm = &Bits{} + } + if r == nil { + r = rand.New(rand.NewSource(time.Now().UnixNano())) + } + var df func(n NI) bool + df = func(n NI) bool { + if bm.Bit(n) == 1 { + return true + } + bm.SetBit(n, 1) + if v != nil && !v(n) { + return false + } + to := g[n] + for _, i := range r.Perm(len(to)) { + if !df(to[i]) { + return false + } + } + return true + } + return df(start) +} + +// HasArc returns true if g has any arc from node fr to node to. +// +// Also returned is the index within the slice of arcs from node fr. +// If no arc from fr to to is present, HasArc returns false, -1. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g AdjacencyList) HasArc(fr, to NI) (bool, int) { + for x, h := range g[fr] { + if h == to { + return true, x + } + } + return false, -1 +} + +// HasLoop identifies if a graph contains a loop, an arc that leads from a +// a node back to the same node. +// +// If the graph has a loop, the result is an example node that has a loop. +// +// If g contains a loop, the method returns true and an example of a node +// with a loop. If there are no loops in g, the method returns false, -1. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g AdjacencyList) HasLoop() (bool, NI) { + for fr, to := range g { + for _, to := range to { + if NI(fr) == to { + return true, to + } + } + } + return false, -1 +} + +// HasParallelMap identifies if a graph contains parallel arcs, multiple arcs +// that lead from a node to the same node. +// +// If the graph has parallel arcs, the method returns true and +// results fr and to represent an example where there are parallel arcs +// from node fr to node to. +// +// If there are no parallel arcs, the method returns false, -1 -1. +// +// Multiple loops on a node count as parallel arcs. 
+// +// "Map" in the method name indicates that a Go map is used to detect parallel +// arcs. Compared to method HasParallelSort, this gives better asymtotic +// performance for large dense graphs but may have increased overhead for +// small or sparse graphs. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g AdjacencyList) HasParallelMap() (has bool, fr, to NI) { + for n, to := range g { + if len(to) == 0 { + continue + } + m := map[NI]struct{}{} + for _, to := range to { + if _, ok := m[to]; ok { + return true, NI(n), to + } + m[to] = struct{}{} + } + } + return false, -1, -1 +} + +// IsSimple checks for loops and parallel arcs. +// +// A graph is "simple" if it has no loops or parallel arcs. +// +// IsSimple returns true, -1 for simple graphs. If a loop or parallel arc is +// found, simple returns false and a node that represents a counterexample +// to the graph being simple. +// +// See also separate methods HasLoop and HasParallel. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g AdjacencyList) IsSimple() (ok bool, n NI) { + if lp, n := g.HasLoop(); lp { + return false, n + } + if pa, n, _ := g.HasParallelSort(); pa { + return false, n + } + return true, -1 +} + +// IsolatedNodes returns a bitmap of isolated nodes in receiver graph g. +// +// An isolated node is one with no arcs going to or from it. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g AdjacencyList) IsolatedNodes() (i Bits) { + i.SetAll(len(g)) + for fr, to := range g { + if len(to) > 0 { + i.SetBit(NI(fr), 0) + for _, to := range to { + i.SetBit(to, 0) + } + } + } + return +} + +/* +MaxmimalClique finds a maximal clique containing the node n. + +Not sure this is good for anything. It produces a single maximal clique +but there can be multiple maximal cliques containing a given node. +This algorithm just returns one of them, not even necessarily the +largest one. 
+ +func (g LabeledAdjacencyList) MaximalClique(n int) []int { + c := []int{n} + var m bitset.BitSet + m.Set(uint(n)) + for fr, to := range g { + if fr == n { + continue + } + if len(to) < len(c) { + continue + } + f := 0 + for _, to := range to { + if m.Test(uint(to.To)) { + f++ + if f == len(c) { + c = append(c, to.To) + m.Set(uint(to.To)) + break + } + } + } + } + return c +} +*/ diff --git a/vendor/github.com/soniakeys/graph/adj_cg.go b/vendor/github.com/soniakeys/graph/adj_cg.go new file mode 100644 index 00000000..a484ee04 --- /dev/null +++ b/vendor/github.com/soniakeys/graph/adj_cg.go @@ -0,0 +1,387 @@ +// Copyright 2014 Sonia Keys +// License MIT: http://opensource.org/licenses/MIT + +package graph + +// adj_RO.go is code generated from adj_cg.go by directives in graph.go. +// Editing adj_cg.go is okay. +// DO NOT EDIT adj_RO.go. The RO is for Read Only. + +import ( + "math/rand" + "time" +) + +// ArcSize returns the number of arcs in g. +// +// Note that for an undirected graph without loops, the number of undirected +// edges -- the traditional meaning of graph size -- will be ArcSize()/2. +// On the other hand, if g is an undirected graph that has or may have loops, +// g.ArcSize()/2 is not a meaningful quantity. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g LabeledAdjacencyList) ArcSize() int { + m := 0 + for _, to := range g { + m += len(to) + } + return m +} + +// BoundsOk validates that all arcs in g stay within the slice bounds of g. +// +// BoundsOk returns true when no arcs point outside the bounds of g. +// Otherwise it returns false and an example arc that points outside of g. +// +// Most methods of this package assume the BoundsOk condition and may +// panic when they encounter an arc pointing outside of the graph. This +// function can be used to validate a graph when the BoundsOk condition +// is unknown. +// +// There are equivalent labeled and unlabeled versions of this method. 
+func (g LabeledAdjacencyList) BoundsOk() (ok bool, fr NI, to Half) { + for fr, to := range g { + for _, to := range to { + if to.To < 0 || to.To >= NI(len(g)) { + return false, NI(fr), to + } + } + } + return true, -1, to +} + +// BreadthFirst traverses a directed or undirected graph in breadth first order. +// +// Argument start is the start node for the traversal. If r is nil, nodes are +// visited in deterministic order. If a random number generator is supplied, +// nodes at each level are visited in random order. +// +// Argument f can be nil if you have no interest in the FromList path result. +// If FromList f is non-nil, the method populates f.Paths and sets f.MaxLen. +// It does not set f.Leaves. For convenience argument f can be a zero value +// FromList. If f.Paths is nil, the FromList is initialized first. If f.Paths +// is non-nil however, the FromList is used as is. The method uses a value of +// PathEnd.Len == 0 to indentify unvisited nodes. Existing non-zero values +// will limit the traversal. +// +// Traversal calls the visitor function v for each node starting with node +// start. If v returns true, traversal continues. If v returns false, the +// traversal terminates immediately. PathEnd Len and From values are updated +// before calling the visitor function. +// +// On return f.Paths and f.MaxLen are set but not f.Leaves. +// +// Returned is the number of nodes visited and ok = true if the traversal +// ran to completion or ok = false if it was terminated by the visitor +// function returning false. +// +// There are equivalent labeled and unlabeled versions of this method. 
+func (g LabeledAdjacencyList) BreadthFirst(start NI, r *rand.Rand, f *FromList, v OkNodeVisitor) (visited int, ok bool) { + switch { + case f == nil: + e := NewFromList(len(g)) + f = &e + case f.Paths == nil: + *f = NewFromList(len(g)) + } + rp := f.Paths + // the frontier consists of nodes all at the same level + frontier := []NI{start} + level := 1 + // assign path when node is put on frontier, + rp[start] = PathEnd{Len: level, From: -1} + for { + f.MaxLen = level + level++ + var next []NI + if r == nil { + for _, n := range frontier { + visited++ + if !v(n) { // visit nodes as they come off frontier + return + } + for _, nb := range g[n] { + if rp[nb.To].Len == 0 { + next = append(next, nb.To) + rp[nb.To] = PathEnd{From: n, Len: level} + } + } + } + } else { // take nodes off frontier at random + for _, i := range r.Perm(len(frontier)) { + n := frontier[i] + // remainder of block same as above + visited++ + if !v(n) { + return + } + for _, nb := range g[n] { + if rp[nb.To].Len == 0 { + next = append(next, nb.To) + rp[nb.To] = PathEnd{From: n, Len: level} + } + } + } + } + if len(next) == 0 { + break + } + frontier = next + } + return visited, true +} + +// BreadthFirstPath finds a single path from start to end with a minimum +// number of nodes. +// +// Returned is the path as list of nodes. +// The result is nil if no path was found. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g LabeledAdjacencyList) BreadthFirstPath(start, end NI) []NI { + var f FromList + g.BreadthFirst(start, nil, &f, func(n NI) bool { return n != end }) + return f.PathTo(end, nil) +} + +// Copy makes a deep copy of g. +// Copy also computes the arc size ma, the number of arcs. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g LabeledAdjacencyList) Copy() (c LabeledAdjacencyList, ma int) { + c = make(LabeledAdjacencyList, len(g)) + for n, to := range g { + c[n] = append([]Half{}, to...) 
+ ma += len(to) + } + return +} + +// DepthFirst traverses a graph depth first. +// +// As it traverses it calls visitor function v for each node. If v returns +// false at any point, the traversal is terminated immediately and DepthFirst +// returns false. Otherwise DepthFirst returns true. +// +// DepthFirst uses argument bm is used as a bitmap to guide the traversal. +// For a complete traversal, bm should be 0 initially. During the +// traversal, bits are set corresponding to each node visited. +// The bit is set before calling the visitor function. +// +// Argument bm can be nil if you have no need for it. +// In this case a bitmap is created internally for one-time use. +// +// Alternatively v can be nil. In this case traversal still proceeds and +// updates the bitmap, which can be a useful result. +// DepthFirst always returns true in this case. +// +// It makes no sense for both bm and v to be nil. In this case DepthFirst +// returns false immediately. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g LabeledAdjacencyList) DepthFirst(start NI, bm *Bits, v OkNodeVisitor) (ok bool) { + if bm == nil { + if v == nil { + return false + } + bm = &Bits{} + } + var df func(n NI) bool + df = func(n NI) bool { + if bm.Bit(n) == 1 { + return true + } + bm.SetBit(n, 1) + if v != nil && !v(n) { + return false + } + for _, nb := range g[n] { + if !df(nb.To) { + return false + } + } + return true + } + return df(start) +} + +// DepthFirstRandom traverses a graph depth first, but following arcs in +// random order among arcs from a single node. +// +// If Rand r is nil, the method creates a new source and generator for +// one-time use. +// +// Usage is otherwise like the DepthFirst method. See DepthFirst. +// +// There are equivalent labeled and unlabeled versions of this method. 
+func (g LabeledAdjacencyList) DepthFirstRandom(start NI, bm *Bits, v OkNodeVisitor, r *rand.Rand) (ok bool) { + if bm == nil { + if v == nil { + return false + } + bm = &Bits{} + } + if r == nil { + r = rand.New(rand.NewSource(time.Now().UnixNano())) + } + var df func(n NI) bool + df = func(n NI) bool { + if bm.Bit(n) == 1 { + return true + } + bm.SetBit(n, 1) + if v != nil && !v(n) { + return false + } + to := g[n] + for _, i := range r.Perm(len(to)) { + if !df(to[i].To) { + return false + } + } + return true + } + return df(start) +} + +// HasArc returns true if g has any arc from node fr to node to. +// +// Also returned is the index within the slice of arcs from node fr. +// If no arc from fr to to is present, HasArc returns false, -1. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g LabeledAdjacencyList) HasArc(fr, to NI) (bool, int) { + for x, h := range g[fr] { + if h.To == to { + return true, x + } + } + return false, -1 +} + +// HasLoop identifies if a graph contains a loop, an arc that leads from a +// a node back to the same node. +// +// If the graph has a loop, the result is an example node that has a loop. +// +// If g contains a loop, the method returns true and an example of a node +// with a loop. If there are no loops in g, the method returns false, -1. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g LabeledAdjacencyList) HasLoop() (bool, NI) { + for fr, to := range g { + for _, to := range to { + if NI(fr) == to.To { + return true, to.To + } + } + } + return false, -1 +} + +// HasParallelMap identifies if a graph contains parallel arcs, multiple arcs +// that lead from a node to the same node. +// +// If the graph has parallel arcs, the method returns true and +// results fr and to represent an example where there are parallel arcs +// from node fr to node to. +// +// If there are no parallel arcs, the method returns false, -1 -1. 
+// +// Multiple loops on a node count as parallel arcs. +// +// "Map" in the method name indicates that a Go map is used to detect parallel +// arcs. Compared to method HasParallelSort, this gives better asymtotic +// performance for large dense graphs but may have increased overhead for +// small or sparse graphs. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g LabeledAdjacencyList) HasParallelMap() (has bool, fr, to NI) { + for n, to := range g { + if len(to) == 0 { + continue + } + m := map[NI]struct{}{} + for _, to := range to { + if _, ok := m[to.To]; ok { + return true, NI(n), to.To + } + m[to.To] = struct{}{} + } + } + return false, -1, -1 +} + +// IsSimple checks for loops and parallel arcs. +// +// A graph is "simple" if it has no loops or parallel arcs. +// +// IsSimple returns true, -1 for simple graphs. If a loop or parallel arc is +// found, simple returns false and a node that represents a counterexample +// to the graph being simple. +// +// See also separate methods HasLoop and HasParallel. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g LabeledAdjacencyList) IsSimple() (ok bool, n NI) { + if lp, n := g.HasLoop(); lp { + return false, n + } + if pa, n, _ := g.HasParallelSort(); pa { + return false, n + } + return true, -1 +} + +// IsolatedNodes returns a bitmap of isolated nodes in receiver graph g. +// +// An isolated node is one with no arcs going to or from it. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g LabeledAdjacencyList) IsolatedNodes() (i Bits) { + i.SetAll(len(g)) + for fr, to := range g { + if len(to) > 0 { + i.SetBit(NI(fr), 0) + for _, to := range to { + i.SetBit(to.To, 0) + } + } + } + return +} + +/* +MaxmimalClique finds a maximal clique containing the node n. + +Not sure this is good for anything. It produces a single maximal clique +but there can be multiple maximal cliques containing a given node. 
+This algorithm just returns one of them, not even necessarily the +largest one. + +func (g LabeledAdjacencyList) MaximalClique(n int) []int { + c := []int{n} + var m bitset.BitSet + m.Set(uint(n)) + for fr, to := range g { + if fr == n { + continue + } + if len(to) < len(c) { + continue + } + f := 0 + for _, to := range to { + if m.Test(uint(to.To)) { + f++ + if f == len(c) { + c = append(c, to.To) + m.Set(uint(to.To)) + break + } + } + } + } + return c +} +*/ diff --git a/vendor/github.com/soniakeys/graph/bits.go b/vendor/github.com/soniakeys/graph/bits.go new file mode 100644 index 00000000..b86703ca --- /dev/null +++ b/vendor/github.com/soniakeys/graph/bits.go @@ -0,0 +1,207 @@ +// Copyright 2014 Sonia Keys +// License MIT: http://opensource.org/licenses/MIT + +package graph + +import ( + "fmt" + "math/big" +) + +// Bits is bitmap, or bitset, intended to store a single bit of information +// per node of a graph. +// +// The current implementation is backed by a big.Int and so is a reference +// type in the same way a big.Int is. +type Bits struct { + i big.Int +} + +// NewBits constructs a Bits value with the bits ns set to 1. +func NewBits(ns ...NI) (b Bits) { + for _, n := range ns { + b.SetBit(n, 1) + } + return +} + +// AllNot sets n bits of z to the complement of x. +// +// It is a convenience method for SetAll followed by AndNot. +func (z *Bits) AllNot(n int, x Bits) { + var y Bits + y.SetAll(n) + z.AndNot(y, x) +} + +// And sets z = x & y. +func (z *Bits) And(x, y Bits) { + z.i.And(&x.i, &y.i) +} + +// AndNot sets z = x &^ y. +func (z *Bits) AndNot(x, y Bits) { + z.i.AndNot(&x.i, &y.i) +} + +// Bit returns the value of the n'th bit of x. +func (b Bits) Bit(n NI) uint { + return b.i.Bit(int(n)) +} + +// Clear sets all bits to 0. +func (z *Bits) Clear() { + *z = Bits{} +} + +// Format satisfies fmt.Formatter for fmt.Printf and related methods. +// +// graph.Bits format exactly like big.Ints. 
+func (b Bits) Format(s fmt.State, ch rune) { + b.i.Format(s, ch) +} + +// From returns the position of the first 1 bit at or after (from) position n. +// +// It returns -1 if there is no one bit at or after position n. +// +// This provides one way to iterate over one bits. +// To iterate over the one bits, call with n = 0 to get the the first +// one bit, then call with the result + 1 to get successive one bits. +// Unlike the Iterate method, this technique is stateless and so allows +// bits to be changed between successive calls. +// +// See also Iterate. +// +// (From is just a short word that means "at or after" here; +// it has nothing to do with arc direction.) +func (b Bits) From(n NI) NI { + words := b.i.Bits() + i := int(n) + x := i >> wordExp // x now index of word containing bit i. + if x >= len(words) { + return -1 + } + // test for 1 in this word at or after n + if wx := words[x] >> (uint(i) & (wordSize - 1)); wx != 0 { + return n + NI(trailingZeros(wx)) + } + x++ + for y, wy := range words[x:] { + if wy != 0 { + return NI((x+y)<>= uint(t + 1) + if w == 0 { + break + } + t = trailingZeros(w) + i += 1 + t + } + } + } + return true +} + +// Or sets z = x | y. +func (z *Bits) Or(x, y Bits) { + z.i.Or(&x.i, &y.i) +} + +// PopCount returns the number of 1 bits. +func (b Bits) PopCount() (c int) { + // algorithm selected to be efficient for sparse bit sets. + for _, w := range b.i.Bits() { + for w != 0 { + w &= w - 1 + c++ + } + } + return +} + +// Set sets the bits of z to the bits of x. +func (z *Bits) Set(x Bits) { + z.i.Set(&x.i) +} + +var one = big.NewInt(1) + +// SetAll sets z to have n 1 bits. +// +// It's useful for initializing z to have a 1 for each node of a graph. +func (z *Bits) SetAll(n int) { + z.i.Sub(z.i.Lsh(one, uint(n)), one) +} + +// SetBit sets the n'th bit to b, where be is a 0 or 1. +func (z *Bits) SetBit(n NI, b uint) { + z.i.SetBit(&z.i, int(n), b) +} + +// Single returns true if b has exactly one 1 bit. 
+func (b Bits) Single() bool { + // like PopCount, but stop as soon as two are found + c := 0 + for _, w := range b.i.Bits() { + for w != 0 { + w &= w - 1 + c++ + if c == 2 { + return false + } + } + } + return c == 1 +} + +// Slice returns a slice with the positions of each 1 bit. +func (b Bits) Slice() (s []NI) { + // (alternative implementation might use Popcount and make to get the + // exact cap slice up front. unclear if that would be better.) + b.Iterate(func(n NI) bool { + s = append(s, n) + return true + }) + return +} + +// Xor sets z = x ^ y. +func (z *Bits) Xor(x, y Bits) { + z.i.Xor(&x.i, &y.i) +} + +// Zero returns true if there are no 1 bits. +func (b Bits) Zero() bool { + return len(b.i.Bits()) == 0 +} + +// trailingZeros returns the number of trailing 0 bits in v. +// +// If v is 0, it returns 0. +func trailingZeros(v big.Word) int { + return deBruijnBits[v&-v*deBruijnMultiple>>deBruijnShift] +} diff --git a/vendor/github.com/soniakeys/graph/bits32.go b/vendor/github.com/soniakeys/graph/bits32.go new file mode 100644 index 00000000..18e07f9a --- /dev/null +++ b/vendor/github.com/soniakeys/graph/bits32.go @@ -0,0 +1,23 @@ +// Copyright 2014 Sonia Keys +// License MIT: http://opensource.org/licenses/MIT + +// +build 386 arm + +package graph + +// "word" here is math/big.Word +const ( + wordSize = 32 + wordExp = 5 // 2^5 = 32 +) + +// deBruijn magic numbers used in trailingZeros() +// +// reference: http://graphics.stanford.edu/~seander/bithacks.html +const deBruijnMultiple = 0x077CB531 +const deBruijnShift = 27 + +var deBruijnBits = []int{ + 0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8, + 31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9, +} diff --git a/vendor/github.com/soniakeys/graph/bits64.go b/vendor/github.com/soniakeys/graph/bits64.go new file mode 100644 index 00000000..ab601dd6 --- /dev/null +++ b/vendor/github.com/soniakeys/graph/bits64.go @@ -0,0 +1,22 @@ +// Copyright 2014 Sonia Keys +// License MIT: 
http://opensource.org/licenses/MIT + +// +build !386,!arm + +package graph + +const ( + wordSize = 64 + wordExp = 6 // 2^6 = 64 +) + +// reference: http://graphics.stanford.edu/~seander/bithacks.html +const deBruijnMultiple = 0x03f79d71b4ca8b09 +const deBruijnShift = 58 + +var deBruijnBits = []int{ + 0, 1, 56, 2, 57, 49, 28, 3, 61, 58, 42, 50, 38, 29, 17, 4, + 62, 47, 59, 36, 45, 43, 51, 22, 53, 39, 33, 30, 24, 18, 12, 5, + 63, 55, 48, 27, 60, 41, 37, 16, 46, 35, 44, 21, 52, 32, 23, 11, + 54, 26, 40, 15, 34, 20, 31, 10, 25, 14, 19, 9, 13, 8, 7, 6, +} diff --git a/vendor/github.com/soniakeys/graph/dir.go b/vendor/github.com/soniakeys/graph/dir.go new file mode 100644 index 00000000..508306d1 --- /dev/null +++ b/vendor/github.com/soniakeys/graph/dir.go @@ -0,0 +1,538 @@ +// Copyright 2014 Sonia Keys +// License MIT: http://opensource.org/licenses/MIT + +package graph + +// dir.go has methods specific to directed graphs, types Directed and +// LabeledDirected. +// +// Methods on Directed are first, with exported methods alphabetized. + +import "errors" + +// DAGMaxLenPath finds a maximum length path in a directed acyclic graph. +// +// Argument ordering must be a topological ordering of g. +func (g Directed) DAGMaxLenPath(ordering []NI) (path []NI) { + // dynamic programming. visit nodes in reverse order. for each, compute + // longest path as one plus longest of 'to' nodes. + // Visits each arc once. O(m). + // + // Similar code in label.go + var n NI + mlp := make([][]NI, len(g.AdjacencyList)) // index by node number + for i := len(ordering) - 1; i >= 0; i-- { + fr := ordering[i] // node number + to := g.AdjacencyList[fr] + if len(to) == 0 { + continue + } + mt := to[0] + for _, to := range to[1:] { + if len(mlp[to]) > len(mlp[mt]) { + mt = to + } + } + p := append([]NI{mt}, mlp[mt]...) + mlp[fr] = p + if len(p) > len(path) { + n = fr + path = p + } + } + return append([]NI{n}, path...) +} + +// EulerianCycle finds an Eulerian cycle in a directed multigraph. 
+// +// * If g has no nodes, result is nil, nil. +// +// * If g is Eulerian, result is an Eulerian cycle with err = nil. +// The cycle result is a list of nodes, where the first and last +// nodes are the same. +// +// * Otherwise, result is nil, error +// +// Internally, EulerianCycle copies the entire graph g. +// See EulerianCycleD for a more space efficient version. +func (g Directed) EulerianCycle() ([]NI, error) { + c, m := g.Copy() + return c.EulerianCycleD(m) +} + +// EulerianCycleD finds an Eulerian cycle in a directed multigraph. +// +// EulerianCycleD is destructive on its receiver g. See EulerianCycle for +// a non-destructive version. +// +// Argument ma must be the correct arc size, or number of arcs in g. +// +// * If g has no nodes, result is nil, nil. +// +// * If g is Eulerian, result is an Eulerian cycle with err = nil. +// The cycle result is a list of nodes, where the first and last +// nodes are the same. +// +// * Otherwise, result is nil, error +func (g Directed) EulerianCycleD(ma int) ([]NI, error) { + if len(g.AdjacencyList) == 0 { + return nil, nil + } + e := newEulerian(g.AdjacencyList, ma) + for e.s >= 0 { + v := e.top() // v is node that starts cycle + e.push() + // if Eulerian, we'll always come back to starting node + if e.top() != v { + return nil, errors.New("not balanced") + } + e.keep() + } + if !e.uv.Zero() { + return nil, errors.New("not strongly connected") + } + return e.p, nil +} + +// EulerianPath finds an Eulerian path in a directed multigraph. +// +// * If g has no nodes, result is nil, nil. +// +// * If g has an Eulerian path, result is an Eulerian path with err = nil. +// The path result is a list of nodes, where the first node is start. +// +// * Otherwise, result is nil, error +// +// Internally, EulerianPath copies the entire graph g. +// See EulerianPathD for a more space efficient version. 
+func (g Directed) EulerianPath() ([]NI, error) { + ind := g.InDegree() + var start NI + for n, to := range g.AdjacencyList { + if len(to) > ind[n] { + start = NI(n) + break + } + } + c, m := g.Copy() + return c.EulerianPathD(m, start) +} + +// EulerianPathD finds an Eulerian path in a directed multigraph. +// +// EulerianPathD is destructive on its receiver g. See EulerianPath for +// a non-destructive version. +// +// Argument ma must be the correct arc size, or number of arcs in g. +// Argument start must be a valid start node for the path. +// +// * If g has no nodes, result is nil, nil. +// +// * If g has an Eulerian path, result is an Eulerian path with err = nil. +// The path result is a list of nodes, where the first node is start. +// +// * Otherwise, result is nil, error +func (g Directed) EulerianPathD(ma int, start NI) ([]NI, error) { + if len(g.AdjacencyList) == 0 { + return nil, nil + } + e := newEulerian(g.AdjacencyList, ma) + e.p[0] = start + // unlike EulerianCycle, the first path doesn't have be a cycle. + e.push() + e.keep() + for e.s >= 0 { + start = e.top() + e.push() + // paths after the first must be cycles though + // (as long as there are nodes on the stack) + if e.top() != start { + return nil, errors.New("no Eulerian path") + } + e.keep() + } + if !e.uv.Zero() { + return nil, errors.New("no Eulerian path") + } + return e.p, nil +} + +// starting at the node on the top of the stack, follow arcs until stuck. +// mark nodes visited, push nodes on stack, remove arcs from g. +func (e *eulerian) push() { + for u := e.top(); ; { + e.uv.SetBit(u, 0) // reset unvisited bit + arcs := e.g[u] + if len(arcs) == 0 { + return // stuck + } + w := arcs[0] // follow first arc + e.s++ // push followed node on stack + e.p[e.s] = w + e.g[u] = arcs[1:] // consume arc + u = w + } +} + +// like push, but for for undirected graphs. 
+func (e *eulerian) pushUndir() { + for u := e.top(); ; { + e.uv.SetBit(u, 0) + arcs := e.g[u] + if len(arcs) == 0 { + return + } + w := arcs[0] + e.s++ + e.p[e.s] = w + e.g[u] = arcs[1:] // consume arc + // here is the only difference, consume reciprocal arc as well: + a2 := e.g[w] + for x, rx := range a2 { + if rx == u { // here it is + last := len(a2) - 1 + a2[x] = a2[last] // someone else gets the seat + e.g[w] = a2[:last] // and it's gone. + break + } + } + u = w + } +} + +// starting with the node on top of the stack, move nodes with no arcs. +func (e *eulerian) keep() { + for e.s >= 0 { + n := e.top() + if len(e.g[n]) > 0 { + break + } + e.p[e.m] = n + e.s-- + e.m-- + } +} + +type eulerian struct { + g AdjacencyList // working copy of graph, it gets consumed + m int // number of arcs in g, updated as g is consumed + uv Bits // unvisited + // low end of p is stack of unfinished nodes + // high end is finished path + p []NI // stack + path + s int // stack pointer +} + +func (e *eulerian) top() NI { + return e.p[e.s] +} + +func newEulerian(g AdjacencyList, m int) *eulerian { + e := &eulerian{ + g: g, + m: m, + p: make([]NI, m+1), + } + e.uv.SetAll(len(g)) + return e +} + +// MaximalNonBranchingPaths finds all paths in a directed graph that are +// "maximal" and "non-branching". +// +// A non-branching path is one where path nodes other than the first and last +// have exactly one arc leading to the node and one arc leading from the node, +// thus there is no possibility to branch away to a different path. +// +// A maximal non-branching path cannot be extended to a longer non-branching +// path by including another node at either end. +// +// In the case of a cyclic non-branching path, the first and last elements +// of the path will be the same node, indicating an isolated cycle. +// +// The method calls the emit argument for each path or isolated cycle in g, +// as long as emit returns true. 
If emit returns false, +// MaximalNonBranchingPaths returns immediately. +func (g Directed) MaximalNonBranchingPaths(emit func([]NI) bool) { + ind := g.InDegree() + var uv Bits + uv.SetAll(len(g.AdjacencyList)) + for v, vTo := range g.AdjacencyList { + if !(ind[v] == 1 && len(vTo) == 1) { + for _, w := range vTo { + n := []NI{NI(v), w} + uv.SetBit(NI(v), 0) + uv.SetBit(w, 0) + wTo := g.AdjacencyList[w] + for ind[w] == 1 && len(wTo) == 1 { + u := wTo[0] + n = append(n, u) + uv.SetBit(u, 0) + w = u + wTo = g.AdjacencyList[w] + } + if !emit(n) { // n is a path + return + } + } + } + } + // use uv.From rather than uv.Iterate. + // Iterate doesn't work here because we're modifying uv + for b := uv.From(0); b >= 0; b = uv.From(b + 1) { + v := NI(b) + n := []NI{v} + for w := v; ; { + w = g.AdjacencyList[w][0] + uv.SetBit(w, 0) + n = append(n, w) + if w == v { + break + } + } + if !emit(n) { // n is an isolated cycle + return + } + } +} + +// Undirected returns copy of g augmented as needed to make it undirected. +func (g Directed) Undirected() Undirected { + c, _ := g.AdjacencyList.Copy() // start with a copy + rw := make(AdjacencyList, len(g.AdjacencyList)) // "reciprocals wanted" + for fr, to := range g.AdjacencyList { + arc: // for each arc in g + for _, to := range to { + if to == NI(fr) { + continue // loop + } + // search wanted arcs + wf := rw[fr] + for i, w := range wf { + if w == to { // found, remove + last := len(wf) - 1 + wf[i] = wf[last] + rw[fr] = wf[:last] + continue arc + } + } + // arc not found, add to reciprocal to wanted list + rw[to] = append(rw[to], NI(fr)) + } + } + // add missing reciprocals + for fr, to := range rw { + c[fr] = append(c[fr], to...) + } + return Undirected{c} +} + +// StronglyConnectedComponents identifies strongly connected components +// in a directed graph. +// +// Algorithm by David J. Pearce, from "An Improved Algorithm for Finding the +// Strongly Connected Components of a Directed Graph". 
It is algorithm 3, +// PEA_FIND_SCC2 in +// http://homepages.mcs.vuw.ac.nz/~djp/files/P05.pdf, accessed 22 Feb 2015. +// +// Returned is a list of components, each component is a list of nodes. +/* +func (g Directed) StronglyConnectedComponents() []int { + rindex := make([]int, len(g)) + S := []int{} + index := 1 + c := len(g) - 1 + visit := func(v int) { + root := true + rindex[v] = index + index++ + for _, w := range g[v] { + if rindex[w] == 0 { + visit(w) + } + if rindex[w] < rindex[v] { + rindex[v] = rindex[w] + root = false + } + } + if root { + index-- + for top := len(S) - 1; top >= 0 && rindex[v] <= rindex[top]; top-- { + w = rindex[top] + S = S[:top] + rindex[w] = c + index-- + } + rindex[v] = c + c-- + } else { + S = append(S, v) + } + } + for v := range g { + if rindex[v] == 0 { + visit(v) + } + } + return rindex +} +*/ + +// Transpose constructs a new adjacency list with all arcs reversed. +// +// For every arc from->to of g, the result will have an arc to->from. +// Transpose also counts arcs as it traverses and returns ma the number of arcs +// in g (equal to the number of arcs in the result.) +func (g Directed) Transpose() (t Directed, ma int) { + ta := make(AdjacencyList, len(g.AdjacencyList)) + for n, nbs := range g.AdjacencyList { + for _, nb := range nbs { + ta[nb] = append(ta[nb], NI(n)) + ma++ + } + } + return Directed{ta}, ma +} + +// DAGMaxLenPath finds a maximum length path in a directed acyclic graph. +// +// Length here means number of nodes or arcs, not a sum of arc weights. +// +// Argument ordering must be a topological ordering of g. +// +// Returned is a node beginning a maximum length path, and a path of arcs +// starting from that node. +func (g LabeledDirected) DAGMaxLenPath(ordering []NI) (n NI, path []Half) { + // dynamic programming. visit nodes in reverse order. for each, compute + // longest path as one plus longest of 'to' nodes. + // Visits each arc once. Time complexity O(m). + // + // Similar code in dir.go. 
+ mlp := make([][]Half, len(g.LabeledAdjacencyList)) // index by node number + for i := len(ordering) - 1; i >= 0; i-- { + fr := ordering[i] // node number + to := g.LabeledAdjacencyList[fr] + if len(to) == 0 { + continue + } + mt := to[0] + for _, to := range to[1:] { + if len(mlp[to.To]) > len(mlp[mt.To]) { + mt = to + } + } + p := append([]Half{mt}, mlp[mt.To]...) + mlp[fr] = p + if len(p) > len(path) { + n = fr + path = p + } + } + return +} + +// FromListLabels transposes a labeled graph into a FromList and associated +// list of labels. +// +// Receiver g should be connected as a tree or forest. Specifically no node +// can have multiple incoming arcs. If any node n in g has multiple incoming +// arcs, the method returns (nil, nil, n) where n is a node with multiple +// incoming arcs. +// +// Otherwise (normally) the method populates the From members in a +// FromList.Path, populates a slice of labels, and returns the FromList, +// labels, and -1. +// +// Other members of the FromList are left as zero values. +// Use FromList.RecalcLen and FromList.RecalcLeaves as needed. +func (g LabeledDirected) FromListLabels() (*FromList, []LI, NI) { + labels := make([]LI, len(g.LabeledAdjacencyList)) + paths := make([]PathEnd, len(g.LabeledAdjacencyList)) + for i := range paths { + paths[i].From = -1 + } + for fr, to := range g.LabeledAdjacencyList { + for _, to := range to { + if paths[to.To].From >= 0 { + return nil, nil, to.To + } + paths[to.To].From = NI(fr) + labels[to.To] = to.Label + } + } + return &FromList{Paths: paths}, labels, -1 +} + +// Transpose constructs a new adjacency list that is the transpose of g. +// +// For every arc from->to of g, the result will have an arc to->from. +// Transpose also counts arcs as it traverses and returns ma the number of +// arcs in g (equal to the number of arcs in the result.) 
+func (g LabeledDirected) Transpose() (t LabeledDirected, ma int) { + ta := make(LabeledAdjacencyList, len(g.LabeledAdjacencyList)) + for n, nbs := range g.LabeledAdjacencyList { + for _, nb := range nbs { + ta[nb.To] = append(ta[nb.To], Half{To: NI(n), Label: nb.Label}) + ma++ + } + } + return LabeledDirected{ta}, ma +} + +// Undirected returns a new undirected graph derived from g, augmented as +// needed to make it undirected, with reciprocal arcs having matching labels. +func (g LabeledDirected) Undirected() LabeledUndirected { + c, _ := g.LabeledAdjacencyList.Copy() // start with a copy + // "reciprocals wanted" + rw := make(LabeledAdjacencyList, len(g.LabeledAdjacencyList)) + for fr, to := range g.LabeledAdjacencyList { + arc: // for each arc in g + for _, to := range to { + if to.To == NI(fr) { + continue // arc is a loop + } + // search wanted arcs + wf := rw[fr] + for i, w := range wf { + if w == to { // found, remove + last := len(wf) - 1 + wf[i] = wf[last] + rw[fr] = wf[:last] + continue arc + } + } + // arc not found, add to reciprocal to wanted list + rw[to.To] = append(rw[to.To], Half{To: NI(fr), Label: to.Label}) + } + } + // add missing reciprocals + for fr, to := range rw { + c[fr] = append(c[fr], to...) + } + return LabeledUndirected{c} +} + +// Unlabeled constructs the unlabeled directed graph corresponding to g. +func (g LabeledDirected) Unlabeled() Directed { + return Directed{g.LabeledAdjacencyList.Unlabeled()} +} + +// UnlabeledTranspose constructs a new adjacency list that is the unlabeled +// transpose of g. +// +// For every arc from->to of g, the result will have an arc to->from. +// Transpose also counts arcs as it traverses and returns ma, the number of +// arcs in g (equal to the number of arcs in the result.) +// +// It is equivalent to g.Unlabeled().Transpose() but constructs the result +// directly. 
+func (g LabeledDirected) UnlabeledTranspose() (t Directed, ma int) { + ta := make(AdjacencyList, len(g.LabeledAdjacencyList)) + for n, nbs := range g.LabeledAdjacencyList { + for _, nb := range nbs { + ta[nb.To] = append(ta[nb.To], NI(n)) + ma++ + } + } + return Directed{ta}, ma +} diff --git a/vendor/github.com/soniakeys/graph/dir_RO.go b/vendor/github.com/soniakeys/graph/dir_RO.go new file mode 100644 index 00000000..77558a96 --- /dev/null +++ b/vendor/github.com/soniakeys/graph/dir_RO.go @@ -0,0 +1,395 @@ +// Copyright 2014 Sonia Keys +// License MIT: http://opensource.org/licenses/MIT + +package graph + +// dir_RO.go is code generated from dir_cg.go by directives in graph.go. +// Editing dir_cg.go is okay. It is the code generation source. +// DO NOT EDIT dir_RO.go. +// The RO means read only and it is upper case RO to slow you down a bit +// in case you start to edit the file. + +// Balanced returns true if for every node in g, in-degree equals out-degree. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g Directed) Balanced() bool { + for n, in := range g.InDegree() { + if in != len(g.AdjacencyList[n]) { + return false + } + } + return true +} + +// Copy makes a deep copy of g. +// Copy also computes the arc size ma, the number of arcs. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g Directed) Copy() (c Directed, ma int) { + l, s := g.AdjacencyList.Copy() + return Directed{l}, s +} + +// Cyclic determines if g contains a cycle, a non-empty path from a node +// back to itself. +// +// Cyclic returns true if g contains at least one cycle. It also returns +// an example of an arc involved in a cycle. +// Cyclic returns false if g is acyclic. +// +// Also see Topological, which detects cycles. +// +// There are equivalent labeled and unlabeled versions of this method. 
+func (g Directed) Cyclic() (cyclic bool, fr NI, to NI) { + a := g.AdjacencyList + fr, to = -1, -1 + var temp, perm Bits + var df func(NI) + df = func(n NI) { + switch { + case temp.Bit(n) == 1: + cyclic = true + return + case perm.Bit(n) == 1: + return + } + temp.SetBit(n, 1) + for _, nb := range a[n] { + df(nb) + if cyclic { + if fr < 0 { + fr, to = n, nb + } + return + } + } + temp.SetBit(n, 0) + perm.SetBit(n, 1) + } + for n := range a { + if perm.Bit(NI(n)) == 1 { + continue + } + if df(NI(n)); cyclic { // short circuit as soon as a cycle is found + break + } + } + return +} + +// FromList transposes a labeled graph into a FromList. +// +// Receiver g should be connected as a tree or forest. Specifically no node +// can have multiple incoming arcs. If any node n in g has multiple incoming +// arcs, the method returns (nil, n) where n is a node with multiple +// incoming arcs. +// +// Otherwise (normally) the method populates the From members in a +// FromList.Path and returns the FromList and -1. +// +// Other members of the FromList are left as zero values. +// Use FromList.RecalcLen and FromList.RecalcLeaves as needed. +// +// Unusual cases are parallel arcs and loops. A parallel arc represents +// a case of multiple arcs going to some node and so will lead to a (nil, n) +// return, even though a graph might be considered a multigraph tree. +// A single loop on a node that would otherwise be a root node, though, +// is not a case of multiple incoming arcs and so does not force a (nil, n) +// result. +// +// There are equivalent labeled and unlabeled versions of this method. 
+func (g Directed) FromList() (*FromList, NI) { + paths := make([]PathEnd, len(g.AdjacencyList)) + for i := range paths { + paths[i].From = -1 + } + for fr, to := range g.AdjacencyList { + for _, to := range to { + if paths[to].From >= 0 { + return nil, to + } + paths[to].From = NI(fr) + } + } + return &FromList{Paths: paths}, -1 +} + +// InDegree computes the in-degree of each node in g +// +// There are equivalent labeled and unlabeled versions of this method. +func (g Directed) InDegree() []int { + ind := make([]int, len(g.AdjacencyList)) + for _, nbs := range g.AdjacencyList { + for _, nb := range nbs { + ind[nb]++ + } + } + return ind +} + +// IsTree identifies trees in directed graphs. +// +// Return value isTree is true if the subgraph reachable from root is a tree. +// Further, return value allTree is true if the entire graph g is reachable +// from root. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g Directed) IsTree(root NI) (isTree, allTree bool) { + a := g.AdjacencyList + var v Bits + v.SetAll(len(a)) + var df func(NI) bool + df = func(n NI) bool { + if v.Bit(n) == 0 { + return false + } + v.SetBit(n, 0) + for _, to := range a[n] { + if !df(to) { + return false + } + } + return true + } + isTree = df(root) + return isTree, isTree && v.Zero() +} + +// Tarjan identifies strongly connected components in a directed graph using +// Tarjan's algorithm. +// +// The method calls the emit argument for each component identified. Each +// component is a list of nodes. A property of the algorithm is that +// components are emitted in reverse topological order of the condensation. +// (See https://en.wikipedia.org/wiki/Strongly_connected_component#Definitions +// for description of condensation.) +// +// There are equivalent labeled and unlabeled versions of this method. +// +// See also TarjanForward and TarjanCondensation. 
+func (g Directed) Tarjan(emit func([]NI) bool) { + // See "Depth-first search and linear graph algorithms", Robert Tarjan, + // SIAM J. Comput. Vol. 1, No. 2, June 1972. + // + // Implementation here from Wikipedia pseudocode, + // http://en.wikipedia.org/w/index.php?title=Tarjan%27s_strongly_connected_components_algorithm&direction=prev&oldid=647184742 + var indexed, stacked Bits + a := g.AdjacencyList + index := make([]int, len(a)) + lowlink := make([]int, len(a)) + x := 0 + var S []NI + var sc func(NI) bool + sc = func(n NI) bool { + index[n] = x + indexed.SetBit(n, 1) + lowlink[n] = x + x++ + S = append(S, n) + stacked.SetBit(n, 1) + for _, nb := range a[n] { + if indexed.Bit(nb) == 0 { + if !sc(nb) { + return false + } + if lowlink[nb] < lowlink[n] { + lowlink[n] = lowlink[nb] + } + } else if stacked.Bit(nb) == 1 { + if index[nb] < lowlink[n] { + lowlink[n] = index[nb] + } + } + } + if lowlink[n] == index[n] { + var c []NI + for { + last := len(S) - 1 + w := S[last] + S = S[:last] + stacked.SetBit(w, 0) + c = append(c, w) + if w == n { + if !emit(c) { + return false + } + break + } + } + } + return true + } + for n := range a { + if indexed.Bit(NI(n)) == 0 && !sc(NI(n)) { + return + } + } +} + +// TarjanForward returns strongly connected components. +// +// It returns components in the reverse order of Tarjan, for situations +// where a forward topological ordering is easier. +func (g Directed) TarjanForward() [][]NI { + var r [][]NI + g.Tarjan(func(c []NI) bool { + r = append(r, c) + return true + }) + scc := make([][]NI, len(r)) + last := len(r) - 1 + for i, ci := range r { + scc[last-i] = ci + } + return scc +} + +// TarjanCondensation returns strongly connected components and their +// condensation graph. +// +// Components are ordered in a forward topological ordering. 
+func (g Directed) TarjanCondensation() (scc [][]NI, cd AdjacencyList) { + scc = g.TarjanForward() + cd = make(AdjacencyList, len(scc)) // return value + cond := make([]NI, len(g.AdjacencyList)) // mapping from g node to cd node + for cn := NI(len(scc) - 1); cn >= 0; cn-- { + c := scc[cn] + for _, n := range c { + cond[n] = NI(cn) // map g node to cd node + } + var tos []NI // list of 'to' nodes + var m Bits // tos map + m.SetBit(cn, 1) + for _, n := range c { + for _, to := range g.AdjacencyList[n] { + if ct := cond[to]; m.Bit(ct) == 0 { + m.SetBit(ct, 1) + tos = append(tos, ct) + } + } + } + cd[cn] = tos + } + return +} + +// Topological computes a topological ordering of a directed acyclic graph. +// +// For an acyclic graph, return value ordering is a permutation of node numbers +// in topologically sorted order and cycle will be nil. If the graph is found +// to be cyclic, ordering will be nil and cycle will be the path of a found +// cycle. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g Directed) Topological() (ordering, cycle []NI) { + a := g.AdjacencyList + ordering = make([]NI, len(a)) + i := len(ordering) + var temp, perm Bits + var cycleFound bool + var cycleStart NI + var df func(NI) + df = func(n NI) { + switch { + case temp.Bit(n) == 1: + cycleFound = true + cycleStart = n + return + case perm.Bit(n) == 1: + return + } + temp.SetBit(n, 1) + for _, nb := range a[n] { + df(nb) + if cycleFound { + if cycleStart >= 0 { + // a little hack: orderng won't be needed so repurpose the + // slice as cycle. this is read out in reverse order + // as the recursion unwinds. 
+ x := len(ordering) - 1 - len(cycle) + ordering[x] = n + cycle = ordering[x:] + if n == cycleStart { + cycleStart = -1 + } + } + return + } + } + temp.SetBit(n, 0) + perm.SetBit(n, 1) + i-- + ordering[i] = n + } + for n := range a { + if perm.Bit(NI(n)) == 1 { + continue + } + df(NI(n)) + if cycleFound { + return nil, cycle + } + } + return ordering, nil +} + +// TopologicalKahn computes a topological ordering of a directed acyclic graph. +// +// For an acyclic graph, return value ordering is a permutation of node numbers +// in topologically sorted order and cycle will be nil. If the graph is found +// to be cyclic, ordering will be nil and cycle will be the path of a found +// cycle. +// +// This function is based on the algorithm by Arthur Kahn and requires the +// transpose of g be passed as the argument. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g Directed) TopologicalKahn(tr Directed) (ordering, cycle []NI) { + // code follows Wikipedia pseudocode. + var L, S []NI + // rem for "remaining edges," this function makes a local copy of the + // in-degrees and consumes that instead of consuming an input. + rem := make([]int, len(g.AdjacencyList)) + for n, fr := range tr.AdjacencyList { + if len(fr) == 0 { + // accumulate "set of all nodes with no incoming edges" + S = append(S, NI(n)) + } else { + // initialize rem from in-degree + rem[n] = len(fr) + } + } + for len(S) > 0 { + last := len(S) - 1 // "remove a node n from S" + n := S[last] + S = S[:last] + L = append(L, n) // "add n to tail of L" + for _, m := range g.AdjacencyList[n] { + // WP pseudo code reads "for each node m..." but it means for each + // node m *remaining in the graph.* We consume rem rather than + // the graph, so "remaining in the graph" for us means rem[m] > 0. 
+ if rem[m] > 0 { + rem[m]-- // "remove edge from the graph" + if rem[m] == 0 { // if "m has no other incoming edges" + S = append(S, m) // "insert m into S" + } + } + } + } + // "If graph has edges," for us means a value in rem is > 0. + for c, in := range rem { + if in > 0 { + // recover cyclic nodes + for _, nb := range g.AdjacencyList[c] { + if rem[nb] > 0 { + cycle = append(cycle, NI(c)) + break + } + } + } + } + if len(cycle) > 0 { + return nil, cycle + } + return L, nil +} diff --git a/vendor/github.com/soniakeys/graph/dir_cg.go b/vendor/github.com/soniakeys/graph/dir_cg.go new file mode 100644 index 00000000..2b82f4f1 --- /dev/null +++ b/vendor/github.com/soniakeys/graph/dir_cg.go @@ -0,0 +1,395 @@ +// Copyright 2014 Sonia Keys +// License MIT: http://opensource.org/licenses/MIT + +package graph + +// dir_RO.go is code generated from dir_cg.go by directives in graph.go. +// Editing dir_cg.go is okay. It is the code generation source. +// DO NOT EDIT dir_RO.go. +// The RO means read only and it is upper case RO to slow you down a bit +// in case you start to edit the file. + +// Balanced returns true if for every node in g, in-degree equals out-degree. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g LabeledDirected) Balanced() bool { + for n, in := range g.InDegree() { + if in != len(g.LabeledAdjacencyList[n]) { + return false + } + } + return true +} + +// Copy makes a deep copy of g. +// Copy also computes the arc size ma, the number of arcs. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g LabeledDirected) Copy() (c LabeledDirected, ma int) { + l, s := g.LabeledAdjacencyList.Copy() + return LabeledDirected{l}, s +} + +// Cyclic determines if g contains a cycle, a non-empty path from a node +// back to itself. +// +// Cyclic returns true if g contains at least one cycle. It also returns +// an example of an arc involved in a cycle. +// Cyclic returns false if g is acyclic. 
+// +// Also see Topological, which detects cycles. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g LabeledDirected) Cyclic() (cyclic bool, fr NI, to Half) { + a := g.LabeledAdjacencyList + fr, to.To = -1, -1 + var temp, perm Bits + var df func(NI) + df = func(n NI) { + switch { + case temp.Bit(n) == 1: + cyclic = true + return + case perm.Bit(n) == 1: + return + } + temp.SetBit(n, 1) + for _, nb := range a[n] { + df(nb.To) + if cyclic { + if fr < 0 { + fr, to = n, nb + } + return + } + } + temp.SetBit(n, 0) + perm.SetBit(n, 1) + } + for n := range a { + if perm.Bit(NI(n)) == 1 { + continue + } + if df(NI(n)); cyclic { // short circuit as soon as a cycle is found + break + } + } + return +} + +// FromList transposes a labeled graph into a FromList. +// +// Receiver g should be connected as a tree or forest. Specifically no node +// can have multiple incoming arcs. If any node n in g has multiple incoming +// arcs, the method returns (nil, n) where n is a node with multiple +// incoming arcs. +// +// Otherwise (normally) the method populates the From members in a +// FromList.Path and returns the FromList and -1. +// +// Other members of the FromList are left as zero values. +// Use FromList.RecalcLen and FromList.RecalcLeaves as needed. +// +// Unusual cases are parallel arcs and loops. A parallel arc represents +// a case of multiple arcs going to some node and so will lead to a (nil, n) +// return, even though a graph might be considered a multigraph tree. +// A single loop on a node that would otherwise be a root node, though, +// is not a case of multiple incoming arcs and so does not force a (nil, n) +// result. +// +// There are equivalent labeled and unlabeled versions of this method. 
+func (g LabeledDirected) FromList() (*FromList, NI) { + paths := make([]PathEnd, len(g.LabeledAdjacencyList)) + for i := range paths { + paths[i].From = -1 + } + for fr, to := range g.LabeledAdjacencyList { + for _, to := range to { + if paths[to.To].From >= 0 { + return nil, to.To + } + paths[to.To].From = NI(fr) + } + } + return &FromList{Paths: paths}, -1 +} + +// InDegree computes the in-degree of each node in g +// +// There are equivalent labeled and unlabeled versions of this method. +func (g LabeledDirected) InDegree() []int { + ind := make([]int, len(g.LabeledAdjacencyList)) + for _, nbs := range g.LabeledAdjacencyList { + for _, nb := range nbs { + ind[nb.To]++ + } + } + return ind +} + +// IsTree identifies trees in directed graphs. +// +// Return value isTree is true if the subgraph reachable from root is a tree. +// Further, return value allTree is true if the entire graph g is reachable +// from root. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g LabeledDirected) IsTree(root NI) (isTree, allTree bool) { + a := g.LabeledAdjacencyList + var v Bits + v.SetAll(len(a)) + var df func(NI) bool + df = func(n NI) bool { + if v.Bit(n) == 0 { + return false + } + v.SetBit(n, 0) + for _, to := range a[n] { + if !df(to.To) { + return false + } + } + return true + } + isTree = df(root) + return isTree, isTree && v.Zero() +} + +// Tarjan identifies strongly connected components in a directed graph using +// Tarjan's algorithm. +// +// The method calls the emit argument for each component identified. Each +// component is a list of nodes. A property of the algorithm is that +// components are emitted in reverse topological order of the condensation. +// (See https://en.wikipedia.org/wiki/Strongly_connected_component#Definitions +// for description of condensation.) +// +// There are equivalent labeled and unlabeled versions of this method. +// +// See also TarjanForward and TarjanCondensation. 
+func (g LabeledDirected) Tarjan(emit func([]NI) bool) { + // See "Depth-first search and linear graph algorithms", Robert Tarjan, + // SIAM J. Comput. Vol. 1, No. 2, June 1972. + // + // Implementation here from Wikipedia pseudocode, + // http://en.wikipedia.org/w/index.php?title=Tarjan%27s_strongly_connected_components_algorithm&direction=prev&oldid=647184742 + var indexed, stacked Bits + a := g.LabeledAdjacencyList + index := make([]int, len(a)) + lowlink := make([]int, len(a)) + x := 0 + var S []NI + var sc func(NI) bool + sc = func(n NI) bool { + index[n] = x + indexed.SetBit(n, 1) + lowlink[n] = x + x++ + S = append(S, n) + stacked.SetBit(n, 1) + for _, nb := range a[n] { + if indexed.Bit(nb.To) == 0 { + if !sc(nb.To) { + return false + } + if lowlink[nb.To] < lowlink[n] { + lowlink[n] = lowlink[nb.To] + } + } else if stacked.Bit(nb.To) == 1 { + if index[nb.To] < lowlink[n] { + lowlink[n] = index[nb.To] + } + } + } + if lowlink[n] == index[n] { + var c []NI + for { + last := len(S) - 1 + w := S[last] + S = S[:last] + stacked.SetBit(w, 0) + c = append(c, w) + if w == n { + if !emit(c) { + return false + } + break + } + } + } + return true + } + for n := range a { + if indexed.Bit(NI(n)) == 0 && !sc(NI(n)) { + return + } + } +} + +// TarjanForward returns strongly connected components. +// +// It returns components in the reverse order of Tarjan, for situations +// where a forward topological ordering is easier. +func (g LabeledDirected) TarjanForward() [][]NI { + var r [][]NI + g.Tarjan(func(c []NI) bool { + r = append(r, c) + return true + }) + scc := make([][]NI, len(r)) + last := len(r) - 1 + for i, ci := range r { + scc[last-i] = ci + } + return scc +} + +// TarjanCondensation returns strongly connected components and their +// condensation graph. +// +// Components are ordered in a forward topological ordering. 
+func (g LabeledDirected) TarjanCondensation() (scc [][]NI, cd AdjacencyList) { + scc = g.TarjanForward() + cd = make(AdjacencyList, len(scc)) // return value + cond := make([]NI, len(g.LabeledAdjacencyList)) // mapping from g node to cd node + for cn := NI(len(scc) - 1); cn >= 0; cn-- { + c := scc[cn] + for _, n := range c { + cond[n] = NI(cn) // map g node to cd node + } + var tos []NI // list of 'to' nodes + var m Bits // tos map + m.SetBit(cn, 1) + for _, n := range c { + for _, to := range g.LabeledAdjacencyList[n] { + if ct := cond[to.To]; m.Bit(ct) == 0 { + m.SetBit(ct, 1) + tos = append(tos, ct) + } + } + } + cd[cn] = tos + } + return +} + +// Topological computes a topological ordering of a directed acyclic graph. +// +// For an acyclic graph, return value ordering is a permutation of node numbers +// in topologically sorted order and cycle will be nil. If the graph is found +// to be cyclic, ordering will be nil and cycle will be the path of a found +// cycle. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g LabeledDirected) Topological() (ordering, cycle []NI) { + a := g.LabeledAdjacencyList + ordering = make([]NI, len(a)) + i := len(ordering) + var temp, perm Bits + var cycleFound bool + var cycleStart NI + var df func(NI) + df = func(n NI) { + switch { + case temp.Bit(n) == 1: + cycleFound = true + cycleStart = n + return + case perm.Bit(n) == 1: + return + } + temp.SetBit(n, 1) + for _, nb := range a[n] { + df(nb.To) + if cycleFound { + if cycleStart >= 0 { + // a little hack: orderng won't be needed so repurpose the + // slice as cycle. this is read out in reverse order + // as the recursion unwinds. 
+ x := len(ordering) - 1 - len(cycle) + ordering[x] = n + cycle = ordering[x:] + if n == cycleStart { + cycleStart = -1 + } + } + return + } + } + temp.SetBit(n, 0) + perm.SetBit(n, 1) + i-- + ordering[i] = n + } + for n := range a { + if perm.Bit(NI(n)) == 1 { + continue + } + df(NI(n)) + if cycleFound { + return nil, cycle + } + } + return ordering, nil +} + +// TopologicalKahn computes a topological ordering of a directed acyclic graph. +// +// For an acyclic graph, return value ordering is a permutation of node numbers +// in topologically sorted order and cycle will be nil. If the graph is found +// to be cyclic, ordering will be nil and cycle will be the path of a found +// cycle. +// +// This function is based on the algorithm by Arthur Kahn and requires the +// transpose of g be passed as the argument. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g LabeledDirected) TopologicalKahn(tr Directed) (ordering, cycle []NI) { + // code follows Wikipedia pseudocode. + var L, S []NI + // rem for "remaining edges," this function makes a local copy of the + // in-degrees and consumes that instead of consuming an input. + rem := make([]int, len(g.LabeledAdjacencyList)) + for n, fr := range tr.AdjacencyList { + if len(fr) == 0 { + // accumulate "set of all nodes with no incoming edges" + S = append(S, NI(n)) + } else { + // initialize rem from in-degree + rem[n] = len(fr) + } + } + for len(S) > 0 { + last := len(S) - 1 // "remove a node n from S" + n := S[last] + S = S[:last] + L = append(L, n) // "add n to tail of L" + for _, m := range g.LabeledAdjacencyList[n] { + // WP pseudo code reads "for each node m..." but it means for each + // node m *remaining in the graph.* We consume rem rather than + // the graph, so "remaining in the graph" for us means rem[m] > 0. 
+ if rem[m.To] > 0 { + rem[m.To]-- // "remove edge from the graph" + if rem[m.To] == 0 { // if "m has no other incoming edges" + S = append(S, m.To) // "insert m into S" + } + } + } + } + // "If graph has edges," for us means a value in rem is > 0. + for c, in := range rem { + if in > 0 { + // recover cyclic nodes + for _, nb := range g.LabeledAdjacencyList[c] { + if rem[nb.To] > 0 { + cycle = append(cycle, NI(c)) + break + } + } + } + } + if len(cycle) > 0 { + return nil, cycle + } + return L, nil +} diff --git a/vendor/github.com/soniakeys/graph/doc.go b/vendor/github.com/soniakeys/graph/doc.go new file mode 100644 index 00000000..6d072789 --- /dev/null +++ b/vendor/github.com/soniakeys/graph/doc.go @@ -0,0 +1,128 @@ +// Copyright 2014 Sonia Keys +// License MIT: http://opensource.org/licenses/MIT + +// Graph algorithms: Dijkstra, A*, Bellman Ford, Floyd Warshall; +// Kruskal and Prim minimal spanning tree; topological sort and DAG longest +// and shortest paths; Eulerian cycle and path; degeneracy and k-cores; +// Bron Kerbosch clique finding; connected components; and others. +// +// This is a graph library of integer indexes. To use it with application +// data, you associate data with integer indexes, perform searches or other +// operations with the library, and then use the integer index results to refer +// back to your application data. +// +// Thus it does not store application data, pointers to application data, +// or require you to implement an interface on your application data. +// The idea is to keep the library methods fast and lean. +// +// Representation overview +// +// The package defines a type for a node index (NI) which is just an integer +// type. It defines types for a number of number graph representations using +// NI. The fundamental graph type is AdjacencyList, which is the +// common "list of lists" graph representation. It is a list as a slice +// with one element for each node of the graph. 
It defines types for a number of graph representations using
In these cases code generation provides +// methods on both types from a single source implementation. These methods +// are documented with the sentence "There are equivalent labeled and unlabeled +// versions of this method" and examples are provided only for the unlabeled +// version. +// +// Terminology +// +// This package uses the term "node" rather than "vertex." It uses "arc" +// to mean a directed edge, and uses "from" and "to" to refer to the ends +// of an arc. It uses "start" and "end" to refer to endpoints of a search +// or traversal. +// +// The usage of "to" and "from" is perhaps most strange. In common speech +// they are prepositions, but throughout this package they are used as +// adjectives, for example to refer to the "from node" of an arc or the +// "to node". The type "FromList" is named to indicate it stores a list of +// "from" values. +// +// A "half arc" refers to just one end of an arc, either the to or from end. +// +// Two arcs are "reciprocal" if they connect two distinct nodes n1 and n2, +// one arc leading from n1 to n2 and the other arc leading from n2 to n1. +// Undirected graphs are represented with reciprocal arcs. +// +// A node with an arc to itself represents a "loop." Duplicate arcs, where +// a node has multiple arcs to another node, are termed "parallel arcs." +// A graph with no loops or parallel arcs is "simple." A graph that allows +// parallel arcs is a "multigraph" +// +// The "size" of a graph traditionally means the number of undirected edges. +// This package uses "arc size" to mean the number of arcs in a graph. For an +// undirected graph without loops, arc size is 2 * size. +// +// The "order" of a graph is the number of nodes. An "ordering" though means +// an ordered list of nodes. +// +// A number of graph search algorithms use a concept of arc "weights." +// The sum of arc weights along a path is a "distance." 
BreadthFirst Unweighted arcs, traversal, single path search or all paths.
Various graph search methods use a FromList to return search results.
BoundsOk returns true when all from values are less than len(f.Paths).
+// Otherwise it returns false and a node with a from value >= len(f.Paths).
IsolatedNodes returns a bitmap of isolated nodes in receiver graph f.
+func PathTo(paths []PathEnd, end NI, p []NI) []NI { + n := paths[end].Len + if n == 0 { + return nil + } + if cap(p) >= n { + p = p[:n] + } else { + p = make([]NI, n) + } + for { + n-- + p[n] = end + if n == 0 { + return p + } + end = paths[end].From + } +} + +// Preorder traverses f calling Visitor v in preorder. +// +// Nodes are visited in order such that for any node n with from node fr, +// fr is visited before n. Where f represents a tree, the visit ordering +// corresponds to a preordering, or depth first traversal of the tree. +// Where f represents a forest, the preorderings of the trees can be +// intermingled. +// +// Leaves must be set correctly first. Use RecalcLeaves if leaves are not +// known to be set correctly. FromList f cannot be cyclic. +// +// Traversal continues while v returns true. It terminates if v returns false. +// Preorder returns true if it completes without v returning false. Preorder +// returns false if traversal is terminated by v returning false. +func (f FromList) Preorder(v OkNodeVisitor) bool { + p := f.Paths + var done Bits + var df func(NI) bool + df = func(n NI) bool { + done.SetBit(n, 1) + if fr := p[n].From; fr >= 0 && done.Bit(fr) == 0 { + df(fr) + } + return v(n) + } + for n := range f.Paths { + p[n].Len = 0 + } + return f.Leaves.Iterate(func(n NI) bool { + return df(n) + }) +} + +// RecalcLeaves recomputes the Leaves member of f. +func (f *FromList) RecalcLeaves() { + p := f.Paths + lv := &f.Leaves + lv.SetAll(len(p)) + for n := range f.Paths { + if fr := p[n].From; fr >= 0 { + lv.SetBit(fr, 0) + } + } +} + +// RecalcLen recomputes Len for each path end, and recomputes MaxLen. +// +// RecalcLen relies on the Leaves member being valid. If it is not known +// to be valid, call RecalcLeaves before calling RecalcLen. 
+func (f *FromList) RecalcLen() { + p := f.Paths + var setLen func(NI) int + setLen = func(n NI) int { + switch { + case p[n].Len > 0: + return p[n].Len + case p[n].From < 0: + p[n].Len = 1 + return 1 + } + l := 1 + setLen(p[n].From) + p[n].Len = l + return l + } + for n := range f.Paths { + p[n].Len = 0 + } + f.MaxLen = 0 + f.Leaves.Iterate(func(n NI) bool { + if l := setLen(NI(n)); l > f.MaxLen { + f.MaxLen = l + } + return true + }) +} + +// ReRoot reorients the tree containing n to make n the root node. +// +// It keeps the tree connected by "reversing" the path from n to the old root. +// +// After ReRoot, the Leaves and Len members are invalid. +// Call RecalcLeaves or RecalcLen as needed. +func (f *FromList) ReRoot(n NI) { + p := f.Paths + fr := p[n].From + if fr < 0 { + return + } + p[n].From = -1 + for { + ff := p[fr].From + p[fr].From = n + if ff < 0 { + return + } + n = fr + fr = ff + } +} + +// Root finds the root of a node in a FromList. +func (f FromList) Root(n NI) NI { + for p := f.Paths; ; { + fr := p[n].From + if fr < 0 { + return n + } + n = fr + } +} + +// Transpose constructs the directed graph corresponding to FromList f +// but with arcs in the opposite direction. That is, from roots toward leaves. +// +// The method relies only on the From member of f.Paths. Other members of +// the FromList are not used. +// +// See FromList.TransposeRoots for a version that also accumulates and returns +// information about the roots. +func (f FromList) Transpose() Directed { + g := make(AdjacencyList, len(f.Paths)) + for n, p := range f.Paths { + if p.From == -1 { + continue + } + g[p.From] = append(g[p.From], NI(n)) + } + return Directed{g} +} + +// TransposeLabeled constructs the directed labeled graph corresponding +// to FromList f but with arcs in the opposite direction. That is, from +// roots toward leaves. +// +// The argument labels can be nil. In this case labels are generated matching +// the path indexes. 
See FromList.TransposeLabeled for a simpler version that returns the
See FromList.Transpose for a simpler version that returns the forest only.
The "node ID" of a node is simply its slice index in the AdjacencyList.
Arc labels are commonly used to associate a weight with an arc.
WeightedEdgeList sorts by weight, obtained by calling the weight function.
I'm not big on making go vet happy just for a badge but I really like the
no updates needed in these cases.
+ // two iterative passes (rather than recursion or stack) + // pass 1: find root + r := fr + for { + f := s[r].from + if f < 0 { + break + } + r = f + } + // pass 2: update froms + for { + s[n].from = r + if fr == r { + return r + } + n = fr + fr = s[n].from + } +} + +// Kruskal implements Kruskal's algorithm for constructing a minimum spanning +// forest on an undirected graph. +// +// While the input graph is interpreted as undirected, the receiver edge list +// does not actually need to contain reciprocal arcs. A property of the +// algorithm is that arc direction is ignored. Thus only a single arc out of +// a reciprocal pair must be present in the edge list. Reciprocal arcs (and +// parallel arcs) are allowed though, and do not affect the result. +// +// The forest is returned as an undirected graph. +// +// Also returned is a total distance for the returned forest. +// +// The edge list of the receiver is sorted as a side effect of this method. +// See KruskalSorted for a version that relies on the edge list being already +// sorted. +func (l WeightedEdgeList) Kruskal() (g LabeledUndirected, dist float64) { + sort.Sort(l) + return l.KruskalSorted() +} + +// KruskalSorted implements Kruskal's algorithm for constructing a minimum +// spanning tree on an undirected graph. +// +// While the input graph is interpreted as undirected, the receiver edge list +// does not actually need to contain reciprocal arcs. A property of the +// algorithm is that arc direction is ignored. Thus only a single arc out of +// a reciprocal pair must be present in the edge list. Reciprocal arcs (and +// parallel arcs) are allowed though, and do not affect the result. +// +// When called, the edge list of the receiver must be already sorted by weight. +// See Kruskal for a version that accepts an unsorted edge list. +// +// The forest is returned as an undirected graph. +// +// Also returned is a total distance for the returned forest. 
Prim implements the Jarník-Prim-Dijkstra algorithm for constructing
+func (g LabeledUndirected) Prim(start NI, w WeightFunc, f *FromList, labels []LI, componentLeaves *Bits) (numSpanned int, dist float64) { + al := g.LabeledAdjacencyList + if len(f.Paths) != len(al) { + *f = NewFromList(len(al)) + } + b := make([]prNode, len(al)) // "best" + for n := range b { + b[n].nx = NI(n) + b[n].fx = -1 + } + rp := f.Paths + var frontier prHeap + rp[start] = PathEnd{From: -1, Len: 1} + numSpanned = 1 + fLeaves := &f.Leaves + fLeaves.SetBit(start, 1) + if componentLeaves != nil { + componentLeaves.SetBit(start, 1) + } + for a := start; ; { + for _, nb := range al[a] { + if rp[nb.To].Len > 0 { + continue // already in MST, no action + } + switch bp := &b[nb.To]; { + case bp.fx == -1: // new node for frontier + bp.from = fromHalf{From: a, Label: nb.Label} + bp.wt = w(nb.Label) + heap.Push(&frontier, bp) + case w(nb.Label) < bp.wt: // better arc + bp.from = fromHalf{From: a, Label: nb.Label} + bp.wt = w(nb.Label) + heap.Fix(&frontier, bp.fx) + } + } + if len(frontier) == 0 { + break // done + } + bp := heap.Pop(&frontier).(*prNode) + a = bp.nx + rp[a].Len = rp[bp.from.From].Len + 1 + rp[a].From = bp.from.From + if len(labels) != 0 { + labels[a] = bp.from.Label + } + dist += bp.wt + fLeaves.SetBit(bp.from.From, 0) + fLeaves.SetBit(a, 1) + if componentLeaves != nil { + componentLeaves.SetBit(bp.from.From, 0) + componentLeaves.SetBit(a, 1) + } + numSpanned++ + } + return +} + +// fromHalf is a half arc, representing a labeled arc and the "neighbor" node +// that the arc originates from. +// +// (This used to be exported when there was a LabeledFromList. Currently +// unexported now that it seems to have much more limited use.) 
+type fromHalf struct { + From NI + Label LI +} + +type prNode struct { + nx NI + from fromHalf + wt float64 // p.Weight(from.Label) + fx int +} + +type prHeap []*prNode + +func (h prHeap) Len() int { return len(h) } +func (h prHeap) Less(i, j int) bool { return h[i].wt < h[j].wt } +func (h prHeap) Swap(i, j int) { + h[i], h[j] = h[j], h[i] + h[i].fx = i + h[j].fx = j +} +func (p *prHeap) Push(x interface{}) { + nd := x.(*prNode) + nd.fx = len(*p) + *p = append(*p, nd) +} +func (p *prHeap) Pop() interface{} { + r := *p + last := len(r) - 1 + *p = r[:last] + return r[last] +} diff --git a/vendor/github.com/soniakeys/graph/random.go b/vendor/github.com/soniakeys/graph/random.go new file mode 100644 index 00000000..99f04458 --- /dev/null +++ b/vendor/github.com/soniakeys/graph/random.go @@ -0,0 +1,325 @@ +// Copyright 2016 Sonia Keys +// License MIT: https://opensource.org/licenses/MIT + +package graph + +import ( + "errors" + "math" + "math/rand" + "time" +) + +// Euclidean generates a random simple graph on the Euclidean plane. +// +// Nodes are associated with coordinates uniformly distributed on a unit +// square. Arcs are added between random nodes with a bias toward connecting +// nearer nodes. +// +// Unfortunately the function has a few "knobs". +// The returned graph will have order nNodes and arc size nArcs. The affinity +// argument controls the bias toward connecting nearer nodes. The function +// selects random pairs of nodes as a candidate arc then rejects the candidate +// if the nodes fail an affinity test. Also parallel arcs are rejected. +// As more affine or denser graphs are requested, rejections increase, +// increasing run time. The patience argument controls the number of arc +// rejections allowed before the function gives up and returns an error. 
+// Note that higher affinity will require more patience and that some +// combinations of nNodes and nArcs cannot be achieved with any amount of +// patience given that the returned graph must be simple. +// +// If Rand r is nil, the method creates a new source and generator for +// one-time use. +// +// Returned is a directed simple graph and associated positions indexed by +// node number. +// +// See also LabeledEuclidean. +func Euclidean(nNodes, nArcs int, affinity float64, patience int, r *rand.Rand) (g Directed, pos []struct{ X, Y float64 }, err error) { + a := make(AdjacencyList, nNodes) // graph + // generate random positions + if r == nil { + r = rand.New(rand.NewSource(time.Now().UnixNano())) + } + pos = make([]struct{ X, Y float64 }, nNodes) + for i := range pos { + pos[i].X = r.Float64() + pos[i].Y = r.Float64() + } + // arcs + var tooFar, dup int +arc: + for i := 0; i < nArcs; { + if tooFar == nArcs*patience { + err = errors.New("affinity not found") + return + } + if dup == nArcs*patience { + err = errors.New("overcrowding") + return + } + n1 := NI(r.Intn(nNodes)) + var n2 NI + for { + n2 = NI(r.Intn(nNodes)) + if n2 != n1 { // no graph loops + break + } + } + c1 := &pos[n1] + c2 := &pos[n2] + dist := math.Hypot(c2.X-c1.X, c2.Y-c1.Y) + if dist*affinity > r.ExpFloat64() { // favor near nodes + tooFar++ + continue + } + for _, nb := range a[n1] { + if nb == n2 { // no parallel arcs + dup++ + continue arc + } + } + a[n1] = append(a[n1], n2) + i++ + } + g = Directed{a} + return +} + +// LabeledEuclidean generates a random simple graph on the Euclidean plane. +// +// Arc label values in the returned graph g are indexes into the return value +// wt. Wt is the Euclidean distance between the from and to nodes of the arc. +// +// Otherwise the function arguments and return values are the same as for +// function Euclidean. See Euclidean. 
+func LabeledEuclidean(nNodes, nArcs int, affinity float64, patience int, r *rand.Rand) (g LabeledDirected, pos []struct{ X, Y float64 }, wt []float64, err error) { + a := make(LabeledAdjacencyList, nNodes) // graph + wt = make([]float64, nArcs) // arc weights + // generate random positions + if r == nil { + r = rand.New(rand.NewSource(time.Now().UnixNano())) + } + pos = make([]struct{ X, Y float64 }, nNodes) + for i := range pos { + pos[i].X = r.Float64() + pos[i].Y = r.Float64() + } + // arcs + var tooFar, dup int +arc: + for i := 0; i < nArcs; { + if tooFar == nArcs*patience { + err = errors.New("affinity not found") + return + } + if dup == nArcs*patience { + err = errors.New("overcrowding") + return + } + n1 := NI(r.Intn(nNodes)) + var n2 NI + for { + n2 = NI(r.Intn(nNodes)) + if n2 != n1 { // no graph loops + break + } + } + c1 := &pos[n1] + c2 := &pos[n2] + dist := math.Hypot(c2.X-c1.X, c2.Y-c1.Y) + if dist*affinity > r.ExpFloat64() { // favor near nodes + tooFar++ + continue + } + for _, nb := range a[n1] { + if nb.To == n2 { // no parallel arcs + dup++ + continue arc + } + } + wt[i] = dist + a[n1] = append(a[n1], Half{n2, LI(i)}) + i++ + } + g = LabeledDirected{a} + return +} + +// Geometric generates a random geometric graph (RGG) on the Euclidean plane. +// +// An RGG is an undirected simple graph. Nodes are associated with coordinates +// uniformly distributed on a unit square. Edges are added between all nodes +// falling within a specified distance or radius of each other. +// +// The resulting number of edges is somewhat random but asymptotically +// approaches m = Ď€r²n²/2. The method accumulates and returns the actual +// number of edges constructed. +// +// If Rand r is nil, the method creates a new source and generator for +// one-time use. +// +// See also LabeledGeometric. +func Geometric(nNodes int, radius float64, r *rand.Rand) (g Undirected, pos []struct{ X, Y float64 }, m int) { + // Expected degree is approximately nĎ€r². 
+	a := make(AdjacencyList, nNodes)
+	if r == nil {
+		r = rand.New(rand.NewSource(time.Now().UnixNano()))
+	}
+	pos = make([]struct{ X, Y float64 }, nNodes)
+	for i := range pos {
+		pos[i].X = r.Float64()
+		pos[i].Y = r.Float64()
+	}
+	for u, up := range pos {
+		for v := u + 1; v < len(pos); v++ {
+			vp := pos[v]
+			if math.Hypot(up.X-vp.X, up.Y-vp.Y) < radius {
+				a[u] = append(a[u], NI(v))
+				a[v] = append(a[v], NI(u))
+				m++
+			}
+		}
+	}
+	g = Undirected{a}
+	return
+}
+
+// LabeledGeometric generates a random geometric graph (RGG) on the Euclidean
+// plane.
+//
+// Edge label values in the returned graph g are indexes into the return value
+// wt. Wt is the Euclidean distance between nodes of the edge. The graph
+// size m is len(wt).
+//
+// See Geometric for additional description.
+func LabeledGeometric(nNodes int, radius float64, r *rand.Rand) (g LabeledUndirected, pos []struct{ X, Y float64 }, wt []float64) {
+	a := make(LabeledAdjacencyList, nNodes)
+	if r == nil {
+		r = rand.New(rand.NewSource(time.Now().UnixNano()))
+	}
+	pos = make([]struct{ X, Y float64 }, nNodes)
+	for i := range pos {
+		pos[i].X = r.Float64()
+		pos[i].Y = r.Float64()
+	}
+	for u, up := range pos {
+		for v := u + 1; v < len(pos); v++ {
+			vp := pos[v]
+			if w := math.Hypot(up.X-vp.X, up.Y-vp.Y); w < radius {
+				a[u] = append(a[u], Half{NI(v), LI(len(wt))})
+				a[v] = append(a[v], Half{NI(u), LI(len(wt))})
+				wt = append(wt, w)
+			}
+		}
+	}
+	g = LabeledUndirected{a}
+	return
+}
+
+// KroneckerDirected generates a Kronecker-like random directed graph.
+//
+// The returned graph g is simple and has no isolated nodes but is not
+// necessarily fully connected. The number of nodes will be <= 2^scale,
+// and will be near 2^scale for typical values of arcFactor, >= 2.
+// ArcFactor * 2^scale arcs are generated, although loops and duplicate arcs
+// are rejected.
+//
+// If Rand r is nil, the method creates a new source and generator for
+// one-time use.
+//
+// Return value ma is the number of arcs retained in the result graph.
+func KroneckerDirected(scale uint, arcFactor float64, r *rand.Rand) (g Directed, ma int) {
+	a, m := kronecker(scale, arcFactor, true, r)
+	return Directed{a}, m
+}
+
+// KroneckerUndirected generates a Kronecker-like random undirected graph.
+//
+// The returned graph g is simple and has no isolated nodes but is not
+// necessarily fully connected. The number of nodes will be <= 2^scale,
+// and will be near 2^scale for typical values of edgeFactor, >= 2.
+// EdgeFactor * 2^scale edges are generated, although loops and duplicate edges
+// are rejected.
+//
+// If Rand r is nil, the method creates a new source and generator for
+// one-time use.
+//
+// Return value m is the true number of edges--not arcs--retained in the result
+// graph.
+func KroneckerUndirected(scale uint, edgeFactor float64, r *rand.Rand) (g Undirected, m int) {
+	al, s := kronecker(scale, edgeFactor, false, r)
+	return Undirected{al}, s
+}
+
+// Styled after the Graph500 example code. Not well tested currently.
+// Graph500 example generates undirected only. No idea if the directed variant
+// here is meaningful or not.
+// +// note mma returns arc size ma for dir=true, but returns size m for dir=false +func kronecker(scale uint, edgeFactor float64, dir bool, r *rand.Rand) (g AdjacencyList, mma int) { + if r == nil { + r = rand.New(rand.NewSource(time.Now().UnixNano())) + } + N := NI(1 << scale) // node extent + M := int(edgeFactor*float64(N) + .5) // number of arcs/edges to generate + a, b, c := 0.57, 0.19, 0.19 // initiator probabilities + ab := a + b + cNorm := c / (1 - ab) + aNorm := a / ab + ij := make([][2]NI, M) + var bm Bits + var nNodes int + for k := range ij { + var i, j NI + for b := NI(1); b < N; b <<= 1 { + if r.Float64() > ab { + i |= b + if r.Float64() > cNorm { + j |= b + } + } else if r.Float64() > aNorm { + j |= b + } + } + if bm.Bit(i) == 0 { + bm.SetBit(i, 1) + nNodes++ + } + if bm.Bit(j) == 0 { + bm.SetBit(j, 1) + nNodes++ + } + r := r.Intn(k + 1) // shuffle edges as they are generated + ij[k] = ij[r] + ij[r] = [2]NI{i, j} + } + p := r.Perm(nNodes) // mapping to shuffle IDs of non-isolated nodes + px := 0 + rn := make([]NI, N) + for i := range rn { + if bm.Bit(NI(i)) == 1 { + rn[i] = NI(p[px]) // fill lookup table + px++ + } + } + g = make(AdjacencyList, nNodes) +ij: + for _, e := range ij { + if e[0] == e[1] { + continue // skip loops + } + ri, rj := rn[e[0]], rn[e[1]] + for _, nb := range g[ri] { + if nb == rj { + continue ij // skip parallel edges + } + } + g[ri] = append(g[ri], rj) + mma++ + if !dir { + g[rj] = append(g[rj], ri) + } + } + return +} diff --git a/vendor/github.com/soniakeys/graph/readme.md b/vendor/github.com/soniakeys/graph/readme.md new file mode 100644 index 00000000..539670ff --- /dev/null +++ b/vendor/github.com/soniakeys/graph/readme.md @@ -0,0 +1,38 @@ +#Graph + +A graph library with goals of speed and simplicity, Graph implements +graph algorithms on graphs of zero-based integer node IDs. 
+
+[![GoDoc](https://godoc.org/github.com/soniakeys/graph?status.svg)](https://godoc.org/github.com/soniakeys/graph) [![Go Walker](http://gowalker.org/api/v1/badge)](https://gowalker.org/github.com/soniakeys/graph) [![GoSearch](http://go-search.org/badge?id=github.com%2Fsoniakeys%2Fgraph)](http://go-search.org/view?id=github.com%2Fsoniakeys%2Fgraph)[![Build Status](https://travis-ci.org/soniakeys/graph.svg?branch=master)](https://travis-ci.org/soniakeys/graph)
+
+Status, 4 Apr 2016: The repo has benefitted recently from being included
+in another package. In response to users of that package, this repo now
+builds for 32 bit Windows and ARM, and for Go versions back to 1.2.1.
+Thank you all who have filed issues.
+
+###Non-source files of interest
+
+The directory [tutorials](tutorials) is a work in progress - there are only
+a couple of tutorials there yet - but the concept is to provide some topical
+walk-throughs to supplement godoc. The source-based godoc documentation
+remains the primary documentation.
+
+* [Dijkstra's algorithm](tutorials/dijkstra.md)
+* [AdjacencyList types](tutorials/adjacencylist.md)
+
+The directory [bench](bench) is another work in progress. The concept is
+to present some plots showing benchmark performance approaching some
+theoretical asymptote.
+
+[hacking.md](hacking.md) has some information about how the library is
+developed, built, and tested. It might be of interest if for example you
+plan to fork or contribute to the repository.
+ +###Test coverage +8 Apr 2016 +``` +graph 95.3% +graph/df 20.7% +graph/dot 77.5% +graph/treevis 79.4% +``` diff --git a/vendor/github.com/soniakeys/graph/sssp.go b/vendor/github.com/soniakeys/graph/sssp.go new file mode 100644 index 00000000..32cc192e --- /dev/null +++ b/vendor/github.com/soniakeys/graph/sssp.go @@ -0,0 +1,881 @@ +// Copyright 2013 Sonia Keys +// License MIT: http://opensource.org/licenses/MIT + +package graph + +import ( + "container/heap" + "fmt" + "math" +) + +// rNode holds data for a "reached" node +type rNode struct { + nx NI + state int8 // state constants defined below + f float64 // "g+h", path dist + heuristic estimate + fx int // heap.Fix index +} + +// for rNode.state +const ( + unreached = 0 + reached = 1 + open = 1 + closed = 2 +) + +type openHeap []*rNode + +// A Heuristic is defined on a specific end node. The function +// returns an estimate of the path distance from node argument +// "from" to the end node. Two subclasses of heuristics are "admissible" +// and "monotonic." +// +// Admissible means the value returned is guaranteed to be less than or +// equal to the actual shortest path distance from the node to end. +// +// An admissible estimate may further be monotonic. +// Monotonic means that for any neighboring nodes A and B with half arc aB +// leading from A to B, and for heuristic h defined on some end node, then +// h(A) <= aB.ArcWeight + h(B). +// +// See AStarA for additional notes on implementing heuristic functions for +// AStar search methods. +type Heuristic func(from NI) float64 + +// Admissible returns true if heuristic h is admissible on graph g relative to +// the given end node. +// +// If h is inadmissible, the string result describes a counter example. 
+func (h Heuristic) Admissible(g LabeledAdjacencyList, w WeightFunc, end NI) (bool, string) { + // invert graph + inv := make(LabeledAdjacencyList, len(g)) + for from, nbs := range g { + for _, nb := range nbs { + inv[nb.To] = append(inv[nb.To], + Half{To: NI(from), Label: nb.Label}) + } + } + // run dijkstra + // Dijkstra.AllPaths takes a start node but after inverting the graph + // argument end now represents the start node of the inverted graph. + f, dist, _ := inv.Dijkstra(end, -1, w) + // compare h to found shortest paths + for n := range inv { + if f.Paths[n].Len == 0 { + continue // no path, any heuristic estimate is fine. + } + if !(h(NI(n)) <= dist[n]) { + return false, fmt.Sprintf("h(%d) = %g, "+ + "required to be <= found shortest path (%g)", + n, h(NI(n)), dist[n]) + } + } + return true, "" +} + +// Monotonic returns true if heuristic h is monotonic on weighted graph g. +// +// If h is non-monotonic, the string result describes a counter example. +func (h Heuristic) Monotonic(g LabeledAdjacencyList, w WeightFunc) (bool, string) { + // precompute + hv := make([]float64, len(g)) + for n := range g { + hv[n] = h(NI(n)) + } + // iterate over all edges + for from, nbs := range g { + for _, nb := range nbs { + arcWeight := w(nb.Label) + if !(hv[from] <= arcWeight+hv[nb.To]) { + return false, fmt.Sprintf("h(%d) = %g, "+ + "required to be <= arc weight + h(%d) (= %g + %g = %g)", + from, hv[from], + nb.To, arcWeight, hv[nb.To], arcWeight+hv[nb.To]) + } + } + } + return true, "" +} + +// AStarA finds a path between two nodes. +// +// AStarA implements both algorithm A and algorithm A*. The difference in the +// two algorithms is strictly in the heuristic estimate returned by argument h. +// If h is an "admissible" heuristic estimate, then the algorithm is termed A*, +// otherwise it is algorithm A. +// +// Like Dijkstra's algorithm, AStarA with an admissible heuristic finds the +// shortest path between start and end. 
AStarA generally runs faster than +// Dijkstra though, by using the heuristic distance estimate. +// +// AStarA with an inadmissible heuristic becomes algorithm A. Algorithm A +// will find a path, but it is not guaranteed to be the shortest path. +// The heuristic still guides the search however, so a nearly admissible +// heuristic is likely to find a very good path, if not the best. Quality +// of the path returned degrades gracefully with the quality of the heuristic. +// +// The heuristic function h should ideally be fairly inexpensive. AStarA +// may call it more than once for the same node, especially as graph density +// increases. In some cases it may be worth the effort to memoize or +// precompute values. +// +// Argument g is the graph to be searched, with arc weights returned by w. +// As usual for AStar, arc weights must be non-negative. +// Graphs may be directed or undirected. +// +// If AStarA finds a path it returns a FromList encoding the path, the arc +// labels for path nodes, the total path distance, and ok = true. +// Otherwise it returns ok = false. +func (g LabeledAdjacencyList) AStarA(w WeightFunc, start, end NI, h Heuristic) (f FromList, labels []LI, dist float64, ok bool) { + // NOTE: AStarM is largely duplicate code. + + f = NewFromList(len(g)) + labels = make([]LI, len(g)) + d := make([]float64, len(g)) + r := make([]rNode, len(g)) + for i := range r { + r[i].nx = NI(i) + } + // start node is reached initially + cr := &r[start] + cr.state = reached + cr.f = h(start) // total path estimate is estimate from start + rp := f.Paths + rp[start] = PathEnd{Len: 1, From: -1} // path length at start is 1 node + // oh is a heap of nodes "open" for exploration. nodes go on the heap + // when they get an initial or new "g" path distance, and therefore a + // new "f" which serves as priority for exploration. 
+ oh := openHeap{cr} + for len(oh) > 0 { + bestPath := heap.Pop(&oh).(*rNode) + bestNode := bestPath.nx + if bestNode == end { + return f, labels, d[end], true + } + bp := &rp[bestNode] + nextLen := bp.Len + 1 + for _, nb := range g[bestNode] { + alt := &r[nb.To] + ap := &rp[alt.nx] + // "g" path distance from start + g := d[bestNode] + w(nb.Label) + if alt.state == reached { + if g > d[nb.To] { + // candidate path to nb is longer than some alternate path + continue + } + if g == d[nb.To] && nextLen >= ap.Len { + // candidate path has identical length of some alternate + // path but it takes no fewer hops. + continue + } + // cool, we found a better way to get to this node. + // record new path data for this node and + // update alt with new data and make sure it's on the heap. + *ap = PathEnd{From: bestNode, Len: nextLen} + labels[nb.To] = nb.Label + d[nb.To] = g + alt.f = g + h(nb.To) + if alt.fx < 0 { + heap.Push(&oh, alt) + } else { + heap.Fix(&oh, alt.fx) + } + } else { + // bestNode being reached for the first time. + *ap = PathEnd{From: bestNode, Len: nextLen} + labels[nb.To] = nb.Label + d[nb.To] = g + alt.f = g + h(nb.To) + alt.state = reached + heap.Push(&oh, alt) // and it's now open for exploration + } + } + } + return // no path +} + +// AStarAPath finds a shortest path using the AStarA algorithm. +// +// This is a convenience method with a simpler result than the AStarA method. +// See documentation on the AStarA method. +// +// If a path is found, the non-nil node path is returned with the total path +// distance. Otherwise the returned path will be nil. +func (g LabeledAdjacencyList) AStarAPath(start, end NI, h Heuristic, w WeightFunc) ([]NI, float64) { + f, _, d, _ := g.AStarA(w, start, end, h) + return f.PathTo(end, nil), d +} + +// AStarM is AStarA optimized for monotonic heuristic estimates. +// +// Note that this function requires a monotonic heuristic. Results will +// not be meaningful if argument h is non-monotonic. 
+// +// See AStarA for general usage. See Heuristic for notes on monotonicity. +func (g LabeledAdjacencyList) AStarM(w WeightFunc, start, end NI, h Heuristic) (f FromList, labels []LI, dist float64, ok bool) { + // NOTE: AStarM is largely code duplicated from AStarA. + // Differences are noted in comments in this method. + + f = NewFromList(len(g)) + labels = make([]LI, len(g)) + d := make([]float64, len(g)) + r := make([]rNode, len(g)) + for i := range r { + r[i].nx = NI(i) + } + cr := &r[start] + + // difference from AStarA: + // instead of a bit to mark a reached node, there are two states, + // open and closed. open marks nodes "open" for exploration. + // nodes are marked open as they are reached, then marked + // closed as they are found to be on the best path. + cr.state = open + + cr.f = h(start) + rp := f.Paths + rp[start] = PathEnd{Len: 1, From: -1} + oh := openHeap{cr} + for len(oh) > 0 { + bestPath := heap.Pop(&oh).(*rNode) + bestNode := bestPath.nx + if bestNode == end { + return f, labels, d[end], true + } + + // difference from AStarA: + // move nodes to closed list as they are found to be best so far. + bestPath.state = closed + + bp := &rp[bestNode] + nextLen := bp.Len + 1 + for _, nb := range g[bestNode] { + alt := &r[nb.To] + + // difference from AStarA: + // Monotonicity means that f cannot be improved. 
+ if alt.state == closed { + continue + } + + ap := &rp[alt.nx] + g := d[bestNode] + w(nb.Label) + + // difference from AStarA: + // test for open state, not just reached + if alt.state == open { + + if g > d[nb.To] { + continue + } + if g == d[nb.To] && nextLen >= ap.Len { + continue + } + *ap = PathEnd{From: bestNode, Len: nextLen} + labels[nb.To] = nb.Label + d[nb.To] = g + alt.f = g + h(nb.To) + + // difference from AStarA: + // we know alt was on the heap because we found it marked open + heap.Fix(&oh, alt.fx) + } else { + *ap = PathEnd{From: bestNode, Len: nextLen} + labels[nb.To] = nb.Label + d[nb.To] = g + alt.f = g + h(nb.To) + + // difference from AStarA: + // nodes are opened when first reached + alt.state = open + heap.Push(&oh, alt) + } + } + } + return +} + +// AStarMPath finds a shortest path using the AStarM algorithm. +// +// This is a convenience method with a simpler result than the AStarM method. +// See documentation on the AStarM and AStarA methods. +// +// If a path is found, the non-nil node path is returned with the total path +// distance. Otherwise the returned path will be nil. +func (g LabeledAdjacencyList) AStarMPath(start, end NI, h Heuristic, w WeightFunc) ([]NI, float64) { + f, _, d, _ := g.AStarM(w, start, end, h) + return f.PathTo(end, nil), d +} + +// implement container/heap +func (h openHeap) Len() int { return len(h) } +func (h openHeap) Less(i, j int) bool { return h[i].f < h[j].f } +func (h openHeap) Swap(i, j int) { + h[i], h[j] = h[j], h[i] + h[i].fx = i + h[j].fx = j +} +func (p *openHeap) Push(x interface{}) { + h := *p + fx := len(h) + h = append(h, x.(*rNode)) + h[fx].fx = fx + *p = h +} + +func (p *openHeap) Pop() interface{} { + h := *p + last := len(h) - 1 + *p = h[:last] + h[last].fx = -1 + return h[last] +} + +// BellmanFord finds shortest paths from a start node in a weighted directed +// graph using the Bellman-Ford-Moore algorithm. +// +// WeightFunc w must translate arc labels to arc weights. 
+// Negative arc weights are allowed but not negative cycles. +// Loops and parallel arcs are allowed. +// +// If the algorithm completes without encountering a negative cycle the method +// returns shortest paths encoded in a FromList, path distances indexed by +// node, and return value end = -1. +// +// If it encounters a negative cycle reachable from start it returns end >= 0. +// In this case the cycle can be obtained by calling f.BellmanFordCycle(end). +// +// Negative cycles are only detected when reachable from start. A negative +// cycle not reachable from start will not prevent the algorithm from finding +// shortest paths from start. +// +// See also NegativeCycle to find a cycle anywhere in the graph, and see +// HasNegativeCycle for lighter-weight negative cycle detection, +func (g LabeledDirected) BellmanFord(w WeightFunc, start NI) (f FromList, dist []float64, end NI) { + a := g.LabeledAdjacencyList + f = NewFromList(len(a)) + dist = make([]float64, len(a)) + inf := math.Inf(1) + for i := range dist { + dist[i] = inf + } + rp := f.Paths + rp[start] = PathEnd{Len: 1, From: -1} + dist[start] = 0 + for _ = range a[1:] { + imp := false + for from, nbs := range a { + fp := &rp[from] + d1 := dist[from] + for _, nb := range nbs { + d2 := d1 + w(nb.Label) + to := &rp[nb.To] + // TODO improve to break ties + if fp.Len > 0 && d2 < dist[nb.To] { + *to = PathEnd{From: NI(from), Len: fp.Len + 1} + dist[nb.To] = d2 + imp = true + } + } + } + if !imp { + break + } + } + for from, nbs := range a { + d1 := dist[from] + for _, nb := range nbs { + if d1+w(nb.Label) < dist[nb.To] { + // return nb as end of a path with negative cycle at root + return f, dist, NI(from) + } + } + } + return f, dist, -1 +} + +// BellmanFordCycle decodes a negative cycle detected by BellmanFord. +// +// Receiver f and argument end must be results returned from BellmanFord. 
+func (f FromList) BellmanFordCycle(end NI) (c []NI) { + p := f.Paths + var b Bits + for b.Bit(end) == 0 { + b.SetBit(end, 1) + end = p[end].From + } + for b.Bit(end) == 1 { + c = append(c, end) + b.SetBit(end, 0) + end = p[end].From + } + for i, j := 0, len(c)-1; i < j; i, j = i+1, j-1 { + c[i], c[j] = c[j], c[i] + } + return +} + +// HasNegativeCycle returns true if the graph contains any negative cycle. +// +// HasNegativeCycle uses a Bellman-Ford-like algorithm, but finds negative +// cycles anywhere in the graph. Also path information is not computed, +// reducing memory use somewhat compared to BellmanFord. +// +// See also NegativeCycle to obtain the cycle, and see BellmanFord for +// single source shortest path searches. +func (g LabeledDirected) HasNegativeCycle(w WeightFunc) bool { + a := g.LabeledAdjacencyList + dist := make([]float64, len(a)) + for _ = range a[1:] { + imp := false + for from, nbs := range a { + d1 := dist[from] + for _, nb := range nbs { + d2 := d1 + w(nb.Label) + if d2 < dist[nb.To] { + dist[nb.To] = d2 + imp = true + } + } + } + if !imp { + break + } + } + for from, nbs := range a { + d1 := dist[from] + for _, nb := range nbs { + if d1+w(nb.Label) < dist[nb.To] { + return true // negative cycle + } + } + } + return false +} + +// NegativeCycle finds a negative cycle if one exists. +// +// NegativeCycle uses a Bellman-Ford-like algorithm, but finds negative +// cycles anywhere in the graph. If a negative cycle exists, one will be +// returned. The result is nil if no negative cycle exists. +// +// See also HasNegativeCycle for lighter-weight cycle detection, and see +// BellmanFord for single source shortest paths. 
+func (g LabeledDirected) NegativeCycle(w WeightFunc) (c []NI) { + a := g.LabeledAdjacencyList + f := NewFromList(len(a)) + p := f.Paths + for n := range p { + p[n] = PathEnd{From: -1, Len: 1} + } + dist := make([]float64, len(a)) + for _ = range a { + imp := false + for from, nbs := range a { + fp := &p[from] + d1 := dist[from] + for _, nb := range nbs { + d2 := d1 + w(nb.Label) + to := &p[nb.To] + if fp.Len > 0 && d2 < dist[nb.To] { + *to = PathEnd{From: NI(from), Len: fp.Len + 1} + dist[nb.To] = d2 + imp = true + } + } + } + if !imp { + return nil + } + } + var vis Bits +a: + for n := range a { + end := NI(n) + var b Bits + for b.Bit(end) == 0 { + if vis.Bit(end) == 1 { + continue a + } + vis.SetBit(end, 1) + b.SetBit(end, 1) + end = p[end].From + if end < 0 { + continue a + } + } + for b.Bit(end) == 1 { + c = append(c, end) + b.SetBit(end, 0) + end = p[end].From + } + for i, j := 0, len(c)-1; i < j; i, j = i+1, j-1 { + c[i], c[j] = c[j], c[i] + } + return c + } + return nil // no negative cycle +} + +// A NodeVisitor is an argument to some graph traversal methods. +// +// Graph traversal methods call the visitor function for each node visited. +// Argument n is the node being visited. +type NodeVisitor func(n NI) + +// An OkNodeVisitor function is an argument to some graph traversal methods. +// +// Graph traversal methods call the visitor function for each node visited. +// The argument n is the node being visited. If the visitor function +// returns true, the traversal will continue. If the visitor function +// returns false, the traversal will terminate immediately. +type OkNodeVisitor func(n NI) (ok bool) + +// BreadthFirst2 traverses a graph breadth first using a direction +// optimizing algorithm. +// +// The code is experimental and currently seems no faster than the +// conventional breadth first code. +// +// Use AdjacencyList.BreadthFirst instead. 
+func BreadthFirst2(g, tr AdjacencyList, ma int, start NI, f *FromList, v OkNodeVisitor) int { + if tr == nil { + var d Directed + d, ma = Directed{g}.Transpose() + tr = d.AdjacencyList + } + switch { + case f == nil: + e := NewFromList(len(g)) + f = &e + case f.Paths == nil: + *f = NewFromList(len(g)) + } + if ma <= 0 { + ma = g.ArcSize() + } + rp := f.Paths + level := 1 + rp[start] = PathEnd{Len: level, From: -1} + if !v(start) { + f.MaxLen = level + return -1 + } + nReached := 1 // accumulated for a return value + // the frontier consists of nodes all at the same level + frontier := []NI{start} + mf := len(g[start]) // number of arcs leading out from frontier + ctb := ma / 10 // threshold change from top-down to bottom-up + k14 := 14 * ma / len(g) // 14 * mean degree + cbt := len(g) / k14 // threshold change from bottom-up to top-down + // var fBits, nextb big.Int + fBits := make([]bool, len(g)) + nextb := make([]bool, len(g)) + zBits := make([]bool, len(g)) + for { + // top down step + level++ + var next []NI + for _, n := range frontier { + for _, nb := range g[n] { + if rp[nb].Len == 0 { + rp[nb] = PathEnd{From: n, Len: level} + if !v(nb) { + f.MaxLen = level + return -1 + } + next = append(next, nb) + nReached++ + } + } + } + if len(next) == 0 { + break + } + frontier = next + if mf > ctb { + // switch to bottom up! 
+ } else { + // stick with top down + continue + } + // convert frontier representation + nf := 0 // number of vertices on the frontier + for _, n := range frontier { + // fBits.SetBit(&fBits, n, 1) + fBits[n] = true + nf++ + } + bottomUpLoop: + level++ + nNext := 0 + for n := range tr { + if rp[n].Len == 0 { + for _, nb := range tr[n] { + // if fBits.Bit(nb) == 1 { + if fBits[nb] { + rp[n] = PathEnd{From: nb, Len: level} + if !v(nb) { + f.MaxLen = level + return -1 + } + // nextb.SetBit(&nextb, n, 1) + nextb[n] = true + nReached++ + nNext++ + break + } + } + } + } + if nNext == 0 { + break + } + fBits, nextb = nextb, fBits + // nextb.SetInt64(0) + copy(nextb, zBits) + nf = nNext + if nf < cbt { + // switch back to top down! + } else { + // stick with bottom up + goto bottomUpLoop + } + // convert frontier representation + mf = 0 + frontier = frontier[:0] + for n := range g { + // if fBits.Bit(n) == 1 { + if fBits[n] { + frontier = append(frontier, NI(n)) + mf += len(g[n]) + fBits[n] = false + } + } + // fBits.SetInt64(0) + } + f.MaxLen = level - 1 + return nReached +} + +// DAGMinDistPath finds a single shortest path. +// +// Shortest means minimum sum of arc weights. +// +// Returned is the path and distance as returned by FromList.PathTo. +// +// This is a convenience method. See DAGOptimalPaths for more options. +func (g LabeledDirected) DAGMinDistPath(start, end NI, w WeightFunc) ([]NI, float64, error) { + return g.dagPath(start, end, w, false) +} + +// DAGMaxDistPath finds a single longest path. +// +// Longest means maximum sum of arc weights. +// +// Returned is the path and distance as returned by FromList.PathTo. +// +// This is a convenience method. See DAGOptimalPaths for more options. 
+func (g LabeledDirected) DAGMaxDistPath(start, end NI, w WeightFunc) ([]NI, float64, error) {
+	return g.dagPath(start, end, w, true)
+}
+
+func (g LabeledDirected) dagPath(start, end NI, w WeightFunc, longest bool) ([]NI, float64, error) {
+	o, _ := g.Topological()
+	if o == nil {
+		return nil, 0, fmt.Errorf("not a DAG")
+	}
+	f, dist, _ := g.DAGOptimalPaths(start, end, o, w, longest)
+	if f.Paths[end].Len == 0 {
+		return nil, 0, fmt.Errorf("no path from %d to %d", start, end)
+	}
+	return f.PathTo(end, nil), dist[end], nil
+}
+
+// DAGOptimalPaths finds either longest or shortest distance paths in a
+// directed acyclic graph.
+//
+// Path distance is the sum of arc weights on the path.
+// Negative arc weights are allowed.
+// Where multiple paths exist with the same distance, the path length
+// (number of nodes) is used as a tie breaker.
+//
+// Receiver g must be a directed acyclic graph. Argument o must be either nil
+// or a topological ordering of g. If nil, a topological ordering is
+// computed internally. If longest is true, an optimal path is a longest
+// distance path. Otherwise it is a shortest distance path.
+//
+// Argument start is the start node for paths, end is the end node. If end
+// is a valid node number, the method returns as soon as the optimal path
+// to end is found. If end is -1, all optimal paths from start are found.
+//
+// Paths and path distances are encoded in the returned FromList and dist
+// slice. The number of nodes reached is returned as nReached.
+func (g LabeledDirected) DAGOptimalPaths(start, end NI, ordering []NI, w WeightFunc, longest bool) (f FromList, dist []float64, nReached int) { + a := g.LabeledAdjacencyList + f = NewFromList(len(a)) + dist = make([]float64, len(a)) + if ordering == nil { + ordering, _ = g.Topological() + } + // search ordering for start + o := 0 + for ordering[o] != start { + o++ + } + var fBetter func(cand, ext float64) bool + var iBetter func(cand, ext int) bool + if longest { + fBetter = func(cand, ext float64) bool { return cand > ext } + iBetter = func(cand, ext int) bool { return cand > ext } + } else { + fBetter = func(cand, ext float64) bool { return cand < ext } + iBetter = func(cand, ext int) bool { return cand < ext } + } + p := f.Paths + p[start] = PathEnd{From: -1, Len: 1} + f.MaxLen = 1 + leaves := &f.Leaves + leaves.SetBit(start, 1) + nReached = 1 + for n := start; n != end; n = ordering[o] { + if p[n].Len > 0 && len(a[n]) > 0 { + nDist := dist[n] + candLen := p[n].Len + 1 // len for any candidate arc followed from n + for _, to := range a[n] { + leaves.SetBit(to.To, 1) + candDist := nDist + w(to.Label) + switch { + case p[to.To].Len == 0: // first path to node to.To + nReached++ + case fBetter(candDist, dist[to.To]): // better distance + case candDist == dist[to.To] && iBetter(candLen, p[to.To].Len): // same distance but better path length + default: + continue + } + dist[to.To] = candDist + p[to.To] = PathEnd{From: n, Len: candLen} + if candLen > f.MaxLen { + f.MaxLen = candLen + } + } + leaves.SetBit(n, 0) + } + o++ + if o == len(ordering) { + break + } + } + return +} + +// Dijkstra finds shortest paths by Dijkstra's algorithm. +// +// Shortest means shortest distance where distance is the +// sum of arc weights. Where multiple paths exist with the same distance, +// a path with the minimum number of nodes is returned. +// +// As usual for Dijkstra's algorithm, arc weights must be non-negative. +// Graphs may be directed or undirected. 
Loops and parallel arcs are +// allowed. +func (g LabeledAdjacencyList) Dijkstra(start, end NI, w WeightFunc) (f FromList, dist []float64, reached int) { + r := make([]tentResult, len(g)) + for i := range r { + r[i].nx = NI(i) + } + f = NewFromList(len(g)) + dist = make([]float64, len(g)) + current := start + rp := f.Paths + rp[current] = PathEnd{Len: 1, From: -1} // path length at start is 1 node + cr := &r[current] + cr.dist = 0 // distance at start is 0. + cr.done = true // mark start done. it skips the heap. + nDone := 1 // accumulated for a return value + var t tent + for current != end { + nextLen := rp[current].Len + 1 + for _, nb := range g[current] { + // d.arcVis++ + hr := &r[nb.To] + if hr.done { + continue // skip nodes already done + } + dist := cr.dist + w(nb.Label) + vl := rp[nb.To].Len + visited := vl > 0 + if visited { + if dist > hr.dist { + continue // distance is worse + } + // tie breaker is a nice touch and doesn't seem to + // impact performance much. + if dist == hr.dist && nextLen >= vl { + continue // distance same, but number of nodes is no better + } + } + // the path through current to this node is shortest so far. + // record new path data for this node and update tentative set. + hr.dist = dist + rp[nb.To].Len = nextLen + rp[nb.To].From = current + if visited { + heap.Fix(&t, hr.fx) + } else { + heap.Push(&t, hr) + } + } + //d.ndVis++ + if len(t) == 0 { + return f, dist, nDone // no more reachable nodes. AllPaths normal return + } + // new current is node with smallest tentative distance + cr = heap.Pop(&t).(*tentResult) + cr.done = true + nDone++ + current = cr.nx + dist[current] = cr.dist // store final distance + } + // normal return for single shortest path search + return f, dist, -1 +} + +// DijkstraPath finds a single shortest path. +// +// Returned is the path and distance as returned by FromList.PathTo. 
+func (g LabeledAdjacencyList) DijkstraPath(start, end NI, w WeightFunc) ([]NI, float64) { + f, dist, _ := g.Dijkstra(start, end, w) + return f.PathTo(end, nil), dist[end] +} + +// tent implements container/heap +func (t tent) Len() int { return len(t) } +func (t tent) Less(i, j int) bool { return t[i].dist < t[j].dist } +func (t tent) Swap(i, j int) { + t[i], t[j] = t[j], t[i] + t[i].fx = i + t[j].fx = j +} +func (s *tent) Push(x interface{}) { + nd := x.(*tentResult) + nd.fx = len(*s) + *s = append(*s, nd) +} +func (s *tent) Pop() interface{} { + t := *s + last := len(t) - 1 + *s = t[:last] + return t[last] +} + +type tentResult struct { + dist float64 // tentative distance, sum of arc weights + nx NI // slice index, "node id" + fx int // heap.Fix index + done bool +} + +type tent []*tentResult diff --git a/vendor/github.com/soniakeys/graph/travis.sh b/vendor/github.com/soniakeys/graph/travis.sh new file mode 100644 index 00000000..5a8030ac --- /dev/null +++ b/vendor/github.com/soniakeys/graph/travis.sh @@ -0,0 +1,11 @@ +#!/bin/bash +set -ex +go test ./... +if [ "$TRAVIS_GO_VERSION" = "1.6" ]; then + GOARCH=386 go test ./... + go tool vet -example . + go get github.com/client9/misspell/cmd/misspell + go get github.com/soniakeys/vetc + misspell -error * */* */*/* + vetc +fi diff --git a/vendor/github.com/soniakeys/graph/undir.go b/vendor/github.com/soniakeys/graph/undir.go new file mode 100644 index 00000000..75a7f248 --- /dev/null +++ b/vendor/github.com/soniakeys/graph/undir.go @@ -0,0 +1,321 @@ +// Copyright 2014 Sonia Keys +// License MIT: http://opensource.org/licenses/MIT + +package graph + +// undir.go has methods specific to undirected graphs, Undirected and +// LabeledUndirected. + +import "errors" + +// AddEdge adds an edge to a graph. +// +// It can be useful for constructing undirected graphs. +// +// When n1 and n2 are distinct, it adds the arc n1->n2 and the reciprocal +// n2->n1. When n1 and n2 are the same, it adds a single arc loop. 
+// +// The pointer receiver allows the method to expand the graph as needed +// to include the values n1 and n2. If n1 or n2 happen to be greater than +// len(*p) the method does not panic, but simply expands the graph. +func (p *Undirected) AddEdge(n1, n2 NI) { + // Similar code in LabeledAdjacencyList.AddEdge. + + // determine max of the two end points + max := n1 + if n2 > max { + max = n2 + } + // expand graph if needed, to include both + g := p.AdjacencyList + if int(max) >= len(g) { + p.AdjacencyList = make(AdjacencyList, max+1) + copy(p.AdjacencyList, g) + g = p.AdjacencyList + } + // create one half-arc, + g[n1] = append(g[n1], n2) + // and except for loops, create the reciprocal + if n1 != n2 { + g[n2] = append(g[n2], n1) + } +} + +// EulerianCycleD for undirected graphs is a bit of an experiment. +// +// It is about the same as the directed version, but modified for an undirected +// multigraph. +// +// Parameter m in this case must be the size of the undirected graph -- the +// number of edges. Use Undirected.Size if the size is unknown. +// +// It works, but contains an extra loop that I think spoils the time +// complexity. Probably still pretty fast in practice, but a different +// graph representation might be better. +func (g Undirected) EulerianCycleD(m int) ([]NI, error) { + if len(g.AdjacencyList) == 0 { + return nil, nil + } + e := newEulerian(g.AdjacencyList, m) + for e.s >= 0 { + v := e.top() + e.pushUndir() // call modified method + if e.top() != v { + return nil, errors.New("not balanced") + } + e.keep() + } + if !e.uv.Zero() { + return nil, errors.New("not strongly connected") + } + return e.p, nil +} + +// TarjanBiconnectedComponents decomposes a graph into maximal biconnected +// components, components for which if any node were removed the component +// would remain connected. +// +// The receiver g must be a simple graph. The method calls the emit argument +// for each component identified, as long as emit returns true. 
If emit +// returns false, TarjanBiconnectedComponents returns immediately. +// +// See also the equivalent labeled TarjanBiconnectedComponents. +func (g Undirected) TarjanBiconnectedComponents(emit func([]Edge) bool) { + // Implemented closely to pseudocode in "Depth-first search and linear + // graph algorithms", Robert Tarjan, SIAM J. Comput. Vol. 1, No. 2, + // June 1972. + // + // Note Tarjan's "adjacency structure" is graph.AdjacencyList, + // His "adjacency list" is an element of a graph.AdjacencyList, also + // termed a "to-list", "neighbor list", or "child list." + number := make([]int, len(g.AdjacencyList)) + lowpt := make([]int, len(g.AdjacencyList)) + var stack []Edge + var i int + var biconnect func(NI, NI) bool + biconnect = func(v, u NI) bool { + i++ + number[v] = i + lowpt[v] = i + for _, w := range g.AdjacencyList[v] { + if number[w] == 0 { + stack = append(stack, Edge{v, w}) + if !biconnect(w, v) { + return false + } + if lowpt[w] < lowpt[v] { + lowpt[v] = lowpt[w] + } + if lowpt[w] >= number[v] { + var bcc []Edge + top := len(stack) - 1 + for number[stack[top].N1] >= number[w] { + bcc = append(bcc, stack[top]) + stack = stack[:top] + top-- + } + bcc = append(bcc, stack[top]) + stack = stack[:top] + top-- + if !emit(bcc) { + return false + } + } + } else if number[w] < number[v] && w != u { + stack = append(stack, Edge{v, w}) + if number[w] < lowpt[v] { + lowpt[v] = number[w] + } + } + } + return true + } + for w := range g.AdjacencyList { + if number[w] == 0 && !biconnect(NI(w), 0) { + return + } + } +} + +/* half-baked. Read the 72 paper. Maybe revisit at some point.
+type BiconnectedComponents struct { + Graph AdjacencyList + Start int + Cuts big.Int // bitmap of node cuts + From []int // from-tree + Leaves []int // leaves of from-tree +} + +func NewBiconnectedComponents(g Undirected) *BiconnectedComponents { + return &BiconnectedComponents{ + Graph: g, + From: make([]int, len(g)), + } +} + +func (b *BiconnectedComponents) Find(start int) { + g := b.Graph + depth := make([]int, len(g)) + low := make([]int, len(g)) + // reset from any previous run + b.Cuts.SetInt64(0) + bf := b.From + for n := range bf { + bf[n] = -1 + } + b.Leaves = b.Leaves[:0] + d := 1 // depth. d > 0 means visited + depth[start] = d + low[start] = d + d++ + var df func(int, int) + df = func(from, n int) { + bf[n] = from + depth[n] = d + dn := d + l := d + d++ + cut := false + leaf := true + for _, nb := range g[n] { + if depth[nb] == 0 { + leaf = false + df(n, nb) + if low[nb] < l { + l = low[nb] + } + if low[nb] >= dn { + cut = true + } + } else if nb != from && depth[nb] < l { + l = depth[nb] + } + } + low[n] = l + if cut { + b.Cuts.SetBit(&b.Cuts, n, 1) + } + if leaf { + b.Leaves = append(b.Leaves, n) + } + d-- + } + nbs := g[start] + if len(nbs) == 0 { + return + } + df(start, nbs[0]) + var rc uint + for _, nb := range nbs[1:] { + if depth[nb] == 0 { + rc = 1 + df(start, nb) + } + } + b.Cuts.SetBit(&b.Cuts, start, rc) + return +} +*/ + +// AddEdge adds an edge to a labeled graph. +// +// It can be useful for constructing undirected graphs. +// +// When n1 and n2 are distinct, it adds the arc n1->n2 and the reciprocal +// n2->n1. When n1 and n2 are the same, it adds a single arc loop. +// +// If the edge already exists in *p, a parallel edge is added. +// +// The pointer receiver allows the method to expand the graph as needed +// to include the values n1 and n2. If n1 or n2 happen to be greater than +// len(*p) the method does not panic, but simply expands the graph. 
+func (p *LabeledUndirected) AddEdge(e Edge, l LI) { + // Similar code in AdjacencyList.AddEdge. + + // determine max of the two end points + max := e.N1 + if e.N2 > max { + max = e.N2 + } + // expand graph if needed, to include both + g := p.LabeledAdjacencyList + if max >= NI(len(g)) { + p.LabeledAdjacencyList = make(LabeledAdjacencyList, max+1) + copy(p.LabeledAdjacencyList, g) + g = p.LabeledAdjacencyList + } + // create one half-arc, + g[e.N1] = append(g[e.N1], Half{To: e.N2, Label: l}) + // and except for loops, create the reciprocal + if e.N1 != e.N2 { + g[e.N2] = append(g[e.N2], Half{To: e.N1, Label: l}) + } +} + +// TarjanBiconnectedComponents decomposes a graph into maximal biconnected +// components, components for which if any node were removed the component +// would remain connected. +// +// The receiver g must be a simple graph. The method calls the emit argument +// for each component identified, as long as emit returns true. If emit +// returns false, TarjanBiconnectedComponents returns immediately. +// +// See also the equivalent unlabeled TarjanBiconnectedComponents. +func (g LabeledUndirected) TarjanBiconnectedComponents(emit func([]LabeledEdge) bool) { + // Implemented closely to pseudocode in "Depth-first search and linear + // graph algorithms", Robert Tarjan, SIAM J. Comput. Vol. 1, No. 2, + // June 1972. + // + // Note Tarjan's "adjacency structure" is graph.AdjacencyList, + // His "adjacency list" is an element of a graph.AdjacencyList, also + // termed a "to-list", "neighbor list", or "child list." + // + // Nearly identical code in undir.go.
+ number := make([]int, len(g.LabeledAdjacencyList)) + lowpt := make([]int, len(g.LabeledAdjacencyList)) + var stack []LabeledEdge + var i int + var biconnect func(NI, NI) bool + biconnect = func(v, u NI) bool { + i++ + number[v] = i + lowpt[v] = i + for _, w := range g.LabeledAdjacencyList[v] { + if number[w.To] == 0 { + stack = append(stack, LabeledEdge{Edge{v, w.To}, w.Label}) + if !biconnect(w.To, v) { + return false + } + if lowpt[w.To] < lowpt[v] { + lowpt[v] = lowpt[w.To] + } + if lowpt[w.To] >= number[v] { + var bcc []LabeledEdge + top := len(stack) - 1 + for number[stack[top].N1] >= number[w.To] { + bcc = append(bcc, stack[top]) + stack = stack[:top] + top-- + } + bcc = append(bcc, stack[top]) + stack = stack[:top] + top-- + if !emit(bcc) { + return false + } + } + } else if number[w.To] < number[v] && w.To != u { + stack = append(stack, LabeledEdge{Edge{v, w.To}, w.Label}) + if number[w.To] < lowpt[v] { + lowpt[v] = number[w.To] + } + } + } + return true + } + for w := range g.LabeledAdjacencyList { + if number[w] == 0 && !biconnect(NI(w), 0) { + return + } + } +} diff --git a/vendor/github.com/soniakeys/graph/undir_RO.go b/vendor/github.com/soniakeys/graph/undir_RO.go new file mode 100644 index 00000000..fd8e3778 --- /dev/null +++ b/vendor/github.com/soniakeys/graph/undir_RO.go @@ -0,0 +1,659 @@ +// Copyright 2014 Sonia Keys +// License MIT: http://opensource.org/licenses/MIT + +package graph + +// undir_RO.go is code generated from undir_cg.go by directives in graph.go. +// Editing undir_cg.go is okay. It is the code generation source. +// DO NOT EDIT undir_RO.go. +// The RO means read only and it is upper case RO to slow you down a bit +// in case you start to edit the file. + +// Bipartite determines if a connected component of an undirected graph +// is bipartite, a component where nodes can be partitioned into two sets +// such that every edge in the component goes from one set to the other. 
+// +// Argument n can be any representative node of the component. +// +// If the component is bipartite, Bipartite returns true and a two-coloring +// of the component. Each color set is returned as a bitmap. If the component +// is not bipartite, Bipartite returns false and a representative odd cycle. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g Undirected) Bipartite(n NI) (b bool, c1, c2 Bits, oc []NI) { + b = true + var open bool + var df func(n NI, c1, c2 *Bits) + df = func(n NI, c1, c2 *Bits) { + c1.SetBit(n, 1) + for _, nb := range g.AdjacencyList[n] { + if c1.Bit(nb) == 1 { + b = false + oc = []NI{nb, n} + open = true + return + } + if c2.Bit(nb) == 1 { + continue + } + df(nb, c2, c1) + if b { + continue + } + switch { + case !open: + case n == oc[0]: + open = false + default: + oc = append(oc, n) + } + return + } + } + df(n, &c1, &c2) + if b { + return b, c1, c2, nil + } + return b, Bits{}, Bits{}, oc +} + +// BronKerbosch1 finds maximal cliques in an undirected graph. +// +// The graph must not contain parallel edges or loops. +// +// See https://en.wikipedia.org/wiki/Clique_(graph_theory) and +// https://en.wikipedia.org/wiki/Bron%E2%80%93Kerbosch_algorithm for background. +// +// This method implements the BronKerbosch1 algorithm of WP; that is, +// the original algorithm without improvements. +// +// The method calls the emit argument for each maximal clique in g, as long +// as emit returns true. If emit returns false, BronKerbosch1 returns +// immediately. +// +// There are equivalent labeled and unlabeled versions of this method. +// +// See also more sophisticated variants BronKerbosch2 and BronKerbosch3. 
+func (g Undirected) BronKerbosch1(emit func([]NI) bool) { + a := g.AdjacencyList + var f func(R, P, X *Bits) bool + f = func(R, P, X *Bits) bool { + switch { + case !P.Zero(): + var r2, p2, x2 Bits + pf := func(n NI) bool { + r2.Set(*R) + r2.SetBit(n, 1) + p2.Clear() + x2.Clear() + for _, to := range a[n] { + if P.Bit(to) == 1 { + p2.SetBit(to, 1) + } + if X.Bit(to) == 1 { + x2.SetBit(to, 1) + } + } + if !f(&r2, &p2, &x2) { + return false + } + P.SetBit(n, 0) + X.SetBit(n, 1) + return true + } + if !P.Iterate(pf) { + return false + } + case X.Zero(): + return emit(R.Slice()) + } + return true + } + var R, P, X Bits + P.SetAll(len(a)) + f(&R, &P, &X) +} + +// BKPivotMaxDegree is a strategy for BronKerbosch methods. +// +// To use it, take the method value (see golang.org/ref/spec#Method_values) +// and pass it as the argument to BronKerbosch2 or 3. +// +// The strategy is to pick the node from P or X with the maximum degree +// (number of edges) in g. Note this is a shortcut from evaluating degrees +// in P. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g Undirected) BKPivotMaxDegree(P, X *Bits) (p NI) { + // choose pivot u as highest degree node from P or X + a := g.AdjacencyList + maxDeg := -1 + P.Iterate(func(n NI) bool { // scan P + if d := len(a[n]); d > maxDeg { + p = n + maxDeg = d + } + return true + }) + X.Iterate(func(n NI) bool { // scan X + if d := len(a[n]); d > maxDeg { + p = n + maxDeg = d + } + return true + }) + return +} + +// BKPivotMinP is a strategy for BronKerbosch methods. +// +// To use it, take the method value (see golang.org/ref/spec#Method_values) +// and pass it as the argument to BronKerbosch2 or 3. +// +// The strategy is to simply pick the first node in P. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g Undirected) BKPivotMinP(P, X *Bits) NI { + return P.From(0) +} + +// BronKerbosch2 finds maximal cliques in an undirected graph. 
+// +// The graph must not contain parallel edges or loops. +// +// See https://en.wikipedia.org/wiki/Clique_(graph_theory) and +// https://en.wikipedia.org/wiki/Bron%E2%80%93Kerbosch_algorithm for background. +// +// This method implements the BronKerbosch2 algorithm of WP; that is, +// the original algorithm plus pivoting. +// +// The argument is a pivot function that must return a node of P or X. +// P is guaranteed to contain at least one node. X is not. +// For example see BKPivotMaxDegree. +// +// The method calls the emit argument for each maximal clique in g, as long +// as emit returns true. If emit returns false, BronKerbosch1 returns +// immediately. +// +// There are equivalent labeled and unlabeled versions of this method. +// +// See also simpler variant BronKerbosch1 and more sophisticated variant +// BronKerbosch3. +func (g Undirected) BronKerbosch2(pivot func(P, X *Bits) NI, emit func([]NI) bool) { + a := g.AdjacencyList + var f func(R, P, X *Bits) bool + f = func(R, P, X *Bits) bool { + switch { + case !P.Zero(): + var r2, p2, x2, pnu Bits + // compute P \ N(u). next 5 lines are only difference from BK1 + pnu.Set(*P) + for _, to := range a[pivot(P, X)] { + pnu.SetBit(to, 0) + } + // remaining code like BK1 + pf := func(n NI) bool { + r2.Set(*R) + r2.SetBit(n, 1) + p2.Clear() + x2.Clear() + for _, to := range a[n] { + if P.Bit(to) == 1 { + p2.SetBit(to, 1) + } + if X.Bit(to) == 1 { + x2.SetBit(to, 1) + } + } + if !f(&r2, &p2, &x2) { + return false + } + P.SetBit(n, 0) + X.SetBit(n, 1) + return true + } + if !pnu.Iterate(pf) { + return false + } + case X.Zero(): + return emit(R.Slice()) + } + return true + } + var R, P, X Bits + P.SetAll(len(a)) + f(&R, &P, &X) +} + +// BronKerbosch3 finds maximal cliques in an undirected graph. +// +// The graph must not contain parallel edges or loops. +// +// See https://en.wikipedia.org/wiki/Clique_(graph_theory) and +// https://en.wikipedia.org/wiki/Bron%E2%80%93Kerbosch_algorithm for background. 
+// +// This method implements the BronKerbosch3 algorithm of WP; that is, +// the original algorithm with pivoting and degeneracy ordering. +// +// The argument is a pivot function that must return a node of P or X. +// P is guaranteed to contain at least one node. X is not. +// For example see BKPivotMaxDegree. +// +// The method calls the emit argument for each maximal clique in g, as long +// as emit returns true. If emit returns false, BronKerbosch1 returns +// immediately. +// +// There are equivalent labeled and unlabeled versions of this method. +// +// See also simpler variants BronKerbosch1 and BronKerbosch2. +func (g Undirected) BronKerbosch3(pivot func(P, X *Bits) NI, emit func([]NI) bool) { + a := g.AdjacencyList + var f func(R, P, X *Bits) bool + f = func(R, P, X *Bits) bool { + switch { + case !P.Zero(): + var r2, p2, x2, pnu Bits + // compute P \ N(u). next lines are only difference from BK1 + pnu.Set(*P) + for _, to := range a[pivot(P, X)] { + pnu.SetBit(to, 0) + } + // remaining code like BK2 + pf := func(n NI) bool { + r2.Set(*R) + r2.SetBit(n, 1) + p2.Clear() + x2.Clear() + for _, to := range a[n] { + if P.Bit(to) == 1 { + p2.SetBit(to, 1) + } + if X.Bit(to) == 1 { + x2.SetBit(to, 1) + } + } + if !f(&r2, &p2, &x2) { + return false + } + P.SetBit(n, 0) + X.SetBit(n, 1) + return true + } + if !pnu.Iterate(pf) { + return false + } + case X.Zero(): + return emit(R.Slice()) + } + return true + } + var R, P, X Bits + P.SetAll(len(a)) + // code above same as BK2 + // code below new to BK3 + _, ord, _ := g.Degeneracy() + var p2, x2 Bits + for _, n := range ord { + R.SetBit(n, 1) + p2.Clear() + x2.Clear() + for _, to := range a[n] { + if P.Bit(to) == 1 { + p2.SetBit(to, 1) + } + if X.Bit(to) == 1 { + x2.SetBit(to, 1) + } + } + if !f(&R, &p2, &x2) { + return + } + R.SetBit(n, 0) + P.SetBit(n, 0) + X.SetBit(n, 1) + } +} + +// ConnectedComponentBits returns a function that iterates over connected +// components of g, returning a member bitmap for each. 
+// +// Each call of the returned function returns the order (number of nodes) +// and bits of a connected component. The returned function returns zeros +// after returning all connected components. +// +// There are equivalent labeled and unlabeled versions of this method. +// +// See also ConnectedComponentReps, which has lighter weight return values. +func (g Undirected) ConnectedComponentBits() func() (order int, bits Bits) { + a := g.AdjacencyList + var vg Bits // nodes visited in graph + var vc *Bits // nodes visited in current component + var nc int + var df func(NI) + df = func(n NI) { + vg.SetBit(n, 1) + vc.SetBit(n, 1) + nc++ + for _, nb := range a[n] { + if vg.Bit(nb) == 0 { + df(nb) + } + } + return + } + var n NI + return func() (o int, bits Bits) { + for ; n < NI(len(a)); n++ { + if vg.Bit(n) == 0 { + vc = &bits + nc = 0 + df(n) + return nc, bits + } + } + return + } +} + +// ConnectedComponentLists returns a function that iterates over connected +// components of g, returning the member list of each. +// +// Each call of the returned function returns a node list of a connected +// component. The returned function returns nil after returning all connected +// components. +// +// There are equivalent labeled and unlabeled versions of this method. +// +// See also ConnectedComponentReps, which has lighter weight return values. +func (g Undirected) ConnectedComponentLists() func() []NI { + a := g.AdjacencyList + var vg Bits // nodes visited in graph + var m []NI // members of current component + var df func(NI) + df = func(n NI) { + vg.SetBit(n, 1) + m = append(m, n) + for _, nb := range a[n] { + if vg.Bit(nb) == 0 { + df(nb) + } + } + return + } + var n NI + return func() []NI { + for ; n < NI(len(a)); n++ { + if vg.Bit(n) == 0 { + m = nil + df(n) + return m + } + } + return nil + } +} + +// ConnectedComponentReps returns a representative node from each connected +// component of g. 
+// +// Returned is a slice with a single representative node from each connected +// component and also a parallel slice with the order, or number of nodes, +// in the corresponding component. +// +// This is fairly minimal information describing connected components. +// From a representative node, other nodes in the component can be reached +// by depth first traversal for example. +// +// There are equivalent labeled and unlabeled versions of this method. +// +// See also ConnectedComponentBits and ConnectedComponentLists which can +// collect component members in a single traversal, and IsConnected which +// is an even simpler boolean test. +func (g Undirected) ConnectedComponentReps() (reps []NI, orders []int) { + a := g.AdjacencyList + var c Bits + var o int + var df func(NI) + df = func(n NI) { + c.SetBit(n, 1) + o++ + for _, nb := range a[n] { + if c.Bit(nb) == 0 { + df(nb) + } + } + return + } + for n := range a { + if c.Bit(NI(n)) == 0 { + reps = append(reps, NI(n)) + o = 0 + df(NI(n)) + orders = append(orders, o) + } + } + return +} + +// Copy makes a deep copy of g. +// Copy also computes the arc size ma, the number of arcs. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g Undirected) Copy() (c Undirected, ma int) { + l, s := g.AdjacencyList.Copy() + return Undirected{l}, s +} + +// Degeneracy computes k-degeneracy, vertex ordering and k-cores. +// +// See Wikipedia https://en.wikipedia.org/wiki/Degeneracy_(graph_theory) +// +// There are equivalent labeled and unlabeled versions of this method. 
+func (g Undirected) Degeneracy() (k int, ordering []NI, cores []int) { + a := g.AdjacencyList + // WP algorithm + ordering = make([]NI, len(a)) + var L Bits + d := make([]int, len(a)) + var D [][]NI + for v, nb := range a { + dv := len(nb) + d[v] = dv + for len(D) <= dv { + D = append(D, nil) + } + D[dv] = append(D[dv], NI(v)) + } + for ox := range a { + // find a non-empty D + i := 0 + for len(D[i]) == 0 { + i++ + } + // k is max(i, k) + if i > k { + for len(cores) <= i { + cores = append(cores, 0) + } + cores[k] = ox + k = i + } + // select from D[i] + Di := D[i] + last := len(Di) - 1 + v := Di[last] + // Add v to ordering, remove from Di + ordering[ox] = v + L.SetBit(v, 1) + D[i] = Di[:last] + // move neighbors + for _, nb := range a[v] { + if L.Bit(nb) == 1 { + continue + } + dn := d[nb] // old number of neighbors of nb + Ddn := D[dn] // nb is in this list + // remove it from the list + for wx, w := range Ddn { + if w == nb { + last := len(Ddn) - 1 + Ddn[wx], Ddn[last] = Ddn[last], Ddn[wx] + D[dn] = Ddn[:last] + } + } + dn-- // new number of neighbors + d[nb] = dn + // re-add it to its new list + D[dn] = append(D[dn], nb) + } + } + cores[k] = len(ordering) + return +} + +// Degree for undirected graphs, returns the degree of a node. +// +// The degree of a node in an undirected graph is the number of incident +// edges, where loops count twice. +// +// If g is known to be loop-free, the result is simply equivalent to len(g[n]). +// See handshaking lemma example at AdjacencyList.ArcSize. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g Undirected) Degree(n NI) int { + to := g.AdjacencyList[n] + d := len(to) // just "out" degree, + for _, to := range to { + if to == n { + d++ // except loops count twice + } + } + return d +} + +// FromList constructs a FromList representing the tree reachable from +// the given root. +// +// The connected component containing root should represent a simple graph, +// connected as a tree.
+// +// For nodes connected as a tree, the Path member of the returned FromList +// will be populated with both From and Len values. The MaxLen member will be +// set but Leaves will be left a zero value. Return value cycle will be -1. +// +// If the connected component containing root is not connected as a tree, +// a cycle will be detected. The returned FromList will be a zero value and +// return value cycle will be a node involved in the cycle. +// +// Loops and parallel edges will be detected as cycles, however only in the +// connected component containing root. If g is not fully connected, nodes +// not reachable from root will have PathEnd values of {From: -1, Len: 0}. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g Undirected) FromList(root NI) (f FromList, cycle NI) { + p := make([]PathEnd, len(g.AdjacencyList)) + for i := range p { + p[i].From = -1 + } + ml := 0 + var df func(NI, NI) bool + df = func(fr, n NI) bool { + l := p[n].Len + 1 + for _, to := range g.AdjacencyList[n] { + if to == fr { + continue + } + if p[to].Len > 0 { + cycle = to + return false + } + p[to] = PathEnd{From: n, Len: l} + if l > ml { + ml = l + } + if !df(n, to) { + return false + } + } + return true + } + p[root].Len = 1 + if !df(-1, root) { + return + } + return FromList{Paths: p, MaxLen: ml}, -1 +} + +// IsConnected tests if an undirected graph is a single connected component. +// +// There are equivalent labeled and unlabeled versions of this method. +// +// See also ConnectedComponentReps for a method returning more information. +func (g Undirected) IsConnected() bool { + a := g.AdjacencyList + if len(a) == 0 { + return true + } + var b Bits + b.SetAll(len(a)) + var df func(NI) + df = func(n NI) { + b.SetBit(n, 0) + for _, to := range a[n] { + if b.Bit(to) == 1 { + df(to) + } + } + } + df(0) + return b.Zero() +} + +// IsTree identifies trees in undirected graphs. 
+// +// Return value isTree is true if the connected component reachable from root +// is a tree. Further, return value allTree is true if the entire graph g is +// connected. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g Undirected) IsTree(root NI) (isTree, allTree bool) { + a := g.AdjacencyList + var v Bits + v.SetAll(len(a)) + var df func(NI, NI) bool + df = func(fr, n NI) bool { + if v.Bit(n) == 0 { + return false + } + v.SetBit(n, 0) + for _, to := range a[n] { + if to != fr && !df(n, to) { + return false + } + } + return true + } + v.SetBit(root, 0) + for _, to := range a[root] { + if !df(root, to) { + return false, false + } + } + return true, v.Zero() +} + +// Size returns the number of edges in g. +// +// See also ArcSize and HasLoop. +func (g Undirected) Size() int { + m2 := 0 + for fr, to := range g.AdjacencyList { + m2 += len(to) + for _, to := range to { + if to == NI(fr) { + m2++ + } + } + } + return m2 / 2 +} diff --git a/vendor/github.com/soniakeys/graph/undir_cg.go b/vendor/github.com/soniakeys/graph/undir_cg.go new file mode 100644 index 00000000..35b5b97d --- /dev/null +++ b/vendor/github.com/soniakeys/graph/undir_cg.go @@ -0,0 +1,659 @@ +// Copyright 2014 Sonia Keys +// License MIT: http://opensource.org/licenses/MIT + +package graph + +// undir_RO.go is code generated from undir_cg.go by directives in graph.go. +// Editing undir_cg.go is okay. It is the code generation source. +// DO NOT EDIT undir_RO.go. +// The RO means read only and it is upper case RO to slow you down a bit +// in case you start to edit the file. + +// Bipartite determines if a connected component of an undirected graph +// is bipartite, a component where nodes can be partitioned into two sets +// such that every edge in the component goes from one set to the other. +// +// Argument n can be any representative node of the component. +// +// If the component is bipartite, Bipartite returns true and a two-coloring +// of the component. 
Each color set is returned as a bitmap. If the component +// is not bipartite, Bipartite returns false and a representative odd cycle. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g LabeledUndirected) Bipartite(n NI) (b bool, c1, c2 Bits, oc []NI) { + b = true + var open bool + var df func(n NI, c1, c2 *Bits) + df = func(n NI, c1, c2 *Bits) { + c1.SetBit(n, 1) + for _, nb := range g.LabeledAdjacencyList[n] { + if c1.Bit(nb.To) == 1 { + b = false + oc = []NI{nb.To, n} + open = true + return + } + if c2.Bit(nb.To) == 1 { + continue + } + df(nb.To, c2, c1) + if b { + continue + } + switch { + case !open: + case n == oc[0]: + open = false + default: + oc = append(oc, n) + } + return + } + } + df(n, &c1, &c2) + if b { + return b, c1, c2, nil + } + return b, Bits{}, Bits{}, oc +} + +// BronKerbosch1 finds maximal cliques in an undirected graph. +// +// The graph must not contain parallel edges or loops. +// +// See https://en.wikipedia.org/wiki/Clique_(graph_theory) and +// https://en.wikipedia.org/wiki/Bron%E2%80%93Kerbosch_algorithm for background. +// +// This method implements the BronKerbosch1 algorithm of WP; that is, +// the original algorithm without improvements. +// +// The method calls the emit argument for each maximal clique in g, as long +// as emit returns true. If emit returns false, BronKerbosch1 returns +// immediately. +// +// There are equivalent labeled and unlabeled versions of this method. +// +// See also more sophisticated variants BronKerbosch2 and BronKerbosch3. 
+func (g LabeledUndirected) BronKerbosch1(emit func([]NI) bool) { + a := g.LabeledAdjacencyList + var f func(R, P, X *Bits) bool + f = func(R, P, X *Bits) bool { + switch { + case !P.Zero(): + var r2, p2, x2 Bits + pf := func(n NI) bool { + r2.Set(*R) + r2.SetBit(n, 1) + p2.Clear() + x2.Clear() + for _, to := range a[n] { + if P.Bit(to.To) == 1 { + p2.SetBit(to.To, 1) + } + if X.Bit(to.To) == 1 { + x2.SetBit(to.To, 1) + } + } + if !f(&r2, &p2, &x2) { + return false + } + P.SetBit(n, 0) + X.SetBit(n, 1) + return true + } + if !P.Iterate(pf) { + return false + } + case X.Zero(): + return emit(R.Slice()) + } + return true + } + var R, P, X Bits + P.SetAll(len(a)) + f(&R, &P, &X) +} + +// BKPivotMaxDegree is a strategy for BronKerbosch methods. +// +// To use it, take the method value (see golang.org/ref/spec#Method_values) +// and pass it as the argument to BronKerbosch2 or 3. +// +// The strategy is to pick the node from P or X with the maximum degree +// (number of edges) in g. Note this is a shortcut from evaluating degrees +// in P. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g LabeledUndirected) BKPivotMaxDegree(P, X *Bits) (p NI) { + // choose pivot u as highest degree node from P or X + a := g.LabeledAdjacencyList + maxDeg := -1 + P.Iterate(func(n NI) bool { // scan P + if d := len(a[n]); d > maxDeg { + p = n + maxDeg = d + } + return true + }) + X.Iterate(func(n NI) bool { // scan X + if d := len(a[n]); d > maxDeg { + p = n + maxDeg = d + } + return true + }) + return +} + +// BKPivotMinP is a strategy for BronKerbosch methods. +// +// To use it, take the method value (see golang.org/ref/spec#Method_values) +// and pass it as the argument to BronKerbosch2 or 3. +// +// The strategy is to simply pick the first node in P. +// +// There are equivalent labeled and unlabeled versions of this method. 
+func (g LabeledUndirected) BKPivotMinP(P, X *Bits) NI { + return P.From(0) +} + +// BronKerbosch2 finds maximal cliques in an undirected graph. +// +// The graph must not contain parallel edges or loops. +// +// See https://en.wikipedia.org/wiki/Clique_(graph_theory) and +// https://en.wikipedia.org/wiki/Bron%E2%80%93Kerbosch_algorithm for background. +// +// This method implements the BronKerbosch2 algorithm of WP; that is, +// the original algorithm plus pivoting. +// +// The argument is a pivot function that must return a node of P or X. +// P is guaranteed to contain at least one node. X is not. +// For example see BKPivotMaxDegree. +// +// The method calls the emit argument for each maximal clique in g, as long +// as emit returns true. If emit returns false, BronKerbosch1 returns +// immediately. +// +// There are equivalent labeled and unlabeled versions of this method. +// +// See also simpler variant BronKerbosch1 and more sophisticated variant +// BronKerbosch3. +func (g LabeledUndirected) BronKerbosch2(pivot func(P, X *Bits) NI, emit func([]NI) bool) { + a := g.LabeledAdjacencyList + var f func(R, P, X *Bits) bool + f = func(R, P, X *Bits) bool { + switch { + case !P.Zero(): + var r2, p2, x2, pnu Bits + // compute P \ N(u). next 5 lines are only difference from BK1 + pnu.Set(*P) + for _, to := range a[pivot(P, X)] { + pnu.SetBit(to.To, 0) + } + // remaining code like BK1 + pf := func(n NI) bool { + r2.Set(*R) + r2.SetBit(n, 1) + p2.Clear() + x2.Clear() + for _, to := range a[n] { + if P.Bit(to.To) == 1 { + p2.SetBit(to.To, 1) + } + if X.Bit(to.To) == 1 { + x2.SetBit(to.To, 1) + } + } + if !f(&r2, &p2, &x2) { + return false + } + P.SetBit(n, 0) + X.SetBit(n, 1) + return true + } + if !pnu.Iterate(pf) { + return false + } + case X.Zero(): + return emit(R.Slice()) + } + return true + } + var R, P, X Bits + P.SetAll(len(a)) + f(&R, &P, &X) +} + +// BronKerbosch3 finds maximal cliques in an undirected graph. 
+// +// The graph must not contain parallel edges or loops. +// +// See https://en.wikipedia.org/wiki/Clique_(graph_theory) and +// https://en.wikipedia.org/wiki/Bron%E2%80%93Kerbosch_algorithm for background. +// +// This method implements the BronKerbosch3 algorithm of WP; that is, +// the original algorithm with pivoting and degeneracy ordering. +// +// The argument is a pivot function that must return a node of P or X. +// P is guaranteed to contain at least one node. X is not. +// For example see BKPivotMaxDegree. +// +// The method calls the emit argument for each maximal clique in g, as long +// as emit returns true. If emit returns false, BronKerbosch1 returns +// immediately. +// +// There are equivalent labeled and unlabeled versions of this method. +// +// See also simpler variants BronKerbosch1 and BronKerbosch2. +func (g LabeledUndirected) BronKerbosch3(pivot func(P, X *Bits) NI, emit func([]NI) bool) { + a := g.LabeledAdjacencyList + var f func(R, P, X *Bits) bool + f = func(R, P, X *Bits) bool { + switch { + case !P.Zero(): + var r2, p2, x2, pnu Bits + // compute P \ N(u). 
next lines are only difference from BK1 + pnu.Set(*P) + for _, to := range a[pivot(P, X)] { + pnu.SetBit(to.To, 0) + } + // remaining code like BK2 + pf := func(n NI) bool { + r2.Set(*R) + r2.SetBit(n, 1) + p2.Clear() + x2.Clear() + for _, to := range a[n] { + if P.Bit(to.To) == 1 { + p2.SetBit(to.To, 1) + } + if X.Bit(to.To) == 1 { + x2.SetBit(to.To, 1) + } + } + if !f(&r2, &p2, &x2) { + return false + } + P.SetBit(n, 0) + X.SetBit(n, 1) + return true + } + if !pnu.Iterate(pf) { + return false + } + case X.Zero(): + return emit(R.Slice()) + } + return true + } + var R, P, X Bits + P.SetAll(len(a)) + // code above same as BK2 + // code below new to BK3 + _, ord, _ := g.Degeneracy() + var p2, x2 Bits + for _, n := range ord { + R.SetBit(n, 1) + p2.Clear() + x2.Clear() + for _, to := range a[n] { + if P.Bit(to.To) == 1 { + p2.SetBit(to.To, 1) + } + if X.Bit(to.To) == 1 { + x2.SetBit(to.To, 1) + } + } + if !f(&R, &p2, &x2) { + return + } + R.SetBit(n, 0) + P.SetBit(n, 0) + X.SetBit(n, 1) + } +} + +// ConnectedComponentBits returns a function that iterates over connected +// components of g, returning a member bitmap for each. +// +// Each call of the returned function returns the order (number of nodes) +// and bits of a connected component. The returned function returns zeros +// after returning all connected components. +// +// There are equivalent labeled and unlabeled versions of this method. +// +// See also ConnectedComponentReps, which has lighter weight return values. 
+func (g LabeledUndirected) ConnectedComponentBits() func() (order int, bits Bits) { + a := g.LabeledAdjacencyList + var vg Bits // nodes visited in graph + var vc *Bits // nodes visited in current component + var nc int + var df func(NI) + df = func(n NI) { + vg.SetBit(n, 1) + vc.SetBit(n, 1) + nc++ + for _, nb := range a[n] { + if vg.Bit(nb.To) == 0 { + df(nb.To) + } + } + return + } + var n NI + return func() (o int, bits Bits) { + for ; n < NI(len(a)); n++ { + if vg.Bit(n) == 0 { + vc = &bits + nc = 0 + df(n) + return nc, bits + } + } + return + } +} + +// ConnectedComponentLists returns a function that iterates over connected +// components of g, returning the member list of each. +// +// Each call of the returned function returns a node list of a connected +// component. The returned function returns nil after returning all connected +// components. +// +// There are equivalent labeled and unlabeled versions of this method. +// +// See also ConnectedComponentReps, which has lighter weight return values. +func (g LabeledUndirected) ConnectedComponentLists() func() []NI { + a := g.LabeledAdjacencyList + var vg Bits // nodes visited in graph + var m []NI // members of current component + var df func(NI) + df = func(n NI) { + vg.SetBit(n, 1) + m = append(m, n) + for _, nb := range a[n] { + if vg.Bit(nb.To) == 0 { + df(nb.To) + } + } + return + } + var n NI + return func() []NI { + for ; n < NI(len(a)); n++ { + if vg.Bit(n) == 0 { + m = nil + df(n) + return m + } + } + return nil + } +} + +// ConnectedComponentReps returns a representative node from each connected +// component of g. +// +// Returned is a slice with a single representative node from each connected +// component and also a parallel slice with the order, or number of nodes, +// in the corresponding component. +// +// This is fairly minimal information describing connected components. +// From a representative node, other nodes in the component can be reached +// by depth first traversal for example. 
+// +// There are equivalent labeled and unlabeled versions of this method. +// +// See also ConnectedComponentBits and ConnectedComponentLists which can +// collect component members in a single traversal, and IsConnected which +// is an even simpler boolean test. +func (g LabeledUndirected) ConnectedComponentReps() (reps []NI, orders []int) { + a := g.LabeledAdjacencyList + var c Bits + var o int + var df func(NI) + df = func(n NI) { + c.SetBit(n, 1) + o++ + for _, nb := range a[n] { + if c.Bit(nb.To) == 0 { + df(nb.To) + } + } + return + } + for n := range a { + if c.Bit(NI(n)) == 0 { + reps = append(reps, NI(n)) + o = 0 + df(NI(n)) + orders = append(orders, o) + } + } + return +} + +// Copy makes a deep copy of g. +// Copy also computes the arc size ma, the number of arcs. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g LabeledUndirected) Copy() (c LabeledUndirected, ma int) { + l, s := g.LabeledAdjacencyList.Copy() + return LabeledUndirected{l}, s +} + +// Degeneracy computes k-degeneracy, vertex ordering and k-cores. +// +// See Wikipedia https://en.wikipedia.org/wiki/Degeneracy_(graph_theory) +// +// There are equivalent labeled and unlabeled versions of this method. 
+func (g LabeledUndirected) Degeneracy() (k int, ordering []NI, cores []int) { + a := g.LabeledAdjacencyList + // WP algorithm + ordering = make([]NI, len(a)) + var L Bits + d := make([]int, len(a)) + var D [][]NI + for v, nb := range a { + dv := len(nb) + d[v] = dv + for len(D) <= dv { + D = append(D, nil) + } + D[dv] = append(D[dv], NI(v)) + } + for ox := range a { + // find a non-empty D + i := 0 + for len(D[i]) == 0 { + i++ + } + // k is max(i, k) + if i > k { + for len(cores) <= i { + cores = append(cores, 0) + } + cores[k] = ox + k = i + } + // select from D[i] + Di := D[i] + last := len(Di) - 1 + v := Di[last] + // Add v to ordering, remove from Di + ordering[ox] = v + L.SetBit(v, 1) + D[i] = Di[:last] + // move neighbors + for _, nb := range a[v] { + if L.Bit(nb.To) == 1 { + continue + } + dn := d[nb.To] // old number of neighbors of nb + Ddn := D[dn] // nb is in this list + // remove it from the list + for wx, w := range Ddn { + if w == nb.To { + last := len(Ddn) - 1 + Ddn[wx], Ddn[last] = Ddn[last], Ddn[wx] + D[dn] = Ddn[:last] + } + } + dn-- // new number of neighbors + d[nb.To] = dn + // re--add it to it's new list + D[dn] = append(D[dn], nb.To) + } + } + cores[k] = len(ordering) + return +} + +// Degree for undirected graphs, returns the degree of a node. +// +// The degree of a node in an undirected graph is the number of incident +// edges, where loops count twice. +// +// If g is known to be loop-free, the result is simply equivalent to len(g[n]). +// See handshaking lemma example at AdjacencyList.ArcSize. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g LabeledUndirected) Degree(n NI) int { + to := g.LabeledAdjacencyList[n] + d := len(to) // just "out" degree, + for _, to := range to { + if to.To == n { + d++ // except loops count twice + } + } + return d +} + +// FromList constructs a FromList representing the tree reachable from +// the given root. 
+// +// The connected component containing root should represent a simple graph, +// connected as a tree. +// +// For nodes connected as a tree, the Path member of the returned FromList +// will be populated with both From and Len values. The MaxLen member will be +// set but Leaves will be left a zero value. Return value cycle will be -1. +// +// If the connected component containing root is not connected as a tree, +// a cycle will be detected. The returned FromList will be a zero value and +// return value cycle will be a node involved in the cycle. +// +// Loops and parallel edges will be detected as cycles, however only in the +// connected component containing root. If g is not fully connected, nodes +// not reachable from root will have PathEnd values of {From: -1, Len: 0}. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g LabeledUndirected) FromList(root NI) (f FromList, cycle NI) { + p := make([]PathEnd, len(g.LabeledAdjacencyList)) + for i := range p { + p[i].From = -1 + } + ml := 0 + var df func(NI, NI) bool + df = func(fr, n NI) bool { + l := p[n].Len + 1 + for _, to := range g.LabeledAdjacencyList[n] { + if to.To == fr { + continue + } + if p[to.To].Len > 0 { + cycle = to.To + return false + } + p[to.To] = PathEnd{From: n, Len: l} + if l > ml { + ml = l + } + if !df(n, to.To) { + return false + } + } + return true + } + p[root].Len = 1 + if !df(-1, root) { + return + } + return FromList{Paths: p, MaxLen: ml}, -1 +} + +// IsConnected tests if an undirected graph is a single connected component. +// +// There are equivalent labeled and unlabeled versions of this method. +// +// See also ConnectedComponentReps for a method returning more information. 
+func (g LabeledUndirected) IsConnected() bool { + a := g.LabeledAdjacencyList + if len(a) == 0 { + return true + } + var b Bits + b.SetAll(len(a)) + var df func(NI) + df = func(n NI) { + b.SetBit(n, 0) + for _, to := range a[n] { + if b.Bit(to.To) == 1 { + df(to.To) + } + } + } + df(0) + return b.Zero() +} + +// IsTree identifies trees in undirected graphs. +// +// Return value isTree is true if the connected component reachable from root +// is a tree. Further, return value allTree is true if the entire graph g is +// connected. +// +// There are equivalent labeled and unlabeled versions of this method. +func (g LabeledUndirected) IsTree(root NI) (isTree, allTree bool) { + a := g.LabeledAdjacencyList + var v Bits + v.SetAll(len(a)) + var df func(NI, NI) bool + df = func(fr, n NI) bool { + if v.Bit(n) == 0 { + return false + } + v.SetBit(n, 0) + for _, to := range a[n] { + if to.To != fr && !df(n, to.To) { + return false + } + } + return true + } + v.SetBit(root, 0) + for _, to := range a[root] { + if !df(root, to.To) { + return false, false + } + } + return true, v.Zero() +} + +// Size returns the number of edges in g. +// +// See also ArcSize and HasLoop. +func (g LabeledUndirected) Size() int { + m2 := 0 + for fr, to := range g.LabeledAdjacencyList { + m2 += len(to) + for _, to := range to { + if to.To == NI(fr) { + m2++ + } + } + } + return m2 / 2 +} diff --git a/vendor/golang.org/x/net/LICENSE b/vendor/golang.org/x/net/LICENSE new file mode 100644 index 00000000..6a66aea5 --- /dev/null +++ b/vendor/golang.org/x/net/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/net/PATENTS b/vendor/golang.org/x/net/PATENTS new file mode 100644 index 00000000..73309904 --- /dev/null +++ b/vendor/golang.org/x/net/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. 
+ +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/net/context/context.go b/vendor/golang.org/x/net/context/context.go new file mode 100644 index 00000000..56efb95b --- /dev/null +++ b/vendor/golang.org/x/net/context/context.go @@ -0,0 +1,156 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package context defines the Context type, which carries deadlines, +// cancelation signals, and other request-scoped values across API boundaries +// and between processes. +// +// Incoming requests to a server should create a Context, and outgoing calls to +// servers should accept a Context. 
The chain of function calls between must +// propagate the Context, optionally replacing it with a modified copy created +// using WithDeadline, WithTimeout, WithCancel, or WithValue. +// +// Programs that use Contexts should follow these rules to keep interfaces +// consistent across packages and enable static analysis tools to check context +// propagation: +// +// Do not store Contexts inside a struct type; instead, pass a Context +// explicitly to each function that needs it. The Context should be the first +// parameter, typically named ctx: +// +// func DoSomething(ctx context.Context, arg Arg) error { +// // ... use ctx ... +// } +// +// Do not pass a nil Context, even if a function permits it. Pass context.TODO +// if you are unsure about which Context to use. +// +// Use context Values only for request-scoped data that transits processes and +// APIs, not for passing optional parameters to functions. +// +// The same Context may be passed to functions running in different goroutines; +// Contexts are safe for simultaneous use by multiple goroutines. +// +// See http://blog.golang.org/context for example code for a server that uses +// Contexts. +package context + +import "time" + +// A Context carries a deadline, a cancelation signal, and other values across +// API boundaries. +// +// Context's methods may be called by multiple goroutines simultaneously. +type Context interface { + // Deadline returns the time when work done on behalf of this context + // should be canceled. Deadline returns ok==false when no deadline is + // set. Successive calls to Deadline return the same results. + Deadline() (deadline time.Time, ok bool) + + // Done returns a channel that's closed when work done on behalf of this + // context should be canceled. Done may return nil if this context can + // never be canceled. Successive calls to Done return the same value. 
+ // + // WithCancel arranges for Done to be closed when cancel is called; + // WithDeadline arranges for Done to be closed when the deadline + // expires; WithTimeout arranges for Done to be closed when the timeout + // elapses. + // + // Done is provided for use in select statements: + // + // // Stream generates values with DoSomething and sends them to out + // // until DoSomething returns an error or ctx.Done is closed. + // func Stream(ctx context.Context, out <-chan Value) error { + // for { + // v, err := DoSomething(ctx) + // if err != nil { + // return err + // } + // select { + // case <-ctx.Done(): + // return ctx.Err() + // case out <- v: + // } + // } + // } + // + // See http://blog.golang.org/pipelines for more examples of how to use + // a Done channel for cancelation. + Done() <-chan struct{} + + // Err returns a non-nil error value after Done is closed. Err returns + // Canceled if the context was canceled or DeadlineExceeded if the + // context's deadline passed. No other values for Err are defined. + // After Done is closed, successive calls to Err return the same value. + Err() error + + // Value returns the value associated with this context for key, or nil + // if no value is associated with key. Successive calls to Value with + // the same key returns the same result. + // + // Use context values only for request-scoped data that transits + // processes and API boundaries, not for passing optional parameters to + // functions. + // + // A key identifies a specific value in a Context. Functions that wish + // to store values in Context typically allocate a key in a global + // variable then use that key as the argument to context.WithValue and + // Context.Value. A key can be any type that supports equality; + // packages should define keys as an unexported type to avoid + // collisions. 
+ // + // Packages that define a Context key should provide type-safe accessors + // for the values stores using that key: + // + // // Package user defines a User type that's stored in Contexts. + // package user + // + // import "golang.org/x/net/context" + // + // // User is the type of value stored in the Contexts. + // type User struct {...} + // + // // key is an unexported type for keys defined in this package. + // // This prevents collisions with keys defined in other packages. + // type key int + // + // // userKey is the key for user.User values in Contexts. It is + // // unexported; clients use user.NewContext and user.FromContext + // // instead of using this key directly. + // var userKey key = 0 + // + // // NewContext returns a new Context that carries value u. + // func NewContext(ctx context.Context, u *User) context.Context { + // return context.WithValue(ctx, userKey, u) + // } + // + // // FromContext returns the User value stored in ctx, if any. + // func FromContext(ctx context.Context) (*User, bool) { + // u, ok := ctx.Value(userKey).(*User) + // return u, ok + // } + Value(key interface{}) interface{} +} + +// Background returns a non-nil, empty Context. It is never canceled, has no +// values, and has no deadline. It is typically used by the main function, +// initialization, and tests, and as the top-level Context for incoming +// requests. +func Background() Context { + return background +} + +// TODO returns a non-nil, empty Context. Code should use context.TODO when +// it's unclear which Context to use or it is not yet available (because the +// surrounding function has not yet been extended to accept a Context +// parameter). TODO is recognized by static analysis tools that determine +// whether Contexts are propagated correctly in a program. +func TODO() Context { + return todo +} + +// A CancelFunc tells an operation to abandon its work. +// A CancelFunc does not wait for the work to stop. 
+// After the first call, subsequent calls to a CancelFunc do nothing. +type CancelFunc func() diff --git a/vendor/golang.org/x/net/context/ctxhttp/cancelreq.go b/vendor/golang.org/x/net/context/ctxhttp/cancelreq.go new file mode 100644 index 00000000..e3170e33 --- /dev/null +++ b/vendor/golang.org/x/net/context/ctxhttp/cancelreq.go @@ -0,0 +1,19 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.5 + +package ctxhttp + +import "net/http" + +func canceler(client *http.Client, req *http.Request) func() { + // TODO(djd): Respect any existing value of req.Cancel. + ch := make(chan struct{}) + req.Cancel = ch + + return func() { + close(ch) + } +} diff --git a/vendor/golang.org/x/net/context/ctxhttp/cancelreq_go14.go b/vendor/golang.org/x/net/context/ctxhttp/cancelreq_go14.go new file mode 100644 index 00000000..56bcbadb --- /dev/null +++ b/vendor/golang.org/x/net/context/ctxhttp/cancelreq_go14.go @@ -0,0 +1,23 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.5 + +package ctxhttp + +import "net/http" + +type requestCanceler interface { + CancelRequest(*http.Request) +} + +func canceler(client *http.Client, req *http.Request) func() { + rc, ok := client.Transport.(requestCanceler) + if !ok { + return func() {} + } + return func() { + rc.CancelRequest(req) + } +} diff --git a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go new file mode 100644 index 00000000..e35860a7 --- /dev/null +++ b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go @@ -0,0 +1,145 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Package ctxhttp provides helper functions for performing context-aware HTTP requests. +package ctxhttp + +import ( + "io" + "net/http" + "net/url" + "strings" + + "golang.org/x/net/context" +) + +func nop() {} + +var ( + testHookContextDoneBeforeHeaders = nop + testHookDoReturned = nop + testHookDidBodyClose = nop +) + +// Do sends an HTTP request with the provided http.Client and returns an HTTP response. +// If the client is nil, http.DefaultClient is used. +// If the context is canceled or times out, ctx.Err() will be returned. +func Do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) { + if client == nil { + client = http.DefaultClient + } + + // Request cancelation changed in Go 1.5, see cancelreq.go and cancelreq_go14.go. + cancel := canceler(client, req) + + type responseAndError struct { + resp *http.Response + err error + } + result := make(chan responseAndError, 1) + + // Make local copies of test hooks closed over by goroutines below. + // Prevents data races in tests. + testHookDoReturned := testHookDoReturned + testHookDidBodyClose := testHookDidBodyClose + + go func() { + resp, err := client.Do(req) + testHookDoReturned() + result <- responseAndError{resp, err} + }() + + var resp *http.Response + + select { + case <-ctx.Done(): + testHookContextDoneBeforeHeaders() + cancel() + // Clean up after the goroutine calling client.Do: + go func() { + if r := <-result; r.resp != nil { + testHookDidBodyClose() + r.resp.Body.Close() + } + }() + return nil, ctx.Err() + case r := <-result: + var err error + resp, err = r.resp, r.err + if err != nil { + return resp, err + } + } + + c := make(chan struct{}) + go func() { + select { + case <-ctx.Done(): + cancel() + case <-c: + // The response's Body is closed. + } + }() + resp.Body = ¬ifyingReader{resp.Body, c} + + return resp, nil +} + +// Get issues a GET request via the Do function. 
+func Get(ctx context.Context, client *http.Client, url string) (*http.Response, error) { + req, err := http.NewRequest("GET", url, nil) + if err != nil { + return nil, err + } + return Do(ctx, client, req) +} + +// Head issues a HEAD request via the Do function. +func Head(ctx context.Context, client *http.Client, url string) (*http.Response, error) { + req, err := http.NewRequest("HEAD", url, nil) + if err != nil { + return nil, err + } + return Do(ctx, client, req) +} + +// Post issues a POST request via the Do function. +func Post(ctx context.Context, client *http.Client, url string, bodyType string, body io.Reader) (*http.Response, error) { + req, err := http.NewRequest("POST", url, body) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", bodyType) + return Do(ctx, client, req) +} + +// PostForm issues a POST request via the Do function. +func PostForm(ctx context.Context, client *http.Client, url string, data url.Values) (*http.Response, error) { + return Post(ctx, client, url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode())) +} + +// notifyingReader is an io.ReadCloser that closes the notify channel after +// Close is called or a Read fails on the underlying ReadCloser. +type notifyingReader struct { + io.ReadCloser + notify chan<- struct{} +} + +func (r *notifyingReader) Read(p []byte) (int, error) { + n, err := r.ReadCloser.Read(p) + if err != nil && r.notify != nil { + close(r.notify) + r.notify = nil + } + return n, err +} + +func (r *notifyingReader) Close() error { + err := r.ReadCloser.Close() + if r.notify != nil { + close(r.notify) + r.notify = nil + } + return err +} diff --git a/vendor/golang.org/x/net/context/go17.go b/vendor/golang.org/x/net/context/go17.go new file mode 100644 index 00000000..f8cda19a --- /dev/null +++ b/vendor/golang.org/x/net/context/go17.go @@ -0,0 +1,72 @@ +// Copyright 2016 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.7 + +package context + +import ( + "context" // standard library's context, as of Go 1.7 + "time" +) + +var ( + todo = context.TODO() + background = context.Background() +) + +// Canceled is the error returned by Context.Err when the context is canceled. +var Canceled = context.Canceled + +// DeadlineExceeded is the error returned by Context.Err when the context's +// deadline passes. +var DeadlineExceeded = context.DeadlineExceeded + +// WithCancel returns a copy of parent with a new Done channel. The returned +// context's Done channel is closed when the returned cancel function is called +// or when the parent context's Done channel is closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. +func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { + ctx, f := context.WithCancel(parent) + return ctx, CancelFunc(f) +} + +// WithDeadline returns a copy of the parent context with the deadline adjusted +// to be no later than d. If the parent's deadline is already earlier than d, +// WithDeadline(parent, d) is semantically equivalent to parent. The returned +// context's Done channel is closed when the deadline expires, when the returned +// cancel function is called, or when the parent context's Done channel is +// closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. +func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { + ctx, f := context.WithDeadline(parent, deadline) + return ctx, CancelFunc(f) +} + +// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). 
+// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete: +// +// func slowOperationWithTimeout(ctx context.Context) (Result, error) { +// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) +// defer cancel() // releases resources if slowOperation completes before timeout elapses +// return slowOperation(ctx) +// } +func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { + return WithDeadline(parent, time.Now().Add(timeout)) +} + +// WithValue returns a copy of parent in which the value associated with key is +// val. +// +// Use context Values only for request-scoped data that transits processes and +// APIs, not for passing optional parameters to functions. +func WithValue(parent Context, key interface{}, val interface{}) Context { + return context.WithValue(parent, key, val) +} diff --git a/vendor/golang.org/x/net/context/pre_go17.go b/vendor/golang.org/x/net/context/pre_go17.go new file mode 100644 index 00000000..5a30acab --- /dev/null +++ b/vendor/golang.org/x/net/context/pre_go17.go @@ -0,0 +1,300 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.7 + +package context + +import ( + "errors" + "fmt" + "sync" + "time" +) + +// An emptyCtx is never canceled, has no values, and has no deadline. It is not +// struct{}, since vars of this type must have distinct addresses. 
+type emptyCtx int + +func (*emptyCtx) Deadline() (deadline time.Time, ok bool) { + return +} + +func (*emptyCtx) Done() <-chan struct{} { + return nil +} + +func (*emptyCtx) Err() error { + return nil +} + +func (*emptyCtx) Value(key interface{}) interface{} { + return nil +} + +func (e *emptyCtx) String() string { + switch e { + case background: + return "context.Background" + case todo: + return "context.TODO" + } + return "unknown empty Context" +} + +var ( + background = new(emptyCtx) + todo = new(emptyCtx) +) + +// Canceled is the error returned by Context.Err when the context is canceled. +var Canceled = errors.New("context canceled") + +// DeadlineExceeded is the error returned by Context.Err when the context's +// deadline passes. +var DeadlineExceeded = errors.New("context deadline exceeded") + +// WithCancel returns a copy of parent with a new Done channel. The returned +// context's Done channel is closed when the returned cancel function is called +// or when the parent context's Done channel is closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. +func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { + c := newCancelCtx(parent) + propagateCancel(parent, c) + return c, func() { c.cancel(true, Canceled) } +} + +// newCancelCtx returns an initialized cancelCtx. +func newCancelCtx(parent Context) *cancelCtx { + return &cancelCtx{ + Context: parent, + done: make(chan struct{}), + } +} + +// propagateCancel arranges for child to be canceled when parent is. 
+func propagateCancel(parent Context, child canceler) { + if parent.Done() == nil { + return // parent is never canceled + } + if p, ok := parentCancelCtx(parent); ok { + p.mu.Lock() + if p.err != nil { + // parent has already been canceled + child.cancel(false, p.err) + } else { + if p.children == nil { + p.children = make(map[canceler]bool) + } + p.children[child] = true + } + p.mu.Unlock() + } else { + go func() { + select { + case <-parent.Done(): + child.cancel(false, parent.Err()) + case <-child.Done(): + } + }() + } +} + +// parentCancelCtx follows a chain of parent references until it finds a +// *cancelCtx. This function understands how each of the concrete types in this +// package represents its parent. +func parentCancelCtx(parent Context) (*cancelCtx, bool) { + for { + switch c := parent.(type) { + case *cancelCtx: + return c, true + case *timerCtx: + return c.cancelCtx, true + case *valueCtx: + parent = c.Context + default: + return nil, false + } + } +} + +// removeChild removes a context from its parent. +func removeChild(parent Context, child canceler) { + p, ok := parentCancelCtx(parent) + if !ok { + return + } + p.mu.Lock() + if p.children != nil { + delete(p.children, child) + } + p.mu.Unlock() +} + +// A canceler is a context type that can be canceled directly. The +// implementations are *cancelCtx and *timerCtx. +type canceler interface { + cancel(removeFromParent bool, err error) + Done() <-chan struct{} +} + +// A cancelCtx can be canceled. When canceled, it also cancels any children +// that implement canceler. +type cancelCtx struct { + Context + + done chan struct{} // closed by the first cancel call. 
+ + mu sync.Mutex + children map[canceler]bool // set to nil by the first cancel call + err error // set to non-nil by the first cancel call +} + +func (c *cancelCtx) Done() <-chan struct{} { + return c.done +} + +func (c *cancelCtx) Err() error { + c.mu.Lock() + defer c.mu.Unlock() + return c.err +} + +func (c *cancelCtx) String() string { + return fmt.Sprintf("%v.WithCancel", c.Context) +} + +// cancel closes c.done, cancels each of c's children, and, if +// removeFromParent is true, removes c from its parent's children. +func (c *cancelCtx) cancel(removeFromParent bool, err error) { + if err == nil { + panic("context: internal error: missing cancel error") + } + c.mu.Lock() + if c.err != nil { + c.mu.Unlock() + return // already canceled + } + c.err = err + close(c.done) + for child := range c.children { + // NOTE: acquiring the child's lock while holding parent's lock. + child.cancel(false, err) + } + c.children = nil + c.mu.Unlock() + + if removeFromParent { + removeChild(c.Context, c) + } +} + +// WithDeadline returns a copy of the parent context with the deadline adjusted +// to be no later than d. If the parent's deadline is already earlier than d, +// WithDeadline(parent, d) is semantically equivalent to parent. The returned +// context's Done channel is closed when the deadline expires, when the returned +// cancel function is called, or when the parent context's Done channel is +// closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. +func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { + if cur, ok := parent.Deadline(); ok && cur.Before(deadline) { + // The current deadline is already sooner than the new one. 
+ return WithCancel(parent) + } + c := &timerCtx{ + cancelCtx: newCancelCtx(parent), + deadline: deadline, + } + propagateCancel(parent, c) + d := deadline.Sub(time.Now()) + if d <= 0 { + c.cancel(true, DeadlineExceeded) // deadline has already passed + return c, func() { c.cancel(true, Canceled) } + } + c.mu.Lock() + defer c.mu.Unlock() + if c.err == nil { + c.timer = time.AfterFunc(d, func() { + c.cancel(true, DeadlineExceeded) + }) + } + return c, func() { c.cancel(true, Canceled) } +} + +// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to +// implement Done and Err. It implements cancel by stopping its timer then +// delegating to cancelCtx.cancel. +type timerCtx struct { + *cancelCtx + timer *time.Timer // Under cancelCtx.mu. + + deadline time.Time +} + +func (c *timerCtx) Deadline() (deadline time.Time, ok bool) { + return c.deadline, true +} + +func (c *timerCtx) String() string { + return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now())) +} + +func (c *timerCtx) cancel(removeFromParent bool, err error) { + c.cancelCtx.cancel(false, err) + if removeFromParent { + // Remove this timerCtx from its parent cancelCtx's children. + removeChild(c.cancelCtx.Context, c) + } + c.mu.Lock() + if c.timer != nil { + c.timer.Stop() + c.timer = nil + } + c.mu.Unlock() +} + +// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). 
+// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete: +// +// func slowOperationWithTimeout(ctx context.Context) (Result, error) { +// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) +// defer cancel() // releases resources if slowOperation completes before timeout elapses +// return slowOperation(ctx) +// } +func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { + return WithDeadline(parent, time.Now().Add(timeout)) +} + +// WithValue returns a copy of parent in which the value associated with key is +// val. +// +// Use context Values only for request-scoped data that transits processes and +// APIs, not for passing optional parameters to functions. +func WithValue(parent Context, key interface{}, val interface{}) Context { + return &valueCtx{parent, key, val} +} + +// A valueCtx carries a key-value pair. It implements Value for that key and +// delegates all other calls to the embedded Context. +type valueCtx struct { + Context + key, val interface{} +} + +func (c *valueCtx) String() string { + return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val) +} + +func (c *valueCtx) Value(key interface{}) interface{} { + if c.key == key { + return c.val + } + return c.Context.Value(key) +} diff --git a/vendor/golang.org/x/oauth2/.travis.yml b/vendor/golang.org/x/oauth2/.travis.yml new file mode 100644 index 00000000..a035125c --- /dev/null +++ b/vendor/golang.org/x/oauth2/.travis.yml @@ -0,0 +1,14 @@ +language: go + +go: + - 1.3 + - 1.4 + +install: + - export GOPATH="$HOME/gopath" + - mkdir -p "$GOPATH/src/golang.org/x" + - mv "$TRAVIS_BUILD_DIR" "$GOPATH/src/golang.org/x/oauth2" + - go get -v -t -d golang.org/x/oauth2/... + +script: + - go test -v golang.org/x/oauth2/... 
diff --git a/vendor/golang.org/x/oauth2/AUTHORS b/vendor/golang.org/x/oauth2/AUTHORS new file mode 100644 index 00000000..15167cd7 --- /dev/null +++ b/vendor/golang.org/x/oauth2/AUTHORS @@ -0,0 +1,3 @@ +# This source code refers to The Go Authors for copyright purposes. +# The master list of authors is in the main Go distribution, +# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/oauth2/CONTRIBUTING.md b/vendor/golang.org/x/oauth2/CONTRIBUTING.md new file mode 100644 index 00000000..46aa2b12 --- /dev/null +++ b/vendor/golang.org/x/oauth2/CONTRIBUTING.md @@ -0,0 +1,31 @@ +# Contributing to Go + +Go is an open source project. + +It is the work of hundreds of contributors. We appreciate your help! + + +## Filing issues + +When [filing an issue](https://github.com/golang/oauth2/issues), make sure to answer these five questions: + +1. What version of Go are you using (`go version`)? +2. What operating system and processor architecture are you using? +3. What did you do? +4. What did you expect to see? +5. What did you see instead? + +General questions should go to the [golang-nuts mailing list](https://groups.google.com/group/golang-nuts) instead of the issue tracker. +The gophers there will answer or ask you to file an issue if you've tripped over a bug. + +## Contributing code + +Please read the [Contribution Guidelines](https://golang.org/doc/contribute.html) +before sending patches. + +**We do not accept GitHub pull requests** +(we use [Gerrit](https://code.google.com/p/gerrit/) instead for code review). + +Unless otherwise noted, the Go source files are distributed under +the BSD-style license found in the LICENSE file. + diff --git a/vendor/golang.org/x/oauth2/CONTRIBUTORS b/vendor/golang.org/x/oauth2/CONTRIBUTORS new file mode 100644 index 00000000..1c4577e9 --- /dev/null +++ b/vendor/golang.org/x/oauth2/CONTRIBUTORS @@ -0,0 +1,3 @@ +# This source code was written by the Go contributors. 
+# The master list of contributors is in the main Go distribution, +# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/oauth2/LICENSE b/vendor/golang.org/x/oauth2/LICENSE new file mode 100644 index 00000000..d02f24fd --- /dev/null +++ b/vendor/golang.org/x/oauth2/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The oauth2 Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/golang.org/x/oauth2/README.md b/vendor/golang.org/x/oauth2/README.md new file mode 100644 index 00000000..0d514173 --- /dev/null +++ b/vendor/golang.org/x/oauth2/README.md @@ -0,0 +1,64 @@ +# OAuth2 for Go + +[![Build Status](https://travis-ci.org/golang/oauth2.svg?branch=master)](https://travis-ci.org/golang/oauth2) + +oauth2 package contains a client implementation for OAuth 2.0 spec. + +## Installation + +~~~~ +go get golang.org/x/oauth2 +~~~~ + +See godoc for further documentation and examples. + +* [godoc.org/golang.org/x/oauth2](http://godoc.org/golang.org/x/oauth2) +* [godoc.org/golang.org/x/oauth2/google](http://godoc.org/golang.org/x/oauth2/google) + + +## App Engine + +In change 96e89be (March 2015) we removed the `oauth2.Context2` type in favor +of the [`context.Context`](https://golang.org/x/net/context#Context) type from +the `golang.org/x/net/context` package + +This means its no longer possible to use the "Classic App Engine" +`appengine.Context` type with the `oauth2` package. (You're using +Classic App Engine if you import the package `"appengine"`.) + +To work around this, you may use the new `"google.golang.org/appengine"` +package. This package has almost the same API as the `"appengine"` package, +but it can be fetched with `go get` and used on "Managed VMs" and well as +Classic App Engine. + +See the [new `appengine` package's readme](https://github.com/golang/appengine#updating-a-go-app-engine-app) +for information on updating your app. + +If you don't want to update your entire app to use the new App Engine packages, +you may use both sets of packages in parallel, using only the new packages +with the `oauth2` package. 
+ + import ( + "golang.org/x/net/context" + "golang.org/x/oauth2" + "golang.org/x/oauth2/google" + newappengine "google.golang.org/appengine" + newurlfetch "google.golang.org/appengine/urlfetch" + + "appengine" + ) + + func handler(w http.ResponseWriter, r *http.Request) { + var c appengine.Context = appengine.NewContext(r) + c.Infof("Logging a message with the old package") + + var ctx context.Context = newappengine.NewContext(r) + client := &http.Client{ + Transport: &oauth2.Transport{ + Source: google.AppEngineTokenSource(ctx, "scope"), + Base: &newurlfetch.Transport{Context: ctx}, + }, + } + client.Get("...") + } + diff --git a/vendor/golang.org/x/oauth2/client_appengine.go b/vendor/golang.org/x/oauth2/client_appengine.go new file mode 100644 index 00000000..8962c49d --- /dev/null +++ b/vendor/golang.org/x/oauth2/client_appengine.go @@ -0,0 +1,25 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build appengine + +// App Engine hooks. + +package oauth2 + +import ( + "net/http" + + "golang.org/x/net/context" + "golang.org/x/oauth2/internal" + "google.golang.org/appengine/urlfetch" +) + +func init() { + internal.RegisterContextClientFunc(contextClientAppEngine) +} + +func contextClientAppEngine(ctx context.Context) (*http.Client, error) { + return urlfetch.Client(ctx), nil +} diff --git a/vendor/golang.org/x/oauth2/internal/oauth2.go b/vendor/golang.org/x/oauth2/internal/oauth2.go new file mode 100644 index 00000000..fbe1028d --- /dev/null +++ b/vendor/golang.org/x/oauth2/internal/oauth2.go @@ -0,0 +1,76 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package internal contains support packages for oauth2 package. 
+package internal + +import ( + "bufio" + "crypto/rsa" + "crypto/x509" + "encoding/pem" + "errors" + "fmt" + "io" + "strings" +) + +// ParseKey converts the binary contents of a private key file +// to an *rsa.PrivateKey. It detects whether the private key is in a +// PEM container or not. If so, it extracts the the private key +// from PEM container before conversion. It only supports PEM +// containers with no passphrase. +func ParseKey(key []byte) (*rsa.PrivateKey, error) { + block, _ := pem.Decode(key) + if block != nil { + key = block.Bytes + } + parsedKey, err := x509.ParsePKCS8PrivateKey(key) + if err != nil { + parsedKey, err = x509.ParsePKCS1PrivateKey(key) + if err != nil { + return nil, fmt.Errorf("private key should be a PEM or plain PKSC1 or PKCS8; parse error: %v", err) + } + } + parsed, ok := parsedKey.(*rsa.PrivateKey) + if !ok { + return nil, errors.New("private key is invalid") + } + return parsed, nil +} + +func ParseINI(ini io.Reader) (map[string]map[string]string, error) { + result := map[string]map[string]string{ + "": map[string]string{}, // root section + } + scanner := bufio.NewScanner(ini) + currentSection := "" + for scanner.Scan() { + line := strings.TrimSpace(scanner.Text()) + if strings.HasPrefix(line, ";") { + // comment. 
+ continue + } + if strings.HasPrefix(line, "[") && strings.HasSuffix(line, "]") { + currentSection = strings.TrimSpace(line[1 : len(line)-1]) + result[currentSection] = map[string]string{} + continue + } + parts := strings.SplitN(line, "=", 2) + if len(parts) == 2 && parts[0] != "" { + result[currentSection][strings.TrimSpace(parts[0])] = strings.TrimSpace(parts[1]) + } + } + if err := scanner.Err(); err != nil { + return nil, fmt.Errorf("error scanning ini: %v", err) + } + return result, nil +} + +func CondVal(v string) []string { + if v == "" { + return nil + } + return []string{v} +} diff --git a/vendor/golang.org/x/oauth2/internal/token.go b/vendor/golang.org/x/oauth2/internal/token.go new file mode 100644 index 00000000..a6ed3cc7 --- /dev/null +++ b/vendor/golang.org/x/oauth2/internal/token.go @@ -0,0 +1,225 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package internal contains support packages for oauth2 package. +package internal + +import ( + "encoding/json" + "fmt" + "io" + "io/ioutil" + "mime" + "net/http" + "net/url" + "strconv" + "strings" + "time" + + "golang.org/x/net/context" +) + +// Token represents the crendentials used to authorize +// the requests to access protected resources on the OAuth 2.0 +// provider's backend. +// +// This type is a mirror of oauth2.Token and exists to break +// an otherwise-circular dependency. Other internal packages +// should convert this Token into an oauth2.Token before use. +type Token struct { + // AccessToken is the token that authorizes and authenticates + // the requests. + AccessToken string + + // TokenType is the type of token. + // The Type method returns either this or "Bearer", the default. + TokenType string + + // RefreshToken is a token that's used by the application + // (as opposed to the user) to refresh the access token + // if it expires. 
+ RefreshToken string + + // Expiry is the optional expiration time of the access token. + // + // If zero, TokenSource implementations will reuse the same + // token forever and RefreshToken or equivalent + // mechanisms for that TokenSource will not be used. + Expiry time.Time + + // Raw optionally contains extra metadata from the server + // when updating a token. + Raw interface{} +} + +// tokenJSON is the struct representing the HTTP response from OAuth2 +// providers returning a token in JSON form. +type tokenJSON struct { + AccessToken string `json:"access_token"` + TokenType string `json:"token_type"` + RefreshToken string `json:"refresh_token"` + ExpiresIn expirationTime `json:"expires_in"` // at least PayPal returns string, while most return number + Expires expirationTime `json:"expires"` // broken Facebook spelling of expires_in +} + +func (e *tokenJSON) expiry() (t time.Time) { + if v := e.ExpiresIn; v != 0 { + return time.Now().Add(time.Duration(v) * time.Second) + } + if v := e.Expires; v != 0 { + return time.Now().Add(time.Duration(v) * time.Second) + } + return +} + +type expirationTime int32 + +func (e *expirationTime) UnmarshalJSON(b []byte) error { + var n json.Number + err := json.Unmarshal(b, &n) + if err != nil { + return err + } + i, err := n.Int64() + if err != nil { + return err + } + *e = expirationTime(i) + return nil +} + +var brokenAuthHeaderProviders = []string{ + "https://accounts.google.com/", + "https://api.dropbox.com/", + "https://api.dropboxapi.com/", + "https://api.instagram.com/", + "https://api.netatmo.net/", + "https://api.odnoklassniki.ru/", + "https://api.pushbullet.com/", + "https://api.soundcloud.com/", + "https://api.twitch.tv/", + "https://app.box.com/", + "https://connect.stripe.com/", + "https://login.microsoftonline.com/", + "https://login.salesforce.com/", + "https://oauth.sandbox.trainingpeaks.com/", + "https://oauth.trainingpeaks.com/", + "https://oauth.vk.com/", + "https://openapi.baidu.com/", + 
"https://slack.com/", + "https://test-sandbox.auth.corp.google.com", + "https://test.salesforce.com/", + "https://user.gini.net/", + "https://www.douban.com/", + "https://www.googleapis.com/", + "https://www.linkedin.com/", + "https://www.strava.com/oauth/", + "https://www.wunderlist.com/oauth/", + "https://api.patreon.com/", +} + +func RegisterBrokenAuthHeaderProvider(tokenURL string) { + brokenAuthHeaderProviders = append(brokenAuthHeaderProviders, tokenURL) +} + +// providerAuthHeaderWorks reports whether the OAuth2 server identified by the tokenURL +// implements the OAuth2 spec correctly +// See https://code.google.com/p/goauth2/issues/detail?id=31 for background. +// In summary: +// - Reddit only accepts client secret in the Authorization header +// - Dropbox accepts either it in URL param or Auth header, but not both. +// - Google only accepts URL param (not spec compliant?), not Auth header +// - Stripe only accepts client secret in Auth header with Bearer method, not Basic +func providerAuthHeaderWorks(tokenURL string) bool { + for _, s := range brokenAuthHeaderProviders { + if strings.HasPrefix(tokenURL, s) { + // Some sites fail to implement the OAuth2 spec fully. + return false + } + } + + // Assume the provider implements the spec properly + // otherwise. We can add more exceptions as they're + // discovered. We will _not_ be adding configurable hooks + // to this package to let users select server bugs. 
+ return true +} + +func RetrieveToken(ctx context.Context, ClientID, ClientSecret, TokenURL string, v url.Values) (*Token, error) { + hc, err := ContextClient(ctx) + if err != nil { + return nil, err + } + v.Set("client_id", ClientID) + bustedAuth := !providerAuthHeaderWorks(TokenURL) + if bustedAuth && ClientSecret != "" { + v.Set("client_secret", ClientSecret) + } + req, err := http.NewRequest("POST", TokenURL, strings.NewReader(v.Encode())) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + if !bustedAuth { + req.SetBasicAuth(ClientID, ClientSecret) + } + r, err := hc.Do(req) + if err != nil { + return nil, err + } + defer r.Body.Close() + body, err := ioutil.ReadAll(io.LimitReader(r.Body, 1<<20)) + if err != nil { + return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) + } + if code := r.StatusCode; code < 200 || code > 299 { + return nil, fmt.Errorf("oauth2: cannot fetch token: %v\nResponse: %s", r.Status, body) + } + + var token *Token + content, _, _ := mime.ParseMediaType(r.Header.Get("Content-Type")) + switch content { + case "application/x-www-form-urlencoded", "text/plain": + vals, err := url.ParseQuery(string(body)) + if err != nil { + return nil, err + } + token = &Token{ + AccessToken: vals.Get("access_token"), + TokenType: vals.Get("token_type"), + RefreshToken: vals.Get("refresh_token"), + Raw: vals, + } + e := vals.Get("expires_in") + if e == "" { + // TODO(jbd): Facebook's OAuth2 implementation is broken and + // returns expires_in field in expires. Remove the fallback to expires, + // when Facebook fixes their implementation. 
+ e = vals.Get("expires") + } + expires, _ := strconv.Atoi(e) + if expires != 0 { + token.Expiry = time.Now().Add(time.Duration(expires) * time.Second) + } + default: + var tj tokenJSON + if err = json.Unmarshal(body, &tj); err != nil { + return nil, err + } + token = &Token{ + AccessToken: tj.AccessToken, + TokenType: tj.TokenType, + RefreshToken: tj.RefreshToken, + Expiry: tj.expiry(), + Raw: make(map[string]interface{}), + } + json.Unmarshal(body, &token.Raw) // no error checks for optional fields + } + // Don't overwrite `RefreshToken` with an empty value + // if this was a token refreshing request. + if token.RefreshToken == "" { + token.RefreshToken = v.Get("refresh_token") + } + return token, nil +} diff --git a/vendor/golang.org/x/oauth2/internal/transport.go b/vendor/golang.org/x/oauth2/internal/transport.go new file mode 100644 index 00000000..f1f173e3 --- /dev/null +++ b/vendor/golang.org/x/oauth2/internal/transport.go @@ -0,0 +1,69 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package internal contains support packages for oauth2 package. +package internal + +import ( + "net/http" + + "golang.org/x/net/context" +) + +// HTTPClient is the context key to use with golang.org/x/net/context's +// WithValue function to associate an *http.Client value with a context. +var HTTPClient ContextKey + +// ContextKey is just an empty struct. It exists so HTTPClient can be +// an immutable public variable with a unique type. It's immutable +// because nobody else can create a ContextKey, being unexported. +type ContextKey struct{} + +// ContextClientFunc is a func which tries to return an *http.Client +// given a Context value. If it returns an error, the search stops +// with that error. If it returns (nil, nil), the search continues +// down the list of registered funcs. 
+type ContextClientFunc func(context.Context) (*http.Client, error) + +var contextClientFuncs []ContextClientFunc + +func RegisterContextClientFunc(fn ContextClientFunc) { + contextClientFuncs = append(contextClientFuncs, fn) +} + +func ContextClient(ctx context.Context) (*http.Client, error) { + if ctx != nil { + if hc, ok := ctx.Value(HTTPClient).(*http.Client); ok { + return hc, nil + } + } + for _, fn := range contextClientFuncs { + c, err := fn(ctx) + if err != nil { + return nil, err + } + if c != nil { + return c, nil + } + } + return http.DefaultClient, nil +} + +func ContextTransport(ctx context.Context) http.RoundTripper { + hc, err := ContextClient(ctx) + // This is a rare error case (somebody using nil on App Engine). + if err != nil { + return ErrorTransport{err} + } + return hc.Transport +} + +// ErrorTransport returns the specified error on RoundTrip. +// This RoundTripper should be used in rare error cases where +// error handling can be postponed to response handling time. +type ErrorTransport struct{ Err error } + +func (t ErrorTransport) RoundTrip(*http.Request) (*http.Response, error) { + return nil, t.Err +} diff --git a/vendor/golang.org/x/oauth2/oauth2.go b/vendor/golang.org/x/oauth2/oauth2.go new file mode 100644 index 00000000..9b7b977d --- /dev/null +++ b/vendor/golang.org/x/oauth2/oauth2.go @@ -0,0 +1,337 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package oauth2 provides support for making +// OAuth2 authorized and authenticated HTTP requests. +// It can additionally grant authorization with Bearer JWT. +package oauth2 + +import ( + "bytes" + "errors" + "net/http" + "net/url" + "strings" + "sync" + + "golang.org/x/net/context" + "golang.org/x/oauth2/internal" +) + +// NoContext is the default context you should supply if not using +// your own context.Context (see https://golang.org/x/net/context). 
+var NoContext = context.TODO() + +// RegisterBrokenAuthHeaderProvider registers an OAuth2 server +// identified by the tokenURL prefix as an OAuth2 implementation +// which doesn't support the HTTP Basic authentication +// scheme to authenticate with the authorization server. +// Once a server is registered, credentials (client_id and client_secret) +// will be passed as query parameters rather than being present +// in the Authorization header. +// See https://code.google.com/p/goauth2/issues/detail?id=31 for background. +func RegisterBrokenAuthHeaderProvider(tokenURL string) { + internal.RegisterBrokenAuthHeaderProvider(tokenURL) +} + +// Config describes a typical 3-legged OAuth2 flow, with both the +// client application information and the server's endpoint URLs. +type Config struct { + // ClientID is the application's ID. + ClientID string + + // ClientSecret is the application's secret. + ClientSecret string + + // Endpoint contains the resource server's token endpoint + // URLs. These are constants specific to each server and are + // often available via site-specific packages, such as + // google.Endpoint or github.Endpoint. + Endpoint Endpoint + + // RedirectURL is the URL to redirect users going through + // the OAuth flow, after the resource owner's URLs. + RedirectURL string + + // Scope specifies optional requested permissions. + Scopes []string +} + +// A TokenSource is anything that can return a token. +type TokenSource interface { + // Token returns a token or an error. + // Token must be safe for concurrent use by multiple goroutines. + // The returned Token must not be modified. + Token() (*Token, error) +} + +// Endpoint contains the OAuth 2.0 provider's authorization and token +// endpoint URLs. +type Endpoint struct { + AuthURL string + TokenURL string +} + +var ( + // AccessTypeOnline and AccessTypeOffline are options passed + // to the Options.AuthCodeURL method. 
They modify the + // "access_type" field that gets sent in the URL returned by + // AuthCodeURL. + // + // Online is the default if neither is specified. If your + // application needs to refresh access tokens when the user + // is not present at the browser, then use offline. This will + // result in your application obtaining a refresh token the + // first time your application exchanges an authorization + // code for a user. + AccessTypeOnline AuthCodeOption = SetAuthURLParam("access_type", "online") + AccessTypeOffline AuthCodeOption = SetAuthURLParam("access_type", "offline") + + // ApprovalForce forces the users to view the consent dialog + // and confirm the permissions request at the URL returned + // from AuthCodeURL, even if they've already done so. + ApprovalForce AuthCodeOption = SetAuthURLParam("approval_prompt", "force") +) + +// An AuthCodeOption is passed to Config.AuthCodeURL. +type AuthCodeOption interface { + setValue(url.Values) +} + +type setParam struct{ k, v string } + +func (p setParam) setValue(m url.Values) { m.Set(p.k, p.v) } + +// SetAuthURLParam builds an AuthCodeOption which passes key/value parameters +// to a provider's authorization endpoint. +func SetAuthURLParam(key, value string) AuthCodeOption { + return setParam{key, value} +} + +// AuthCodeURL returns a URL to OAuth 2.0 provider's consent page +// that asks for permissions for the required scopes explicitly. +// +// State is a token to protect the user from CSRF attacks. You must +// always provide a non-zero string and validate that it matches the +// the state query parameter on your redirect callback. +// See http://tools.ietf.org/html/rfc6749#section-10.12 for more info. +// +// Opts may include AccessTypeOnline or AccessTypeOffline, as well +// as ApprovalForce. 
+func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string { + var buf bytes.Buffer + buf.WriteString(c.Endpoint.AuthURL) + v := url.Values{ + "response_type": {"code"}, + "client_id": {c.ClientID}, + "redirect_uri": internal.CondVal(c.RedirectURL), + "scope": internal.CondVal(strings.Join(c.Scopes, " ")), + "state": internal.CondVal(state), + } + for _, opt := range opts { + opt.setValue(v) + } + if strings.Contains(c.Endpoint.AuthURL, "?") { + buf.WriteByte('&') + } else { + buf.WriteByte('?') + } + buf.WriteString(v.Encode()) + return buf.String() +} + +// PasswordCredentialsToken converts a resource owner username and password +// pair into a token. +// +// Per the RFC, this grant type should only be used "when there is a high +// degree of trust between the resource owner and the client (e.g., the client +// is part of the device operating system or a highly privileged application), +// and when other authorization grant types are not available." +// See https://tools.ietf.org/html/rfc6749#section-4.3 for more info. +// +// The HTTP client to use is derived from the context. +// If nil, http.DefaultClient is used. +func (c *Config) PasswordCredentialsToken(ctx context.Context, username, password string) (*Token, error) { + return retrieveToken(ctx, c, url.Values{ + "grant_type": {"password"}, + "username": {username}, + "password": {password}, + "scope": internal.CondVal(strings.Join(c.Scopes, " ")), + }) +} + +// Exchange converts an authorization code into a token. +// +// It is used after a resource provider redirects the user back +// to the Redirect URI (the URL obtained from AuthCodeURL). +// +// The HTTP client to use is derived from the context. +// If a client is not provided via the context, http.DefaultClient is used. +// +// The code will be in the *http.Request.FormValue("code"). Before +// calling Exchange, be sure to validate FormValue("state"). 
+func (c *Config) Exchange(ctx context.Context, code string) (*Token, error) { + return retrieveToken(ctx, c, url.Values{ + "grant_type": {"authorization_code"}, + "code": {code}, + "redirect_uri": internal.CondVal(c.RedirectURL), + "scope": internal.CondVal(strings.Join(c.Scopes, " ")), + }) +} + +// Client returns an HTTP client using the provided token. +// The token will auto-refresh as necessary. The underlying +// HTTP transport will be obtained using the provided context. +// The returned client and its Transport should not be modified. +func (c *Config) Client(ctx context.Context, t *Token) *http.Client { + return NewClient(ctx, c.TokenSource(ctx, t)) +} + +// TokenSource returns a TokenSource that returns t until t expires, +// automatically refreshing it as necessary using the provided context. +// +// Most users will use Config.Client instead. +func (c *Config) TokenSource(ctx context.Context, t *Token) TokenSource { + tkr := &tokenRefresher{ + ctx: ctx, + conf: c, + } + if t != nil { + tkr.refreshToken = t.RefreshToken + } + return &reuseTokenSource{ + t: t, + new: tkr, + } +} + +// tokenRefresher is a TokenSource that makes "grant_type"=="refresh_token" +// HTTP requests to renew a token using a RefreshToken. +type tokenRefresher struct { + ctx context.Context // used to get HTTP requests + conf *Config + refreshToken string +} + +// WARNING: Token is not safe for concurrent access, as it +// updates the tokenRefresher's refreshToken field. +// Within this package, it is used by reuseTokenSource which +// synchronizes calls to this method with its own mutex. 
+func (tf *tokenRefresher) Token() (*Token, error) { + if tf.refreshToken == "" { + return nil, errors.New("oauth2: token expired and refresh token is not set") + } + + tk, err := retrieveToken(tf.ctx, tf.conf, url.Values{ + "grant_type": {"refresh_token"}, + "refresh_token": {tf.refreshToken}, + }) + + if err != nil { + return nil, err + } + if tf.refreshToken != tk.RefreshToken { + tf.refreshToken = tk.RefreshToken + } + return tk, err +} + +// reuseTokenSource is a TokenSource that holds a single token in memory +// and validates its expiry before each call to retrieve it with +// Token. If it's expired, it will be auto-refreshed using the +// new TokenSource. +type reuseTokenSource struct { + new TokenSource // called when t is expired. + + mu sync.Mutex // guards t + t *Token +} + +// Token returns the current token if it's still valid, else will +// refresh the current token (using r.Context for HTTP client +// information) and return the new one. +func (s *reuseTokenSource) Token() (*Token, error) { + s.mu.Lock() + defer s.mu.Unlock() + if s.t.Valid() { + return s.t, nil + } + t, err := s.new.Token() + if err != nil { + return nil, err + } + s.t = t + return t, nil +} + +// StaticTokenSource returns a TokenSource that always returns the same token. +// Because the provided token t is never refreshed, StaticTokenSource is only +// useful for tokens that never expire. +func StaticTokenSource(t *Token) TokenSource { + return staticTokenSource{t} +} + +// staticTokenSource is a TokenSource that always returns the same Token. +type staticTokenSource struct { + t *Token +} + +func (s staticTokenSource) Token() (*Token, error) { + return s.t, nil +} + +// HTTPClient is the context key to use with golang.org/x/net/context's +// WithValue function to associate an *http.Client value with a context. +var HTTPClient internal.ContextKey + +// NewClient creates an *http.Client from a Context and TokenSource. 
+// The returned client is not valid beyond the lifetime of the context. +// +// As a special case, if src is nil, a non-OAuth2 client is returned +// using the provided context. This exists to support related OAuth2 +// packages. +func NewClient(ctx context.Context, src TokenSource) *http.Client { + if src == nil { + c, err := internal.ContextClient(ctx) + if err != nil { + return &http.Client{Transport: internal.ErrorTransport{err}} + } + return c + } + return &http.Client{ + Transport: &Transport{ + Base: internal.ContextTransport(ctx), + Source: ReuseTokenSource(nil, src), + }, + } +} + +// ReuseTokenSource returns a TokenSource which repeatedly returns the +// same token as long as it's valid, starting with t. +// When its cached token is invalid, a new token is obtained from src. +// +// ReuseTokenSource is typically used to reuse tokens from a cache +// (such as a file on disk) between runs of a program, rather than +// obtaining new tokens unnecessarily. +// +// The initial token t may be nil, in which case the TokenSource is +// wrapped in a caching version if it isn't one already. This also +// means it's always safe to wrap ReuseTokenSource around any other +// TokenSource without adverse effects. +func ReuseTokenSource(t *Token, src TokenSource) TokenSource { + // Don't wrap a reuseTokenSource in itself. That would work, + // but cause an unnecessary number of mutex operations. + // Just build the equivalent one. + if rt, ok := src.(*reuseTokenSource); ok { + if t == nil { + // Just use it directly. + return rt + } + src = rt.new + } + return &reuseTokenSource{ + t: t, + new: src, + } +} diff --git a/vendor/golang.org/x/oauth2/token.go b/vendor/golang.org/x/oauth2/token.go new file mode 100644 index 00000000..7a3167f1 --- /dev/null +++ b/vendor/golang.org/x/oauth2/token.go @@ -0,0 +1,158 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+
+package oauth2
+
+import (
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+	"time"
+
+	"golang.org/x/net/context"
+	"golang.org/x/oauth2/internal"
+)
+
+// expiryDelta determines how much earlier a token should be considered
+// expired than its actual expiration time. It is used to avoid late
+// expirations due to client-server time mismatches.
+const expiryDelta = 10 * time.Second
+
+// Token represents the credentials used to authorize
+// the requests to access protected resources on the OAuth 2.0
+// provider's backend.
+//
+// Most users of this package should not access fields of Token
+// directly. They're exported mostly for use by related packages
+// implementing derivative OAuth2 flows.
+type Token struct {
+	// AccessToken is the token that authorizes and authenticates
+	// the requests.
+	AccessToken string `json:"access_token"`
+
+	// TokenType is the type of token.
+	// The Type method returns either this or "Bearer", the default.
+	TokenType string `json:"token_type,omitempty"`
+
+	// RefreshToken is a token that's used by the application
+	// (as opposed to the user) to refresh the access token
+	// if it expires.
+	RefreshToken string `json:"refresh_token,omitempty"`
+
+	// Expiry is the optional expiration time of the access token.
+	//
+	// If zero, TokenSource implementations will reuse the same
+	// token forever and RefreshToken or equivalent
+	// mechanisms for that TokenSource will not be used.
+	Expiry time.Time `json:"expiry,omitempty"`
+
+	// raw optionally contains extra metadata from the server
+	// when updating a token.
+	raw interface{}
+}
+
+// Type returns t.TokenType if non-empty, else "Bearer".
+func (t *Token) Type() string { + if strings.EqualFold(t.TokenType, "bearer") { + return "Bearer" + } + if strings.EqualFold(t.TokenType, "mac") { + return "MAC" + } + if strings.EqualFold(t.TokenType, "basic") { + return "Basic" + } + if t.TokenType != "" { + return t.TokenType + } + return "Bearer" +} + +// SetAuthHeader sets the Authorization header to r using the access +// token in t. +// +// This method is unnecessary when using Transport or an HTTP Client +// returned by this package. +func (t *Token) SetAuthHeader(r *http.Request) { + r.Header.Set("Authorization", t.Type()+" "+t.AccessToken) +} + +// WithExtra returns a new Token that's a clone of t, but using the +// provided raw extra map. This is only intended for use by packages +// implementing derivative OAuth2 flows. +func (t *Token) WithExtra(extra interface{}) *Token { + t2 := new(Token) + *t2 = *t + t2.raw = extra + return t2 +} + +// Extra returns an extra field. +// Extra fields are key-value pairs returned by the server as a +// part of the token retrieval response. +func (t *Token) Extra(key string) interface{} { + if raw, ok := t.raw.(map[string]interface{}); ok { + return raw[key] + } + + vals, ok := t.raw.(url.Values) + if !ok { + return nil + } + + v := vals.Get(key) + switch s := strings.TrimSpace(v); strings.Count(s, ".") { + case 0: // Contains no "."; try to parse as int + if i, err := strconv.ParseInt(s, 10, 64); err == nil { + return i + } + case 1: // Contains a single "."; try to parse as float + if f, err := strconv.ParseFloat(s, 64); err == nil { + return f + } + } + + return v +} + +// expired reports whether the token is expired. +// t must be non-nil. +func (t *Token) expired() bool { + if t.Expiry.IsZero() { + return false + } + return t.Expiry.Add(-expiryDelta).Before(time.Now()) +} + +// Valid reports whether t is non-nil, has an AccessToken, and is not expired. 
+func (t *Token) Valid() bool {
+	return t != nil && t.AccessToken != "" && !t.expired()
+}
+
+// tokenFromInternal maps an *internal.Token struct into
+// a *Token struct.
+func tokenFromInternal(t *internal.Token) *Token {
+	if t == nil {
+		return nil
+	}
+	return &Token{
+		AccessToken:  t.AccessToken,
+		TokenType:    t.TokenType,
+		RefreshToken: t.RefreshToken,
+		Expiry:       t.Expiry,
+		raw:          t.Raw,
+	}
+}
+
+// retrieveToken takes a *Config and uses that to retrieve an *internal.Token.
+// This token is then mapped from *internal.Token into an *oauth2.Token which is returned along
+// with an error.
+func retrieveToken(ctx context.Context, c *Config, v url.Values) (*Token, error) {
+	tk, err := internal.RetrieveToken(ctx, c.ClientID, c.ClientSecret, c.Endpoint.TokenURL, v)
+	if err != nil {
+		return nil, err
+	}
+	return tokenFromInternal(tk), nil
+}
diff --git a/vendor/golang.org/x/oauth2/transport.go b/vendor/golang.org/x/oauth2/transport.go
new file mode 100644
index 00000000..92ac7e25
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/transport.go
@@ -0,0 +1,132 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package oauth2
+
+import (
+	"errors"
+	"io"
+	"net/http"
+	"sync"
+)
+
+// Transport is an http.RoundTripper that makes OAuth 2.0 HTTP requests,
+// wrapping a base RoundTripper and adding an Authorization header
+// with a token from the supplied Source.
+//
+// Transport is a low-level mechanism. Most code will use the
+// higher-level Config.Client method instead.
+type Transport struct {
+	// Source supplies the token to add to outgoing requests'
+	// Authorization headers.
+	Source TokenSource
+
+	// Base is the base RoundTripper used to make HTTP requests.
+	// If nil, http.DefaultTransport is used.
+ Base http.RoundTripper + + mu sync.Mutex // guards modReq + modReq map[*http.Request]*http.Request // original -> modified +} + +// RoundTrip authorizes and authenticates the request with an +// access token. If no token exists or token is expired, +// tries to refresh/fetch a new token. +func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { + if t.Source == nil { + return nil, errors.New("oauth2: Transport's Source is nil") + } + token, err := t.Source.Token() + if err != nil { + return nil, err + } + + req2 := cloneRequest(req) // per RoundTripper contract + token.SetAuthHeader(req2) + t.setModReq(req, req2) + res, err := t.base().RoundTrip(req2) + if err != nil { + t.setModReq(req, nil) + return nil, err + } + res.Body = &onEOFReader{ + rc: res.Body, + fn: func() { t.setModReq(req, nil) }, + } + return res, nil +} + +// CancelRequest cancels an in-flight request by closing its connection. +func (t *Transport) CancelRequest(req *http.Request) { + type canceler interface { + CancelRequest(*http.Request) + } + if cr, ok := t.base().(canceler); ok { + t.mu.Lock() + modReq := t.modReq[req] + delete(t.modReq, req) + t.mu.Unlock() + cr.CancelRequest(modReq) + } +} + +func (t *Transport) base() http.RoundTripper { + if t.Base != nil { + return t.Base + } + return http.DefaultTransport +} + +func (t *Transport) setModReq(orig, mod *http.Request) { + t.mu.Lock() + defer t.mu.Unlock() + if t.modReq == nil { + t.modReq = make(map[*http.Request]*http.Request) + } + if mod == nil { + delete(t.modReq, orig) + } else { + t.modReq[orig] = mod + } +} + +// cloneRequest returns a clone of the provided *http.Request. +// The clone is a shallow copy of the struct and its Header map. +func cloneRequest(r *http.Request) *http.Request { + // shallow copy of the struct + r2 := new(http.Request) + *r2 = *r + // deep copy of the Header + r2.Header = make(http.Header, len(r.Header)) + for k, s := range r.Header { + r2.Header[k] = append([]string(nil), s...) 
+ } + return r2 +} + +type onEOFReader struct { + rc io.ReadCloser + fn func() +} + +func (r *onEOFReader) Read(p []byte) (n int, err error) { + n, err = r.rc.Read(p) + if err == io.EOF { + r.runFunc() + } + return +} + +func (r *onEOFReader) Close() error { + err := r.rc.Close() + r.runFunc() + return err +} + +func (r *onEOFReader) runFunc() { + if fn := r.fn; fn != nil { + fn() + r.fn = nil + } +} diff --git a/vendor/google.golang.org/api/LICENSE b/vendor/google.golang.org/api/LICENSE new file mode 100644 index 00000000..263aa7a0 --- /dev/null +++ b/vendor/google.golang.org/api/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2011 Google Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/google.golang.org/api/drive/v3/drive-api.json b/vendor/google.golang.org/api/drive/v3/drive-api.json new file mode 100644 index 00000000..896d44e3 --- /dev/null +++ b/vendor/google.golang.org/api/drive/v3/drive-api.json @@ -0,0 +1,2410 @@ +{ + "kind": "discovery#restDescription", + "etag": "\"bRFOOrZKfO9LweMbPqu0kcu6De8/O9_NbpoVnW5GMGl7qWBIajcyrt8\"", + "discoveryVersion": "v1", + "id": "drive:v3", + "name": "drive", + "version": "v3", + "revision": "20160303", + "title": "Drive API", + "description": "The API to interact with Drive.", + "ownerDomain": "google.com", + "ownerName": "Google", + "icons": { + "x16": "https://ssl.gstatic.com/docs/doclist/images/drive_icon_16.png", + "x32": "https://ssl.gstatic.com/docs/doclist/images/drive_icon_32.png" + }, + "documentationLink": "https://developers.google.com/drive/", + "protocol": "rest", + "baseUrl": "https://www.googleapis.com/drive/v3/", + "basePath": "/drive/v3/", + "rootUrl": "https://www.googleapis.com/", + "servicePath": "drive/v3/", + "batchPath": "batch", + "parameters": { + "alt": { + "type": "string", + "description": "Data format for the response.", + "default": "json", + "enum": [ + "json" + ], + "enumDescriptions": [ + "Responses with Content-Type of application/json" + ], + "location": "query" + }, + "fields": { + "type": "string", + "description": "Selector specifying which fields to include in a partial response.", + "location": "query" + }, + "key": { + "type": 
"string", + "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", + "location": "query" + }, + "oauth_token": { + "type": "string", + "description": "OAuth 2.0 token for the current user.", + "location": "query" + }, + "prettyPrint": { + "type": "boolean", + "description": "Returns response with indentations and line breaks.", + "default": "true", + "location": "query" + }, + "quotaUser": { + "type": "string", + "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. Overrides userIp if both are provided.", + "location": "query" + }, + "userIp": { + "type": "string", + "description": "IP address of the site where the request originates. Use this if you want to enforce per-user limits.", + "location": "query" + } + }, + "auth": { + "oauth2": { + "scopes": { + "https://www.googleapis.com/auth/drive": { + "description": "View and manage the files in your Google Drive" + }, + "https://www.googleapis.com/auth/drive.appdata": { + "description": "View and manage its own configuration data in your Google Drive" + }, + "https://www.googleapis.com/auth/drive.file": { + "description": "View and manage Google Drive files and folders that you have opened or created with this app" + }, + "https://www.googleapis.com/auth/drive.metadata": { + "description": "View and manage metadata of files in your Google Drive" + }, + "https://www.googleapis.com/auth/drive.metadata.readonly": { + "description": "View metadata for files in your Google Drive" + }, + "https://www.googleapis.com/auth/drive.photos.readonly": { + "description": "View the photos, videos and albums in your Google Photos" + }, + "https://www.googleapis.com/auth/drive.readonly": { + "description": "View the files in your Google Drive" + }, + "https://www.googleapis.com/auth/drive.scripts": { + 
"description": "Modify your Google Apps Script scripts' behavior" + } + } + } + }, + "schemas": { + "About": { + "id": "About", + "type": "object", + "description": "Information about the user, the user's Drive, and system capabilities.", + "properties": { + "appInstalled": { + "type": "boolean", + "description": "Whether the user has installed the requesting app." + }, + "exportFormats": { + "type": "object", + "description": "A map of source MIME type to possible targets for all supported exports.", + "additionalProperties": { + "type": "array", + "items": { + "type": "string" + } + } + }, + "folderColorPalette": { + "type": "array", + "description": "The currently supported folder colors as RGB hex strings.", + "items": { + "type": "string" + } + }, + "importFormats": { + "type": "object", + "description": "A map of source MIME type to possible targets for all supported imports.", + "additionalProperties": { + "type": "array", + "items": { + "type": "string" + } + } + }, + "kind": { + "type": "string", + "description": "This is always drive#about.", + "default": "drive#about" + }, + "maxImportSizes": { + "type": "object", + "description": "A map of maximum import sizes by MIME type, in bytes.", + "additionalProperties": { + "type": "string", + "format": "int64" + } + }, + "maxUploadSize": { + "type": "string", + "description": "The maximum upload size in bytes.", + "format": "int64" + }, + "storageQuota": { + "type": "object", + "description": "The user's storage quota limits and usage. All fields are measured in bytes.", + "properties": { + "limit": { + "type": "string", + "description": "The usage limit, if applicable. 
This will not be present if the user has unlimited storage.", + "format": "int64" + }, + "usage": { + "type": "string", + "description": "The total usage across all services.", + "format": "int64" + }, + "usageInDrive": { + "type": "string", + "description": "The usage by all files in Google Drive.", + "format": "int64" + }, + "usageInDriveTrash": { + "type": "string", + "description": "The usage by trashed files in Google Drive.", + "format": "int64" + } + } + }, + "user": { + "$ref": "User", + "description": "The authenticated user." + } + } + }, + "Change": { + "id": "Change", + "type": "object", + "description": "A change to a file.", + "properties": { + "file": { + "$ref": "File", + "description": "The updated state of the file. Present if the file has not been removed." + }, + "fileId": { + "type": "string", + "description": "The ID of the file which has changed." + }, + "kind": { + "type": "string", + "description": "This is always drive#change.", + "default": "drive#change" + }, + "removed": { + "type": "boolean", + "description": "Whether the file has been removed from the view of the changes list, for example by deletion or lost access." + }, + "time": { + "type": "string", + "description": "The time of this change (RFC 3339 date-time).", + "format": "date-time" + } + } + }, + "ChangeList": { + "id": "ChangeList", + "type": "object", + "description": "A list of changes for a user.", + "properties": { + "changes": { + "type": "array", + "description": "The page of changes.", + "items": { + "$ref": "Change" + } + }, + "kind": { + "type": "string", + "description": "This is always drive#changeList.", + "default": "drive#changeList" + }, + "newStartPageToken": { + "type": "string", + "description": "The starting page token for future changes. This will be present only if the end of the current changes list has been reached." + }, + "nextPageToken": { + "type": "string", + "description": "The page token for the next page of changes. 
This will be absent if the end of the current changes list has been reached." + } + } + }, + "Channel": { + "id": "Channel", + "type": "object", + "description": "An notification channel used to watch for resource changes.", + "properties": { + "address": { + "type": "string", + "description": "The address where notifications are delivered for this channel." + }, + "expiration": { + "type": "string", + "description": "Date and time of notification channel expiration, expressed as a Unix timestamp, in milliseconds. Optional.", + "format": "int64" + }, + "id": { + "type": "string", + "description": "A UUID or similar unique string that identifies this channel." + }, + "kind": { + "type": "string", + "description": "Identifies this as a notification channel used to watch for changes to a resource. Value: the fixed string \"api#channel\".", + "default": "api#channel" + }, + "params": { + "type": "object", + "description": "Additional parameters controlling delivery channel behavior. Optional.", + "additionalProperties": { + "type": "string", + "description": "Declares a new parameter by name." + } + }, + "payload": { + "type": "boolean", + "description": "A Boolean value to indicate whether payload is wanted. Optional." + }, + "resourceId": { + "type": "string", + "description": "An opaque ID that identifies the resource being watched on this channel. Stable across different API versions." + }, + "resourceUri": { + "type": "string", + "description": "A version-specific identifier for the watched resource." + }, + "token": { + "type": "string", + "description": "An arbitrary string delivered to the target address with each notification delivered over this channel. Optional." + }, + "type": { + "type": "string", + "description": "The type of delivery mechanism used for this channel." 
+ } + } + }, + "Comment": { + "id": "Comment", + "type": "object", + "description": "A comment on a file.", + "properties": { + "anchor": { + "type": "string", + "description": "A region of the document represented as a JSON string. See anchor documentation for details on how to define and interpret anchor properties." + }, + "author": { + "$ref": "User", + "description": "The user who created the comment." + }, + "content": { + "type": "string", + "description": "The plain text content of the comment. This field is used for setting the content, while htmlContent should be displayed.", + "annotations": { + "required": [ + "drive.comments.create", + "drive.comments.update" + ] + } + }, + "createdTime": { + "type": "string", + "description": "The time at which the comment was created (RFC 3339 date-time).", + "format": "date-time" + }, + "deleted": { + "type": "boolean", + "description": "Whether the comment has been deleted. A deleted comment has no content." + }, + "htmlContent": { + "type": "string", + "description": "The content of the comment with HTML formatting." + }, + "id": { + "type": "string", + "description": "The ID of the comment." + }, + "kind": { + "type": "string", + "description": "This is always drive#comment.", + "default": "drive#comment" + }, + "modifiedTime": { + "type": "string", + "description": "The last time the comment or any of its replies was modified (RFC 3339 date-time).", + "format": "date-time" + }, + "quotedFileContent": { + "type": "object", + "description": "The file content to which the comment refers, typically within the anchor region. For a text file, for example, this would be the text at the location of the comment.", + "properties": { + "mimeType": { + "type": "string", + "description": "The MIME type of the quoted content." + }, + "value": { + "type": "string", + "description": "The quoted content itself. This is interpreted as plain text if set through the API." 
+ } + } + }, + "replies": { + "type": "array", + "description": "The full list of replies to the comment in chronological order.", + "items": { + "$ref": "Reply" + } + }, + "resolved": { + "type": "boolean", + "description": "Whether the comment has been resolved by one of its replies." + } + } + }, + "CommentList": { + "id": "CommentList", + "type": "object", + "description": "A list of comments on a file.", + "properties": { + "comments": { + "type": "array", + "description": "The page of comments.", + "items": { + "$ref": "Comment" + } + }, + "kind": { + "type": "string", + "description": "This is always drive#commentList.", + "default": "drive#commentList" + }, + "nextPageToken": { + "type": "string", + "description": "The page token for the next page of comments. This will be absent if the end of the comments list has been reached." + } + } + }, + "File": { + "id": "File", + "type": "object", + "description": "The metadata for a file.", + "properties": { + "appProperties": { + "type": "object", + "description": "A collection of arbitrary key-value pairs which are private to the requesting app.\nEntries with null values are cleared in update and copy requests.", + "additionalProperties": { + "type": "string" + } + }, + "capabilities": { + "type": "object", + "description": "Capabilities the current user has on the file.", + "properties": { + "canComment": { + "type": "boolean", + "description": "Whether the user can comment on the file." + }, + "canCopy": { + "type": "boolean", + "description": "Whether the user can copy the file." + }, + "canEdit": { + "type": "boolean", + "description": "Whether the user can edit the file's content." + }, + "canReadRevisions": { + "type": "boolean", + "description": "Whether the current user has read access to the Revisions resource of the file." + }, + "canShare": { + "type": "boolean", + "description": "Whether the user can modify the file's permissions and sharing settings." 
+ } + } + }, + "contentHints": { + "type": "object", + "description": "Additional information about the content of the file. These fields are never populated in responses.", + "properties": { + "indexableText": { + "type": "string", + "description": "Text to be indexed for the file to improve fullText queries. This is limited to 128KB in length and may contain HTML elements." + }, + "thumbnail": { + "type": "object", + "description": "A thumbnail for the file. This will only be used if Drive cannot generate a standard thumbnail.", + "properties": { + "image": { + "type": "string", + "description": "The thumbnail data encoded with URL-safe Base64 (RFC 4648 section 5).", + "format": "byte" + }, + "mimeType": { + "type": "string", + "description": "The MIME type of the thumbnail." + } + } + } + } + }, + "createdTime": { + "type": "string", + "description": "The time at which the file was created (RFC 3339 date-time).", + "format": "date-time" + }, + "description": { + "type": "string", + "description": "A short description of the file." + }, + "explicitlyTrashed": { + "type": "boolean", + "description": "Whether the file has been explicitly trashed, as opposed to recursively trashed from a parent folder." + }, + "fileExtension": { + "type": "string", + "description": "The final component of fullFileExtension. This is only available for files with binary content in Drive." + }, + "folderColorRgb": { + "type": "string", + "description": "The color for a folder as an RGB hex string. The supported colors are published in the folderColorPalette field of the About resource.\nIf an unsupported color is specified, the closest color in the palette will be used instead." + }, + "fullFileExtension": { + "type": "string", + "description": "The full file extension extracted from the name field. May contain multiple concatenated extensions, such as \"tar.gz\". 
This is only available for files with binary content in Drive.\nThis is automatically updated when the name field changes, however it is not cleared if the new name does not contain a valid extension." + }, + "headRevisionId": { + "type": "string", + "description": "The ID of the file's head revision. This is currently only available for files with binary content in Drive." + }, + "iconLink": { + "type": "string", + "description": "A static, unauthenticated link to the file's icon." + }, + "id": { + "type": "string", + "description": "The ID of the file." + }, + "imageMediaMetadata": { + "type": "object", + "description": "Additional metadata about image media, if available.", + "properties": { + "aperture": { + "type": "number", + "description": "The aperture used to create the photo (f-number).", + "format": "float" + }, + "cameraMake": { + "type": "string", + "description": "The make of the camera used to create the photo." + }, + "cameraModel": { + "type": "string", + "description": "The model of the camera used to create the photo." + }, + "colorSpace": { + "type": "string", + "description": "The color space of the photo." + }, + "exposureBias": { + "type": "number", + "description": "The exposure bias of the photo (APEX value).", + "format": "float" + }, + "exposureMode": { + "type": "string", + "description": "The exposure mode used to create the photo." + }, + "exposureTime": { + "type": "number", + "description": "The length of the exposure, in seconds.", + "format": "float" + }, + "flashUsed": { + "type": "boolean", + "description": "Whether a flash was used to create the photo." 
+ }, + "focalLength": { + "type": "number", + "description": "The focal length used to create the photo, in millimeters.", + "format": "float" + }, + "height": { + "type": "integer", + "description": "The height of the image in pixels.", + "format": "int32" + }, + "isoSpeed": { + "type": "integer", + "description": "The ISO speed used to create the photo.", + "format": "int32" + }, + "lens": { + "type": "string", + "description": "The lens used to create the photo." + }, + "location": { + "type": "object", + "description": "Geographic location information stored in the image.", + "properties": { + "altitude": { + "type": "number", + "description": "The altitude stored in the image.", + "format": "double" + }, + "latitude": { + "type": "number", + "description": "The latitude stored in the image.", + "format": "double" + }, + "longitude": { + "type": "number", + "description": "The longitude stored in the image.", + "format": "double" + } + } + }, + "maxApertureValue": { + "type": "number", + "description": "The smallest f-number of the lens at the focal length used to create the photo (APEX value).", + "format": "float" + }, + "meteringMode": { + "type": "string", + "description": "The metering mode used to create the photo." + }, + "rotation": { + "type": "integer", + "description": "The rotation in clockwise degrees from the image's original orientation.", + "format": "int32" + }, + "sensor": { + "type": "string", + "description": "The type of sensor used to create the photo." + }, + "subjectDistance": { + "type": "integer", + "description": "The distance to the subject of the photo, in meters.", + "format": "int32" + }, + "time": { + "type": "string", + "description": "The date and time the photo was taken (EXIF DateTime)." + }, + "whiteBalance": { + "type": "string", + "description": "The white balance mode used to create the photo." 
+ }, + "width": { + "type": "integer", + "description": "The width of the image in pixels.", + "format": "int32" + } + } + }, + "kind": { + "type": "string", + "description": "This is always drive#file.", + "default": "drive#file" + }, + "lastModifyingUser": { + "$ref": "User", + "description": "The last user to modify the file." + }, + "md5Checksum": { + "type": "string", + "description": "The MD5 checksum for the content of the file. This is only applicable to files with binary content in Drive." + }, + "mimeType": { + "type": "string", + "description": "The MIME type of the file.\nDrive will attempt to automatically detect an appropriate value from uploaded content if no value is provided. The value cannot be changed unless a new revision is uploaded.\nIf a file is created with a Google Doc MIME type, the uploaded content will be imported if possible. The supported import formats are published in the About resource." + }, + "modifiedByMeTime": { + "type": "string", + "description": "The last time the file was modified by the user (RFC 3339 date-time).", + "format": "date-time" + }, + "modifiedTime": { + "type": "string", + "description": "The last time the file was modified by anyone (RFC 3339 date-time).\nNote that setting modifiedTime will also update modifiedByMeTime for the user.", + "format": "date-time" + }, + "name": { + "type": "string", + "description": "The name of the file. This is not necessarily unique within a folder." + }, + "originalFilename": { + "type": "string", + "description": "The original filename of the uploaded content if available, or else the original value of the name field. This is only available for files with binary content in Drive." + }, + "ownedByMe": { + "type": "boolean", + "description": "Whether the user owns the file." + }, + "owners": { + "type": "array", + "description": "The owners of the file. 
Currently, only certain legacy files may have more than one owner.", + "items": { + "$ref": "User" + } + }, + "parents": { + "type": "array", + "description": "The IDs of the parent folders which contain the file.\nIf not specified as part of a create request, the file will be placed directly in the My Drive folder. Update requests must use the addParents and removeParents parameters to modify the values.", + "items": { + "type": "string" + } + }, + "permissions": { + "type": "array", + "description": "The full list of permissions for the file. This is only available if the requesting user can share the file.", + "items": { + "$ref": "Permission" + } + }, + "properties": { + "type": "object", + "description": "A collection of arbitrary key-value pairs which are visible to all apps.\nEntries with null values are cleared in update and copy requests.", + "additionalProperties": { + "type": "string" + } + }, + "quotaBytesUsed": { + "type": "string", + "description": "The number of storage quota bytes used by the file. This includes the head revision as well as previous revisions with keepForever enabled.", + "format": "int64" + }, + "shared": { + "type": "boolean", + "description": "Whether the file has been shared." + }, + "sharedWithMeTime": { + "type": "string", + "description": "The time at which the file was shared with the user, if applicable (RFC 3339 date-time).", + "format": "date-time" + }, + "sharingUser": { + "$ref": "User", + "description": "The user who shared the file with the requesting user, if applicable." + }, + "size": { + "type": "string", + "description": "The size of the file's content in bytes. This is only applicable to files with binary content in Drive.", + "format": "int64" + }, + "spaces": { + "type": "array", + "description": "The list of spaces which contain the file. 
The currently supported values are 'drive', 'appDataFolder' and 'photos'.", + "items": { + "type": "string" + } + }, + "starred": { + "type": "boolean", + "description": "Whether the user has starred the file." + }, + "thumbnailLink": { + "type": "string", + "description": "A short-lived link to the file's thumbnail, if available. Typically lasts on the order of hours." + }, + "trashed": { + "type": "boolean", + "description": "Whether the file has been trashed, either explicitly or from a trashed parent folder. Only the owner may trash a file, and other users cannot see files in the owner's trash." + }, + "version": { + "type": "string", + "description": "A monotonically increasing version number for the file. This reflects every change made to the file on the server, even those not visible to the user.", + "format": "int64" + }, + "videoMediaMetadata": { + "type": "object", + "description": "Additional metadata about video media. This may not be available immediately upon upload.", + "properties": { + "durationMillis": { + "type": "string", + "description": "The duration of the video in milliseconds.", + "format": "int64" + }, + "height": { + "type": "integer", + "description": "The height of the video in pixels.", + "format": "int32" + }, + "width": { + "type": "integer", + "description": "The width of the video in pixels.", + "format": "int32" + } + } + }, + "viewedByMe": { + "type": "boolean", + "description": "Whether the file has been viewed by this user." + }, + "viewedByMeTime": { + "type": "string", + "description": "The last time the file was viewed by the user (RFC 3339 date-time).", + "format": "date-time" + }, + "viewersCanCopyContent": { + "type": "boolean", + "description": "Whether users with only reader or commenter permission can copy the file's content. This affects copy, download, and print operations." + }, + "webContentLink": { + "type": "string", + "description": "A link for downloading the content of the file in a browser. 
This is only available for files with binary content in Drive." + }, + "webViewLink": { + "type": "string", + "description": "A link for opening the file in a relevant Google editor or viewer in a browser." + }, + "writersCanShare": { + "type": "boolean", + "description": "Whether users with only writer permission can modify the file's permissions." + } + } + }, + "FileList": { + "id": "FileList", + "type": "object", + "description": "A list of files.", + "properties": { + "files": { + "type": "array", + "description": "The page of files.", + "items": { + "$ref": "File" + } + }, + "kind": { + "type": "string", + "description": "This is always drive#fileList.", + "default": "drive#fileList" + }, + "nextPageToken": { + "type": "string", + "description": "The page token for the next page of files. This will be absent if the end of the files list has been reached." + } + } + }, + "GeneratedIds": { + "id": "GeneratedIds", + "type": "object", + "description": "A list of generated file IDs which can be provided in create requests.", + "properties": { + "ids": { + "type": "array", + "description": "The IDs generated for the requesting user in the specified space.", + "items": { + "type": "string" + } + }, + "kind": { + "type": "string", + "description": "This is always drive#generatedIds", + "default": "drive#generatedIds" + }, + "space": { + "type": "string", + "description": "The type of file that can be created with these IDs." + } + } + }, + "Permission": { + "id": "Permission", + "type": "object", + "description": "A permission for a file. A permission grants a user, group, domain or the world access to a file or a folder hierarchy.", + "properties": { + "allowFileDiscovery": { + "type": "boolean", + "description": "Whether the permission allows the file to be discovered through search. This is only applicable for permissions of type domain or anyone." + }, + "displayName": { + "type": "string", + "description": "A displayable name for users, groups or domains." 
+ }, + "domain": { + "type": "string", + "description": "The domain to which this permission refers." + }, + "emailAddress": { + "type": "string", + "description": "The email address of the user or group to which this permission refers." + }, + "id": { + "type": "string", + "description": "The ID of this permission. This is a unique identifier for the grantee, and is published in User resources as permissionId." + }, + "kind": { + "type": "string", + "description": "This is always drive#permission.", + "default": "drive#permission" + }, + "photoLink": { + "type": "string", + "description": "A link to the user's profile photo, if available." + }, + "role": { + "type": "string", + "description": "The role granted by this permission. Valid values are: \n- owner \n- writer \n- commenter \n- reader", + "annotations": { + "required": [ + "drive.permissions.create" + ] + } + }, + "type": { + "type": "string", + "description": "The type of the grantee. Valid values are: \n- user \n- group \n- domain \n- anyone", + "annotations": { + "required": [ + "drive.permissions.create" + ] + } + } + } + }, + "PermissionList": { + "id": "PermissionList", + "type": "object", + "description": "A list of permissions for a file.", + "properties": { + "kind": { + "type": "string", + "description": "This is always drive#permissionList.", + "default": "drive#permissionList" + }, + "permissions": { + "type": "array", + "description": "The full list of permissions.", + "items": { + "$ref": "Permission" + } + } + } + }, + "Reply": { + "id": "Reply", + "type": "object", + "description": "A reply to a comment on a file.", + "properties": { + "action": { + "type": "string", + "description": "The action the reply performed to the parent comment. Valid values are: \n- resolve \n- reopen" + }, + "author": { + "$ref": "User", + "description": "The user who created the reply." + }, + "content": { + "type": "string", + "description": "The plain text content of the reply. 
This field is used for setting the content, while htmlContent should be displayed. This is required on creates if no action is specified.", + "annotations": { + "required": [ + "drive.replies.update" + ] + } + }, + "createdTime": { + "type": "string", + "description": "The time at which the reply was created (RFC 3339 date-time).", + "format": "date-time" + }, + "deleted": { + "type": "boolean", + "description": "Whether the reply has been deleted. A deleted reply has no content." + }, + "htmlContent": { + "type": "string", + "description": "The content of the reply with HTML formatting." + }, + "id": { + "type": "string", + "description": "The ID of the reply." + }, + "kind": { + "type": "string", + "description": "This is always drive#reply.", + "default": "drive#reply" + }, + "modifiedTime": { + "type": "string", + "description": "The last time the reply was modified (RFC 3339 date-time).", + "format": "date-time" + } + } + }, + "ReplyList": { + "id": "ReplyList", + "type": "object", + "description": "A list of replies to a comment on a file.", + "properties": { + "kind": { + "type": "string", + "description": "This is always drive#replyList.", + "default": "drive#replyList" + }, + "nextPageToken": { + "type": "string", + "description": "The page token for the next page of replies. This will be absent if the end of the replies list has been reached." + }, + "replies": { + "type": "array", + "description": "The page of replies.", + "items": { + "$ref": "Reply" + } + } + } + }, + "Revision": { + "id": "Revision", + "type": "object", + "description": "The metadata for a revision to a file.", + "properties": { + "id": { + "type": "string", + "description": "The ID of the revision." + }, + "keepForever": { + "type": "boolean", + "description": "Whether to keep this revision forever, even if it is no longer the head revision. If not set, the revision will be automatically purged 30 days after newer content is uploaded. 
This can be set on a maximum of 200 revisions for a file.\nThis field is only applicable to files with binary content in Drive." + }, + "kind": { + "type": "string", + "description": "This is always drive#revision.", + "default": "drive#revision" + }, + "lastModifyingUser": { + "$ref": "User", + "description": "The last user to modify this revision." + }, + "md5Checksum": { + "type": "string", + "description": "The MD5 checksum of the revision's content. This is only applicable to files with binary content in Drive." + }, + "mimeType": { + "type": "string", + "description": "The MIME type of the revision." + }, + "modifiedTime": { + "type": "string", + "description": "The last time the revision was modified (RFC 3339 date-time).", + "format": "date-time" + }, + "originalFilename": { + "type": "string", + "description": "The original filename used to create this revision. This is only applicable to files with binary content in Drive." + }, + "publishAuto": { + "type": "boolean", + "description": "Whether subsequent revisions will be automatically republished. This is only applicable to Google Docs." + }, + "published": { + "type": "boolean", + "description": "Whether this revision is published. This is only applicable to Google Docs." + }, + "publishedOutsideDomain": { + "type": "boolean", + "description": "Whether this revision is published outside the domain. This is only applicable to Google Docs." + }, + "size": { + "type": "string", + "description": "The size of the revision's content in bytes. 
This is only applicable to files with binary content in Drive.", + "format": "int64" + } + } + }, + "RevisionList": { + "id": "RevisionList", + "type": "object", + "description": "A list of revisions of a file.", + "properties": { + "kind": { + "type": "string", + "description": "This is always drive#revisionList.", + "default": "drive#revisionList" + }, + "revisions": { + "type": "array", + "description": "The full list of revisions.", + "items": { + "$ref": "Revision" + } + } + } + }, + "StartPageToken": { + "id": "StartPageToken", + "type": "object", + "properties": { + "kind": { + "type": "string", + "description": "This is always drive#startPageToken.", + "default": "drive#startPageToken" + }, + "startPageToken": { + "type": "string", + "description": "The starting page token for listing changes." + } + } + }, + "User": { + "id": "User", + "type": "object", + "description": "Information about a Drive user.", + "properties": { + "displayName": { + "type": "string", + "description": "A plain text displayable name for this user." + }, + "emailAddress": { + "type": "string", + "description": "The email address of the user. This may not be present in certain contexts if the user has not made their email address visible to the requester." + }, + "kind": { + "type": "string", + "description": "This is always drive#user.", + "default": "drive#user" + }, + "me": { + "type": "boolean", + "description": "Whether this user is the requesting user." + }, + "permissionId": { + "type": "string", + "description": "The user's ID as visible in Permission resources." + }, + "photoLink": { + "type": "string", + "description": "A link to the user's profile photo, if available." 
+ } + } + } + }, + "resources": { + "about": { + "methods": { + "get": { + "id": "drive.about.get", + "path": "about", + "httpMethod": "GET", + "description": "Gets information about the user, the user's Drive, and system capabilities.", + "response": { + "$ref": "About" + }, + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.appdata", + "https://www.googleapis.com/auth/drive.file", + "https://www.googleapis.com/auth/drive.metadata", + "https://www.googleapis.com/auth/drive.metadata.readonly", + "https://www.googleapis.com/auth/drive.photos.readonly", + "https://www.googleapis.com/auth/drive.readonly" + ] + } + } + }, + "changes": { + "methods": { + "getStartPageToken": { + "id": "drive.changes.getStartPageToken", + "path": "changes/startPageToken", + "httpMethod": "GET", + "description": "Gets the starting pageToken for listing future changes.", + "response": { + "$ref": "StartPageToken" + }, + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.appdata", + "https://www.googleapis.com/auth/drive.file", + "https://www.googleapis.com/auth/drive.metadata", + "https://www.googleapis.com/auth/drive.metadata.readonly", + "https://www.googleapis.com/auth/drive.photos.readonly", + "https://www.googleapis.com/auth/drive.readonly" + ] + }, + "list": { + "id": "drive.changes.list", + "path": "changes", + "httpMethod": "GET", + "description": "Lists changes for a user.", + "parameters": { + "includeRemoved": { + "type": "boolean", + "description": "Whether to include changes indicating that items have left the view of the changes list, for example by deletion or lost access.", + "default": "true", + "location": "query" + }, + "pageSize": { + "type": "integer", + "description": "The maximum number of changes to return per page.", + "default": "100", + "format": "int32", + "minimum": "1", + "maximum": "1000", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": 
"The token for continuing a previous list request on the next page. This should be set to the value of 'nextPageToken' from the previous response or to the response from the getStartPageToken method.", + "required": true, + "location": "query" + }, + "restrictToMyDrive": { + "type": "boolean", + "description": "Whether to restrict the results to changes inside the My Drive hierarchy. This omits changes to files such as those in the Application Data folder or shared files which have not been added to My Drive.", + "default": "false", + "location": "query" + }, + "spaces": { + "type": "string", + "description": "A comma-separated list of spaces to query within the user corpus. Supported values are 'drive', 'appDataFolder' and 'photos'.", + "default": "drive", + "location": "query" + } + }, + "parameterOrder": [ + "pageToken" + ], + "response": { + "$ref": "ChangeList" + }, + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.appdata", + "https://www.googleapis.com/auth/drive.file", + "https://www.googleapis.com/auth/drive.metadata", + "https://www.googleapis.com/auth/drive.metadata.readonly", + "https://www.googleapis.com/auth/drive.photos.readonly", + "https://www.googleapis.com/auth/drive.readonly" + ], + "supportsSubscription": true + }, + "watch": { + "id": "drive.changes.watch", + "path": "changes/watch", + "httpMethod": "POST", + "description": "Subscribes to changes for a user.", + "parameters": { + "includeRemoved": { + "type": "boolean", + "description": "Whether to include changes indicating that items have left the view of the changes list, for example by deletion or lost access.", + "default": "true", + "location": "query" + }, + "pageSize": { + "type": "integer", + "description": "The maximum number of changes to return per page.", + "default": "100", + "format": "int32", + "minimum": "1", + "maximum": "1000", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "The token for 
continuing a previous list request on the next page. This should be set to the value of 'nextPageToken' from the previous response or to the response from the getStartPageToken method.", + "required": true, + "location": "query" + }, + "restrictToMyDrive": { + "type": "boolean", + "description": "Whether to restrict the results to changes inside the My Drive hierarchy. This omits changes to files such as those in the Application Data folder or shared files which have not been added to My Drive.", + "default": "false", + "location": "query" + }, + "spaces": { + "type": "string", + "description": "A comma-separated list of spaces to query within the user corpus. Supported values are 'drive', 'appDataFolder' and 'photos'.", + "default": "drive", + "location": "query" + } + }, + "parameterOrder": [ + "pageToken" + ], + "request": { + "$ref": "Channel", + "parameterName": "resource" + }, + "response": { + "$ref": "Channel" + }, + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.appdata", + "https://www.googleapis.com/auth/drive.file", + "https://www.googleapis.com/auth/drive.metadata", + "https://www.googleapis.com/auth/drive.metadata.readonly", + "https://www.googleapis.com/auth/drive.photos.readonly", + "https://www.googleapis.com/auth/drive.readonly" + ], + "supportsSubscription": true + } + } + }, + "channels": { + "methods": { + "stop": { + "id": "drive.channels.stop", + "path": "channels/stop", + "httpMethod": "POST", + "description": "Stop watching resources through this channel", + "request": { + "$ref": "Channel", + "parameterName": "resource" + }, + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.appdata", + "https://www.googleapis.com/auth/drive.file", + "https://www.googleapis.com/auth/drive.metadata", + "https://www.googleapis.com/auth/drive.metadata.readonly", + "https://www.googleapis.com/auth/drive.photos.readonly", + 
"https://www.googleapis.com/auth/drive.readonly" + ] + } + } + }, + "comments": { + "methods": { + "create": { + "id": "drive.comments.create", + "path": "files/{fileId}/comments", + "httpMethod": "POST", + "description": "Creates a new comment on a file.", + "parameters": { + "fileId": { + "type": "string", + "description": "The ID of the file.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "fileId" + ], + "request": { + "$ref": "Comment" + }, + "response": { + "$ref": "Comment" + }, + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.file" + ] + }, + "delete": { + "id": "drive.comments.delete", + "path": "files/{fileId}/comments/{commentId}", + "httpMethod": "DELETE", + "description": "Deletes a comment.", + "parameters": { + "commentId": { + "type": "string", + "description": "The ID of the comment.", + "required": true, + "location": "path" + }, + "fileId": { + "type": "string", + "description": "The ID of the file.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "fileId", + "commentId" + ], + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.file" + ] + }, + "get": { + "id": "drive.comments.get", + "path": "files/{fileId}/comments/{commentId}", + "httpMethod": "GET", + "description": "Gets a comment by ID.", + "parameters": { + "commentId": { + "type": "string", + "description": "The ID of the comment.", + "required": true, + "location": "path" + }, + "fileId": { + "type": "string", + "description": "The ID of the file.", + "required": true, + "location": "path" + }, + "includeDeleted": { + "type": "boolean", + "description": "Whether to return deleted comments. 
Deleted comments will not include their original content.", + "default": "false", + "location": "query" + } + }, + "parameterOrder": [ + "fileId", + "commentId" + ], + "response": { + "$ref": "Comment" + }, + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.file", + "https://www.googleapis.com/auth/drive.readonly" + ] + }, + "list": { + "id": "drive.comments.list", + "path": "files/{fileId}/comments", + "httpMethod": "GET", + "description": "Lists a file's comments.", + "parameters": { + "fileId": { + "type": "string", + "description": "The ID of the file.", + "required": true, + "location": "path" + }, + "includeDeleted": { + "type": "boolean", + "description": "Whether to include deleted comments. Deleted comments will not include their original content.", + "default": "false", + "location": "query" + }, + "pageSize": { + "type": "integer", + "description": "The maximum number of comments to return per page.", + "default": "20", + "format": "int32", + "minimum": "1", + "maximum": "100", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "The token for continuing a previous list request on the next page. 
This should be set to the value of 'nextPageToken' from the previous response.", + "location": "query" + }, + "startModifiedTime": { + "type": "string", + "description": "The minimum value of 'modifiedTime' for the result comments (RFC 3339 date-time).", + "location": "query" + } + }, + "parameterOrder": [ + "fileId" + ], + "response": { + "$ref": "CommentList" + }, + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.file", + "https://www.googleapis.com/auth/drive.readonly" + ] + }, + "update": { + "id": "drive.comments.update", + "path": "files/{fileId}/comments/{commentId}", + "httpMethod": "PATCH", + "description": "Updates a comment with patch semantics.", + "parameters": { + "commentId": { + "type": "string", + "description": "The ID of the comment.", + "required": true, + "location": "path" + }, + "fileId": { + "type": "string", + "description": "The ID of the file.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "fileId", + "commentId" + ], + "request": { + "$ref": "Comment" + }, + "response": { + "$ref": "Comment" + }, + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.file" + ] + } + } + }, + "files": { + "methods": { + "copy": { + "id": "drive.files.copy", + "path": "files/{fileId}/copy", + "httpMethod": "POST", + "description": "Creates a copy of a file and applies any requested updates with patch semantics.", + "parameters": { + "fileId": { + "type": "string", + "description": "The ID of the file.", + "required": true, + "location": "path" + }, + "ignoreDefaultVisibility": { + "type": "boolean", + "description": "Whether to ignore the domain's default visibility settings for the created file. Domain administrators can choose to make all uploaded files visible to the domain by default; this parameter bypasses that behavior for the request. 
Permissions are still inherited from parent folders.", + "default": "false", + "location": "query" + }, + "keepRevisionForever": { + "type": "boolean", + "description": "Whether to set the 'keepForever' field in the new head revision. This is only applicable to files with binary content in Drive.", + "default": "false", + "location": "query" + }, + "ocrLanguage": { + "type": "string", + "description": "A language hint for OCR processing during image import (ISO 639-1 code).", + "location": "query" + } + }, + "parameterOrder": [ + "fileId" + ], + "request": { + "$ref": "File" + }, + "response": { + "$ref": "File" + }, + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.appdata", + "https://www.googleapis.com/auth/drive.file", + "https://www.googleapis.com/auth/drive.photos.readonly" + ] + }, + "create": { + "id": "drive.files.create", + "path": "files", + "httpMethod": "POST", + "description": "Creates a new file.", + "parameters": { + "ignoreDefaultVisibility": { + "type": "boolean", + "description": "Whether to ignore the domain's default visibility settings for the created file. Domain administrators can choose to make all uploaded files visible to the domain by default; this parameter bypasses that behavior for the request. Permissions are still inherited from parent folders.", + "default": "false", + "location": "query" + }, + "keepRevisionForever": { + "type": "boolean", + "description": "Whether to set the 'keepForever' field in the new head revision. 
This is only applicable to files with binary content in Drive.", + "default": "false", + "location": "query" + }, + "ocrLanguage": { + "type": "string", + "description": "A language hint for OCR processing during image import (ISO 639-1 code).", + "location": "query" + }, + "useContentAsIndexableText": { + "type": "boolean", + "description": "Whether to use the uploaded content as indexable text.", + "default": "false", + "location": "query" + } + }, + "request": { + "$ref": "File" + }, + "response": { + "$ref": "File" + }, + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.appdata", + "https://www.googleapis.com/auth/drive.file" + ], + "supportsMediaUpload": true, + "mediaUpload": { + "accept": [ + "*/*" + ], + "maxSize": "5120GB", + "protocols": { + "simple": { + "multipart": true, + "path": "/upload/drive/v3/files" + }, + "resumable": { + "multipart": true, + "path": "/resumable/upload/drive/v3/files" + } + } + }, + "supportsSubscription": true + }, + "delete": { + "id": "drive.files.delete", + "path": "files/{fileId}", + "httpMethod": "DELETE", + "description": "Permanently deletes a file owned by the user without moving it to the trash. 
If the target is a folder, all descendants owned by the user are also deleted.", + "parameters": { + "fileId": { + "type": "string", + "description": "The ID of the file.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "fileId" + ], + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.appdata", + "https://www.googleapis.com/auth/drive.file" + ] + }, + "emptyTrash": { + "id": "drive.files.emptyTrash", + "path": "files/trash", + "httpMethod": "DELETE", + "description": "Permanently deletes all of the user's trashed files.", + "scopes": [ + "https://www.googleapis.com/auth/drive" + ] + }, + "export": { + "id": "drive.files.export", + "path": "files/{fileId}/export", + "httpMethod": "GET", + "description": "Exports a Google Doc to the requested MIME type and returns the exported content.", + "parameters": { + "fileId": { + "type": "string", + "description": "The ID of the file.", + "required": true, + "location": "path" + }, + "mimeType": { + "type": "string", + "description": "The MIME type of the format requested for this export.", + "required": true, + "location": "query" + } + }, + "parameterOrder": [ + "fileId", + "mimeType" + ], + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.file", + "https://www.googleapis.com/auth/drive.readonly" + ], + "supportsMediaDownload": true + }, + "generateIds": { + "id": "drive.files.generateIds", + "path": "files/generateIds", + "httpMethod": "GET", + "description": "Generates a set of file IDs which can be provided in create requests.", + "parameters": { + "count": { + "type": "integer", + "description": "The number of IDs to return.", + "default": "10", + "format": "int32", + "minimum": "1", + "maximum": "1000", + "location": "query" + }, + "space": { + "type": "string", + "description": "The space in which the IDs can be used to create new files. 
Supported values are 'drive' and 'appDataFolder'.", + "default": "drive", + "location": "query" + } + }, + "response": { + "$ref": "GeneratedIds" + }, + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.appdata", + "https://www.googleapis.com/auth/drive.file" + ] + }, + "get": { + "id": "drive.files.get", + "path": "files/{fileId}", + "httpMethod": "GET", + "description": "Gets a file's metadata or content by ID.", + "parameters": { + "acknowledgeAbuse": { + "type": "boolean", + "description": "Whether the user is acknowledging the risk of downloading known malware or other abusive files. This is only applicable when alt=media.", + "default": "false", + "location": "query" + }, + "fileId": { + "type": "string", + "description": "The ID of the file.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "fileId" + ], + "response": { + "$ref": "File" + }, + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.appdata", + "https://www.googleapis.com/auth/drive.file", + "https://www.googleapis.com/auth/drive.metadata", + "https://www.googleapis.com/auth/drive.metadata.readonly", + "https://www.googleapis.com/auth/drive.photos.readonly", + "https://www.googleapis.com/auth/drive.readonly" + ], + "supportsMediaDownload": true, + "useMediaDownloadService": true, + "supportsSubscription": true + }, + "list": { + "id": "drive.files.list", + "path": "files", + "httpMethod": "GET", + "description": "Lists or searches files.", + "parameters": { + "corpus": { + "type": "string", + "description": "The source of files to list.", + "default": "user", + "enum": [ + "domain", + "user" + ], + "enumDescriptions": [ + "Files shared to the user's domain.", + "Files owned by or shared to the user." + ], + "location": "query" + }, + "orderBy": { + "type": "string", + "description": "A comma-separated list of sort keys. 
Valid keys are 'createdTime', 'folder', 'modifiedByMeTime', 'modifiedTime', 'name', 'quotaBytesUsed', 'recency', 'sharedWithMeTime', 'starred', and 'viewedByMeTime'. Each key sorts ascending by default, but may be reversed with the 'desc' modifier. Example usage: ?orderBy=folder,modifiedTime desc,name. Please note that there is a current limitation for users with approximately one million files in which the requested sort order is ignored.", + "location": "query" + }, + "pageSize": { + "type": "integer", + "description": "The maximum number of files to return per page.", + "default": "100", + "format": "int32", + "minimum": "1", + "maximum": "1000", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "The token for continuing a previous list request on the next page. This should be set to the value of 'nextPageToken' from the previous response.", + "location": "query" + }, + "q": { + "type": "string", + "description": "A query for filtering the file results. See the \"Search for Files\" guide for supported syntax.", + "location": "query" + }, + "spaces": { + "type": "string", + "description": "A comma-separated list of spaces to query within the corpus. 
Supported values are 'drive', 'appDataFolder' and 'photos'.", + "default": "drive", + "location": "query" + } + }, + "response": { + "$ref": "FileList" + }, + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.appdata", + "https://www.googleapis.com/auth/drive.file", + "https://www.googleapis.com/auth/drive.metadata", + "https://www.googleapis.com/auth/drive.metadata.readonly", + "https://www.googleapis.com/auth/drive.photos.readonly", + "https://www.googleapis.com/auth/drive.readonly" + ] + }, + "update": { + "id": "drive.files.update", + "path": "files/{fileId}", + "httpMethod": "PATCH", + "description": "Updates a file's metadata and/or content with patch semantics.", + "parameters": { + "addParents": { + "type": "string", + "description": "A comma-separated list of parent IDs to add.", + "location": "query" + }, + "fileId": { + "type": "string", + "description": "The ID of the file.", + "required": true, + "location": "path" + }, + "keepRevisionForever": { + "type": "boolean", + "description": "Whether to set the 'keepForever' field in the new head revision. 
This is only applicable to files with binary content in Drive.", + "default": "false", + "location": "query" + }, + "ocrLanguage": { + "type": "string", + "description": "A language hint for OCR processing during image import (ISO 639-1 code).", + "location": "query" + }, + "removeParents": { + "type": "string", + "description": "A comma-separated list of parent IDs to remove.", + "location": "query" + }, + "useContentAsIndexableText": { + "type": "boolean", + "description": "Whether to use the uploaded content as indexable text.", + "default": "false", + "location": "query" + } + }, + "parameterOrder": [ + "fileId" + ], + "request": { + "$ref": "File" + }, + "response": { + "$ref": "File" + }, + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.appdata", + "https://www.googleapis.com/auth/drive.file", + "https://www.googleapis.com/auth/drive.metadata", + "https://www.googleapis.com/auth/drive.scripts" + ], + "supportsMediaUpload": true, + "mediaUpload": { + "accept": [ + "*/*" + ], + "maxSize": "5120GB", + "protocols": { + "simple": { + "multipart": true, + "path": "/upload/drive/v3/files/{fileId}" + }, + "resumable": { + "multipart": true, + "path": "/resumable/upload/drive/v3/files/{fileId}" + } + } + } + }, + "watch": { + "id": "drive.files.watch", + "path": "files/{fileId}/watch", + "httpMethod": "POST", + "description": "Subscribes to changes to a file", + "parameters": { + "acknowledgeAbuse": { + "type": "boolean", + "description": "Whether the user is acknowledging the risk of downloading known malware or other abusive files. 
This is only applicable when alt=media.", + "default": "false", + "location": "query" + }, + "fileId": { + "type": "string", + "description": "The ID of the file.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "fileId" + ], + "request": { + "$ref": "Channel", + "parameterName": "resource" + }, + "response": { + "$ref": "Channel" + }, + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.appdata", + "https://www.googleapis.com/auth/drive.file", + "https://www.googleapis.com/auth/drive.metadata", + "https://www.googleapis.com/auth/drive.metadata.readonly", + "https://www.googleapis.com/auth/drive.photos.readonly", + "https://www.googleapis.com/auth/drive.readonly" + ], + "supportsMediaDownload": true, + "useMediaDownloadService": true, + "supportsSubscription": true + } + } + }, + "permissions": { + "methods": { + "create": { + "id": "drive.permissions.create", + "path": "files/{fileId}/permissions", + "httpMethod": "POST", + "description": "Creates a permission for a file.", + "parameters": { + "emailMessage": { + "type": "string", + "description": "A custom message to include in the notification email.", + "location": "query" + }, + "fileId": { + "type": "string", + "description": "The ID of the file.", + "required": true, + "location": "path" + }, + "sendNotificationEmail": { + "type": "boolean", + "description": "Whether to send a notification email when sharing to users or groups. This defaults to true for users and groups, and is not allowed for other requests. It must not be disabled for ownership transfers.", + "location": "query" + }, + "transferOwnership": { + "type": "boolean", + "description": "Whether to transfer ownership to the specified user and downgrade the current owner to a writer. 
This parameter is required as an acknowledgement of the side effect.", + "default": "false", + "location": "query" + } + }, + "parameterOrder": [ + "fileId" + ], + "request": { + "$ref": "Permission" + }, + "response": { + "$ref": "Permission" + }, + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.file" + ] + }, + "delete": { + "id": "drive.permissions.delete", + "path": "files/{fileId}/permissions/{permissionId}", + "httpMethod": "DELETE", + "description": "Deletes a permission.", + "parameters": { + "fileId": { + "type": "string", + "description": "The ID of the file.", + "required": true, + "location": "path" + }, + "permissionId": { + "type": "string", + "description": "The ID of the permission.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "fileId", + "permissionId" + ], + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.file" + ] + }, + "get": { + "id": "drive.permissions.get", + "path": "files/{fileId}/permissions/{permissionId}", + "httpMethod": "GET", + "description": "Gets a permission by ID.", + "parameters": { + "fileId": { + "type": "string", + "description": "The ID of the file.", + "required": true, + "location": "path" + }, + "permissionId": { + "type": "string", + "description": "The ID of the permission.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "fileId", + "permissionId" + ], + "response": { + "$ref": "Permission" + }, + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.file", + "https://www.googleapis.com/auth/drive.metadata", + "https://www.googleapis.com/auth/drive.metadata.readonly", + "https://www.googleapis.com/auth/drive.photos.readonly", + "https://www.googleapis.com/auth/drive.readonly" + ] + }, + "list": { + "id": "drive.permissions.list", + "path": "files/{fileId}/permissions", + "httpMethod": "GET", + "description": "Lists a file's 
permissions.", + "parameters": { + "fileId": { + "type": "string", + "description": "The ID of the file.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "fileId" + ], + "response": { + "$ref": "PermissionList" + }, + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.file", + "https://www.googleapis.com/auth/drive.metadata", + "https://www.googleapis.com/auth/drive.metadata.readonly", + "https://www.googleapis.com/auth/drive.photos.readonly", + "https://www.googleapis.com/auth/drive.readonly" + ] + }, + "update": { + "id": "drive.permissions.update", + "path": "files/{fileId}/permissions/{permissionId}", + "httpMethod": "PATCH", + "description": "Updates a permission with patch semantics.", + "parameters": { + "fileId": { + "type": "string", + "description": "The ID of the file.", + "required": true, + "location": "path" + }, + "permissionId": { + "type": "string", + "description": "The ID of the permission.", + "required": true, + "location": "path" + }, + "transferOwnership": { + "type": "boolean", + "description": "Whether to transfer ownership to the specified user and downgrade the current owner to a writer. 
This parameter is required as an acknowledgement of the side effect.", + "default": "false", + "location": "query" + } + }, + "parameterOrder": [ + "fileId", + "permissionId" + ], + "request": { + "$ref": "Permission" + }, + "response": { + "$ref": "Permission" + }, + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.file" + ] + } + } + }, + "replies": { + "methods": { + "create": { + "id": "drive.replies.create", + "path": "files/{fileId}/comments/{commentId}/replies", + "httpMethod": "POST", + "description": "Creates a new reply to a comment.", + "parameters": { + "commentId": { + "type": "string", + "description": "The ID of the comment.", + "required": true, + "location": "path" + }, + "fileId": { + "type": "string", + "description": "The ID of the file.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "fileId", + "commentId" + ], + "request": { + "$ref": "Reply" + }, + "response": { + "$ref": "Reply" + }, + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.file" + ] + }, + "delete": { + "id": "drive.replies.delete", + "path": "files/{fileId}/comments/{commentId}/replies/{replyId}", + "httpMethod": "DELETE", + "description": "Deletes a reply.", + "parameters": { + "commentId": { + "type": "string", + "description": "The ID of the comment.", + "required": true, + "location": "path" + }, + "fileId": { + "type": "string", + "description": "The ID of the file.", + "required": true, + "location": "path" + }, + "replyId": { + "type": "string", + "description": "The ID of the reply.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "fileId", + "commentId", + "replyId" + ], + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.file" + ] + }, + "get": { + "id": "drive.replies.get", + "path": "files/{fileId}/comments/{commentId}/replies/{replyId}", + "httpMethod": "GET", + 
"description": "Gets a reply by ID.", + "parameters": { + "commentId": { + "type": "string", + "description": "The ID of the comment.", + "required": true, + "location": "path" + }, + "fileId": { + "type": "string", + "description": "The ID of the file.", + "required": true, + "location": "path" + }, + "includeDeleted": { + "type": "boolean", + "description": "Whether to return deleted replies. Deleted replies will not include their original content.", + "default": "false", + "location": "query" + }, + "replyId": { + "type": "string", + "description": "The ID of the reply.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "fileId", + "commentId", + "replyId" + ], + "response": { + "$ref": "Reply" + }, + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.file", + "https://www.googleapis.com/auth/drive.readonly" + ] + }, + "list": { + "id": "drive.replies.list", + "path": "files/{fileId}/comments/{commentId}/replies", + "httpMethod": "GET", + "description": "Lists a comment's replies.", + "parameters": { + "commentId": { + "type": "string", + "description": "The ID of the comment.", + "required": true, + "location": "path" + }, + "fileId": { + "type": "string", + "description": "The ID of the file.", + "required": true, + "location": "path" + }, + "includeDeleted": { + "type": "boolean", + "description": "Whether to include deleted replies. Deleted replies will not include their original content.", + "default": "false", + "location": "query" + }, + "pageSize": { + "type": "integer", + "description": "The maximum number of replies to return per page.", + "default": "20", + "format": "int32", + "minimum": "1", + "maximum": "100", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "The token for continuing a previous list request on the next page. 
This should be set to the value of 'nextPageToken' from the previous response.", + "location": "query" + } + }, + "parameterOrder": [ + "fileId", + "commentId" + ], + "response": { + "$ref": "ReplyList" + }, + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.file", + "https://www.googleapis.com/auth/drive.readonly" + ] + }, + "update": { + "id": "drive.replies.update", + "path": "files/{fileId}/comments/{commentId}/replies/{replyId}", + "httpMethod": "PATCH", + "description": "Updates a reply with patch semantics.", + "parameters": { + "commentId": { + "type": "string", + "description": "The ID of the comment.", + "required": true, + "location": "path" + }, + "fileId": { + "type": "string", + "description": "The ID of the file.", + "required": true, + "location": "path" + }, + "replyId": { + "type": "string", + "description": "The ID of the reply.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "fileId", + "commentId", + "replyId" + ], + "request": { + "$ref": "Reply" + }, + "response": { + "$ref": "Reply" + }, + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.file" + ] + } + } + }, + "revisions": { + "methods": { + "delete": { + "id": "drive.revisions.delete", + "path": "files/{fileId}/revisions/{revisionId}", + "httpMethod": "DELETE", + "description": "Permanently deletes a revision. 
This method is only applicable to files with binary content in Drive.", + "parameters": { + "fileId": { + "type": "string", + "description": "The ID of the file.", + "required": true, + "location": "path" + }, + "revisionId": { + "type": "string", + "description": "The ID of the revision.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "fileId", + "revisionId" + ], + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.appdata", + "https://www.googleapis.com/auth/drive.file" + ] + }, + "get": { + "id": "drive.revisions.get", + "path": "files/{fileId}/revisions/{revisionId}", + "httpMethod": "GET", + "description": "Gets a revision's metadata or content by ID.", + "parameters": { + "acknowledgeAbuse": { + "type": "boolean", + "description": "Whether the user is acknowledging the risk of downloading known malware or other abusive files. This is only applicable when alt=media.", + "default": "false", + "location": "query" + }, + "fileId": { + "type": "string", + "description": "The ID of the file.", + "required": true, + "location": "path" + }, + "revisionId": { + "type": "string", + "description": "The ID of the revision.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "fileId", + "revisionId" + ], + "response": { + "$ref": "Revision" + }, + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.appdata", + "https://www.googleapis.com/auth/drive.file", + "https://www.googleapis.com/auth/drive.metadata", + "https://www.googleapis.com/auth/drive.metadata.readonly", + "https://www.googleapis.com/auth/drive.photos.readonly", + "https://www.googleapis.com/auth/drive.readonly" + ], + "supportsMediaDownload": true, + "useMediaDownloadService": true + }, + "list": { + "id": "drive.revisions.list", + "path": "files/{fileId}/revisions", + "httpMethod": "GET", + "description": "Lists a file's revisions.", + "parameters": { + "fileId": { + 
"type": "string", + "description": "The ID of the file.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "fileId" + ], + "response": { + "$ref": "RevisionList" + }, + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.appdata", + "https://www.googleapis.com/auth/drive.file", + "https://www.googleapis.com/auth/drive.metadata", + "https://www.googleapis.com/auth/drive.metadata.readonly", + "https://www.googleapis.com/auth/drive.photos.readonly", + "https://www.googleapis.com/auth/drive.readonly" + ] + }, + "update": { + "id": "drive.revisions.update", + "path": "files/{fileId}/revisions/{revisionId}", + "httpMethod": "PATCH", + "description": "Updates a revision with patch semantics.", + "parameters": { + "fileId": { + "type": "string", + "description": "The ID of the file.", + "required": true, + "location": "path" + }, + "revisionId": { + "type": "string", + "description": "The ID of the revision.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "fileId", + "revisionId" + ], + "request": { + "$ref": "Revision" + }, + "response": { + "$ref": "Revision" + }, + "scopes": [ + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.appdata", + "https://www.googleapis.com/auth/drive.file" + ] + } + } + } + } +} diff --git a/vendor/google.golang.org/api/drive/v3/drive-gen.go b/vendor/google.golang.org/api/drive/v3/drive-gen.go new file mode 100644 index 00000000..9e6e0874 --- /dev/null +++ b/vendor/google.golang.org/api/drive/v3/drive-gen.go @@ -0,0 +1,6434 @@ +// Package drive provides access to the Drive API. +// +// See https://developers.google.com/drive/ +// +// Usage example: +// +// import "google.golang.org/api/drive/v3" +// ... 
+// driveService, err := drive.New(oauthHttpClient) +package drive + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + context "golang.org/x/net/context" + ctxhttp "golang.org/x/net/context/ctxhttp" + gensupport "google.golang.org/api/gensupport" + googleapi "google.golang.org/api/googleapi" + "io" + "net/http" + "net/url" + "strconv" + "strings" +) + +// Always reference these packages, just in case the auto-generated code +// below doesn't. +var _ = bytes.NewBuffer +var _ = strconv.Itoa +var _ = fmt.Sprintf +var _ = json.NewDecoder +var _ = io.Copy +var _ = url.Parse +var _ = gensupport.MarshalJSON +var _ = googleapi.Version +var _ = errors.New +var _ = strings.Replace +var _ = context.Canceled +var _ = ctxhttp.Do + +const apiId = "drive:v3" +const apiName = "drive" +const apiVersion = "v3" +const basePath = "https://www.googleapis.com/drive/v3/" + +// OAuth2 scopes used by this API. +const ( + // View and manage the files in your Google Drive + DriveScope = "https://www.googleapis.com/auth/drive" + + // View and manage its own configuration data in your Google Drive + DriveAppdataScope = "https://www.googleapis.com/auth/drive.appdata" + + // View and manage Google Drive files and folders that you have opened + // or created with this app + DriveFileScope = "https://www.googleapis.com/auth/drive.file" + + // View and manage metadata of files in your Google Drive + DriveMetadataScope = "https://www.googleapis.com/auth/drive.metadata" + + // View metadata for files in your Google Drive + DriveMetadataReadonlyScope = "https://www.googleapis.com/auth/drive.metadata.readonly" + + // View the photos, videos and albums in your Google Photos + DrivePhotosReadonlyScope = "https://www.googleapis.com/auth/drive.photos.readonly" + + // View the files in your Google Drive + DriveReadonlyScope = "https://www.googleapis.com/auth/drive.readonly" + + // Modify your Google Apps Script scripts' behavior + DriveScriptsScope = "https://www.googleapis.com/auth/drive.scripts" 
+) + +func New(client *http.Client) (*Service, error) { + if client == nil { + return nil, errors.New("client is nil") + } + s := &Service{client: client, BasePath: basePath} + s.About = NewAboutService(s) + s.Changes = NewChangesService(s) + s.Channels = NewChannelsService(s) + s.Comments = NewCommentsService(s) + s.Files = NewFilesService(s) + s.Permissions = NewPermissionsService(s) + s.Replies = NewRepliesService(s) + s.Revisions = NewRevisionsService(s) + return s, nil +} + +type Service struct { + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + + About *AboutService + + Changes *ChangesService + + Channels *ChannelsService + + Comments *CommentsService + + Files *FilesService + + Permissions *PermissionsService + + Replies *RepliesService + + Revisions *RevisionsService +} + +func (s *Service) userAgent() string { + if s.UserAgent == "" { + return googleapi.UserAgent + } + return googleapi.UserAgent + " " + s.UserAgent +} + +func NewAboutService(s *Service) *AboutService { + rs := &AboutService{s: s} + return rs +} + +type AboutService struct { + s *Service +} + +func NewChangesService(s *Service) *ChangesService { + rs := &ChangesService{s: s} + return rs +} + +type ChangesService struct { + s *Service +} + +func NewChannelsService(s *Service) *ChannelsService { + rs := &ChannelsService{s: s} + return rs +} + +type ChannelsService struct { + s *Service +} + +func NewCommentsService(s *Service) *CommentsService { + rs := &CommentsService{s: s} + return rs +} + +type CommentsService struct { + s *Service +} + +func NewFilesService(s *Service) *FilesService { + rs := &FilesService{s: s} + return rs +} + +type FilesService struct { + s *Service +} + +func NewPermissionsService(s *Service) *PermissionsService { + rs := &PermissionsService{s: s} + return rs +} + +type PermissionsService struct { + s *Service +} + +func NewRepliesService(s *Service) *RepliesService { + rs := 
&RepliesService{s: s} + return rs +} + +type RepliesService struct { + s *Service +} + +func NewRevisionsService(s *Service) *RevisionsService { + rs := &RevisionsService{s: s} + return rs +} + +type RevisionsService struct { + s *Service +} + +// About: Information about the user, the user's Drive, and system +// capabilities. +type About struct { + // AppInstalled: Whether the user has installed the requesting app. + AppInstalled bool `json:"appInstalled,omitempty"` + + // ExportFormats: A map of source MIME type to possible targets for all + // supported exports. + ExportFormats map[string][]string `json:"exportFormats,omitempty"` + + // FolderColorPalette: The currently supported folder colors as RGB hex + // strings. + FolderColorPalette []string `json:"folderColorPalette,omitempty"` + + // ImportFormats: A map of source MIME type to possible targets for all + // supported imports. + ImportFormats map[string][]string `json:"importFormats,omitempty"` + + // Kind: This is always drive#about. + Kind string `json:"kind,omitempty"` + + // MaxImportSizes: A map of maximum import sizes by MIME type, in bytes. + MaxImportSizes map[string]string `json:"maxImportSizes,omitempty"` + + // MaxUploadSize: The maximum upload size in bytes. + MaxUploadSize int64 `json:"maxUploadSize,omitempty,string"` + + // StorageQuota: The user's storage quota limits and usage. All fields + // are measured in bytes. + StorageQuota *AboutStorageQuota `json:"storageQuota,omitempty"` + + // User: The authenticated user. + User *User `json:"user,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "AppInstalled") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *About) MarshalJSON() ([]byte, error) { + type noMethod About + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// AboutStorageQuota: The user's storage quota limits and usage. All +// fields are measured in bytes. +type AboutStorageQuota struct { + // Limit: The usage limit, if applicable. This will not be present if + // the user has unlimited storage. + Limit int64 `json:"limit,omitempty,string"` + + // Usage: The total usage across all services. + Usage int64 `json:"usage,omitempty,string"` + + // UsageInDrive: The usage by all files in Google Drive. + UsageInDrive int64 `json:"usageInDrive,omitempty,string"` + + // UsageInDriveTrash: The usage by trashed files in Google Drive. + UsageInDriveTrash int64 `json:"usageInDriveTrash,omitempty,string"` + + // ForceSendFields is a list of field names (e.g. "Limit") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *AboutStorageQuota) MarshalJSON() ([]byte, error) { + type noMethod AboutStorageQuota + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// Change: A change to a file. +type Change struct { + // File: The updated state of the file. Present if the file has not been + // removed. + File *File `json:"file,omitempty"` + + // FileId: The ID of the file which has changed. 
+ FileId string `json:"fileId,omitempty"` + + // Kind: This is always drive#change. + Kind string `json:"kind,omitempty"` + + // Removed: Whether the file has been removed from the view of the + // changes list, for example by deletion or lost access. + Removed bool `json:"removed,omitempty"` + + // Time: The time of this change (RFC 3339 date-time). + Time string `json:"time,omitempty"` + + // ForceSendFields is a list of field names (e.g. "File") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *Change) MarshalJSON() ([]byte, error) { + type noMethod Change + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// ChangeList: A list of changes for a user. +type ChangeList struct { + // Changes: The page of changes. + Changes []*Change `json:"changes,omitempty"` + + // Kind: This is always drive#changeList. + Kind string `json:"kind,omitempty"` + + // NewStartPageToken: The starting page token for future changes. This + // will be present only if the end of the current changes list has been + // reached. + NewStartPageToken string `json:"newStartPageToken,omitempty"` + + // NextPageToken: The page token for the next page of changes. This will + // be absent if the end of the current changes list has been reached. + NextPageToken string `json:"nextPageToken,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Changes") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *ChangeList) MarshalJSON() ([]byte, error) { + type noMethod ChangeList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// Channel: An notification channel used to watch for resource changes. +type Channel struct { + // Address: The address where notifications are delivered for this + // channel. + Address string `json:"address,omitempty"` + + // Expiration: Date and time of notification channel expiration, + // expressed as a Unix timestamp, in milliseconds. Optional. + Expiration int64 `json:"expiration,omitempty,string"` + + // Id: A UUID or similar unique string that identifies this channel. + Id string `json:"id,omitempty"` + + // Kind: Identifies this as a notification channel used to watch for + // changes to a resource. Value: the fixed string "api#channel". + Kind string `json:"kind,omitempty"` + + // Params: Additional parameters controlling delivery channel behavior. + // Optional. + Params map[string]string `json:"params,omitempty"` + + // Payload: A Boolean value to indicate whether payload is wanted. + // Optional. + Payload bool `json:"payload,omitempty"` + + // ResourceId: An opaque ID that identifies the resource being watched + // on this channel. Stable across different API versions. + ResourceId string `json:"resourceId,omitempty"` + + // ResourceUri: A version-specific identifier for the watched resource. + ResourceUri string `json:"resourceUri,omitempty"` + + // Token: An arbitrary string delivered to the target address with each + // notification delivered over this channel. Optional. + Token string `json:"token,omitempty"` + + // Type: The type of delivery mechanism used for this channel. 
+ Type string `json:"type,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Address") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *Channel) MarshalJSON() ([]byte, error) { + type noMethod Channel + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// Comment: A comment on a file. +type Comment struct { + // Anchor: A region of the document represented as a JSON string. See + // anchor documentation for details on how to define and interpret + // anchor properties. + Anchor string `json:"anchor,omitempty"` + + // Author: The user who created the comment. + Author *User `json:"author,omitempty"` + + // Content: The plain text content of the comment. This field is used + // for setting the content, while htmlContent should be displayed. + Content string `json:"content,omitempty"` + + // CreatedTime: The time at which the comment was created (RFC 3339 + // date-time). + CreatedTime string `json:"createdTime,omitempty"` + + // Deleted: Whether the comment has been deleted. A deleted comment has + // no content. + Deleted bool `json:"deleted,omitempty"` + + // HtmlContent: The content of the comment with HTML formatting. + HtmlContent string `json:"htmlContent,omitempty"` + + // Id: The ID of the comment. + Id string `json:"id,omitempty"` + + // Kind: This is always drive#comment. + Kind string `json:"kind,omitempty"` + + // ModifiedTime: The last time the comment or any of its replies was + // modified (RFC 3339 date-time). 
+ ModifiedTime string `json:"modifiedTime,omitempty"` + + // QuotedFileContent: The file content to which the comment refers, + // typically within the anchor region. For a text file, for example, + // this would be the text at the location of the comment. + QuotedFileContent *CommentQuotedFileContent `json:"quotedFileContent,omitempty"` + + // Replies: The full list of replies to the comment in chronological + // order. + Replies []*Reply `json:"replies,omitempty"` + + // Resolved: Whether the comment has been resolved by one of its + // replies. + Resolved bool `json:"resolved,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Anchor") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *Comment) MarshalJSON() ([]byte, error) { + type noMethod Comment + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// CommentQuotedFileContent: The file content to which the comment +// refers, typically within the anchor region. For a text file, for +// example, this would be the text at the location of the comment. +type CommentQuotedFileContent struct { + // MimeType: The MIME type of the quoted content. + MimeType string `json:"mimeType,omitempty"` + + // Value: The quoted content itself. This is interpreted as plain text + // if set through the API. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "MimeType") to + // unconditionally include in API requests. 
By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *CommentQuotedFileContent) MarshalJSON() ([]byte, error) { + type noMethod CommentQuotedFileContent + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// CommentList: A list of comments on a file. +type CommentList struct { + // Comments: The page of comments. + Comments []*Comment `json:"comments,omitempty"` + + // Kind: This is always drive#commentList. + Kind string `json:"kind,omitempty"` + + // NextPageToken: The page token for the next page of comments. This + // will be absent if the end of the comments list has been reached. + NextPageToken string `json:"nextPageToken,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Comments") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *CommentList) MarshalJSON() ([]byte, error) { + type noMethod CommentList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// File: The metadata for a file. +type File struct { + // AppProperties: A collection of arbitrary key-value pairs which are + // private to the requesting app. + // Entries with null values are cleared in update and copy requests. 
+ AppProperties map[string]string `json:"appProperties,omitempty"` + + // Capabilities: Capabilities the current user has on the file. + Capabilities *FileCapabilities `json:"capabilities,omitempty"` + + // ContentHints: Additional information about the content of the file. + // These fields are never populated in responses. + ContentHints *FileContentHints `json:"contentHints,omitempty"` + + // CreatedTime: The time at which the file was created (RFC 3339 + // date-time). + CreatedTime string `json:"createdTime,omitempty"` + + // Description: A short description of the file. + Description string `json:"description,omitempty"` + + // ExplicitlyTrashed: Whether the file has been explicitly trashed, as + // opposed to recursively trashed from a parent folder. + ExplicitlyTrashed bool `json:"explicitlyTrashed,omitempty"` + + // FileExtension: The final component of fullFileExtension. This is only + // available for files with binary content in Drive. + FileExtension string `json:"fileExtension,omitempty"` + + // FolderColorRgb: The color for a folder as an RGB hex string. The + // supported colors are published in the folderColorPalette field of the + // About resource. + // If an unsupported color is specified, the closest color in the + // palette will be used instead. + FolderColorRgb string `json:"folderColorRgb,omitempty"` + + // FullFileExtension: The full file extension extracted from the name + // field. May contain multiple concatenated extensions, such as + // "tar.gz". This is only available for files with binary content in + // Drive. + // This is automatically updated when the name field changes, however it + // is not cleared if the new name does not contain a valid extension. + FullFileExtension string `json:"fullFileExtension,omitempty"` + + // HeadRevisionId: The ID of the file's head revision. This is currently + // only available for files with binary content in Drive. 
+ HeadRevisionId string `json:"headRevisionId,omitempty"` + + // IconLink: A static, unauthenticated link to the file's icon. + IconLink string `json:"iconLink,omitempty"` + + // Id: The ID of the file. + Id string `json:"id,omitempty"` + + // ImageMediaMetadata: Additional metadata about image media, if + // available. + ImageMediaMetadata *FileImageMediaMetadata `json:"imageMediaMetadata,omitempty"` + + // Kind: This is always drive#file. + Kind string `json:"kind,omitempty"` + + // LastModifyingUser: The last user to modify the file. + LastModifyingUser *User `json:"lastModifyingUser,omitempty"` + + // Md5Checksum: The MD5 checksum for the content of the file. This is + // only applicable to files with binary content in Drive. + Md5Checksum string `json:"md5Checksum,omitempty"` + + // MimeType: The MIME type of the file. + // Drive will attempt to automatically detect an appropriate value from + // uploaded content if no value is provided. The value cannot be changed + // unless a new revision is uploaded. + // If a file is created with a Google Doc MIME type, the uploaded + // content will be imported if possible. The supported import formats + // are published in the About resource. + MimeType string `json:"mimeType,omitempty"` + + // ModifiedByMeTime: The last time the file was modified by the user + // (RFC 3339 date-time). + ModifiedByMeTime string `json:"modifiedByMeTime,omitempty"` + + // ModifiedTime: The last time the file was modified by anyone (RFC 3339 + // date-time). + // Note that setting modifiedTime will also update modifiedByMeTime for + // the user. + ModifiedTime string `json:"modifiedTime,omitempty"` + + // Name: The name of the file. This is not necessarily unique within a + // folder. + Name string `json:"name,omitempty"` + + // OriginalFilename: The original filename of the uploaded content if + // available, or else the original value of the name field. This is only + // available for files with binary content in Drive. 
+ OriginalFilename string `json:"originalFilename,omitempty"` + + // OwnedByMe: Whether the user owns the file. + OwnedByMe bool `json:"ownedByMe,omitempty"` + + // Owners: The owners of the file. Currently, only certain legacy files + // may have more than one owner. + Owners []*User `json:"owners,omitempty"` + + // Parents: The IDs of the parent folders which contain the file. + // If not specified as part of a create request, the file will be placed + // directly in the My Drive folder. Update requests must use the + // addParents and removeParents parameters to modify the values. + Parents []string `json:"parents,omitempty"` + + // Permissions: The full list of permissions for the file. This is only + // available if the requesting user can share the file. + Permissions []*Permission `json:"permissions,omitempty"` + + // Properties: A collection of arbitrary key-value pairs which are + // visible to all apps. + // Entries with null values are cleared in update and copy requests. + Properties map[string]string `json:"properties,omitempty"` + + // QuotaBytesUsed: The number of storage quota bytes used by the file. + // This includes the head revision as well as previous revisions with + // keepForever enabled. + QuotaBytesUsed int64 `json:"quotaBytesUsed,omitempty,string"` + + // Shared: Whether the file has been shared. + Shared bool `json:"shared,omitempty"` + + // SharedWithMeTime: The time at which the file was shared with the + // user, if applicable (RFC 3339 date-time). + SharedWithMeTime string `json:"sharedWithMeTime,omitempty"` + + // SharingUser: The user who shared the file with the requesting user, + // if applicable. + SharingUser *User `json:"sharingUser,omitempty"` + + // Size: The size of the file's content in bytes. This is only + // applicable to files with binary content in Drive. + Size int64 `json:"size,omitempty,string"` + + // Spaces: The list of spaces which contain the file. 
The currently + // supported values are 'drive', 'appDataFolder' and 'photos'. + Spaces []string `json:"spaces,omitempty"` + + // Starred: Whether the user has starred the file. + Starred bool `json:"starred,omitempty"` + + // ThumbnailLink: A short-lived link to the file's thumbnail, if + // available. Typically lasts on the order of hours. + ThumbnailLink string `json:"thumbnailLink,omitempty"` + + // Trashed: Whether the file has been trashed, either explicitly or from + // a trashed parent folder. Only the owner may trash a file, and other + // users cannot see files in the owner's trash. + Trashed bool `json:"trashed,omitempty"` + + // Version: A monotonically increasing version number for the file. This + // reflects every change made to the file on the server, even those not + // visible to the user. + Version int64 `json:"version,omitempty,string"` + + // VideoMediaMetadata: Additional metadata about video media. This may + // not be available immediately upon upload. + VideoMediaMetadata *FileVideoMediaMetadata `json:"videoMediaMetadata,omitempty"` + + // ViewedByMe: Whether the file has been viewed by this user. + ViewedByMe bool `json:"viewedByMe,omitempty"` + + // ViewedByMeTime: The last time the file was viewed by the user (RFC + // 3339 date-time). + ViewedByMeTime string `json:"viewedByMeTime,omitempty"` + + // ViewersCanCopyContent: Whether users with only reader or commenter + // permission can copy the file's content. This affects copy, download, + // and print operations. + ViewersCanCopyContent bool `json:"viewersCanCopyContent,omitempty"` + + // WebContentLink: A link for downloading the content of the file in a + // browser. This is only available for files with binary content in + // Drive. + WebContentLink string `json:"webContentLink,omitempty"` + + // WebViewLink: A link for opening the file in a relevant Google editor + // or viewer in a browser. 
+ WebViewLink string `json:"webViewLink,omitempty"` + + // WritersCanShare: Whether users with only writer permission can modify + // the file's permissions. + WritersCanShare bool `json:"writersCanShare,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "AppProperties") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *File) MarshalJSON() ([]byte, error) { + type noMethod File + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// FileCapabilities: Capabilities the current user has on the file. +type FileCapabilities struct { + // CanComment: Whether the user can comment on the file. + CanComment bool `json:"canComment,omitempty"` + + // CanCopy: Whether the user can copy the file. + CanCopy bool `json:"canCopy,omitempty"` + + // CanEdit: Whether the user can edit the file's content. + CanEdit bool `json:"canEdit,omitempty"` + + // CanReadRevisions: Whether the current user has read access to the + // Revisions resource of the file. + CanReadRevisions bool `json:"canReadRevisions,omitempty"` + + // CanShare: Whether the user can modify the file's permissions and + // sharing settings. + CanShare bool `json:"canShare,omitempty"` + + // ForceSendFields is a list of field names (e.g. "CanComment") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *FileCapabilities) MarshalJSON() ([]byte, error) { + type noMethod FileCapabilities + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// FileContentHints: Additional information about the content of the +// file. These fields are never populated in responses. +type FileContentHints struct { + // IndexableText: Text to be indexed for the file to improve fullText + // queries. This is limited to 128KB in length and may contain HTML + // elements. + IndexableText string `json:"indexableText,omitempty"` + + // Thumbnail: A thumbnail for the file. This will only be used if Drive + // cannot generate a standard thumbnail. + Thumbnail *FileContentHintsThumbnail `json:"thumbnail,omitempty"` + + // ForceSendFields is a list of field names (e.g. "IndexableText") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *FileContentHints) MarshalJSON() ([]byte, error) { + type noMethod FileContentHints + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// FileContentHintsThumbnail: A thumbnail for the file. This will only +// be used if Drive cannot generate a standard thumbnail. +type FileContentHintsThumbnail struct { + // Image: The thumbnail data encoded with URL-safe Base64 (RFC 4648 + // section 5). + Image string `json:"image,omitempty"` + + // MimeType: The MIME type of the thumbnail. 
+ MimeType string `json:"mimeType,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Image") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *FileContentHintsThumbnail) MarshalJSON() ([]byte, error) { + type noMethod FileContentHintsThumbnail + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// FileImageMediaMetadata: Additional metadata about image media, if +// available. +type FileImageMediaMetadata struct { + // Aperture: The aperture used to create the photo (f-number). + Aperture float64 `json:"aperture,omitempty"` + + // CameraMake: The make of the camera used to create the photo. + CameraMake string `json:"cameraMake,omitempty"` + + // CameraModel: The model of the camera used to create the photo. + CameraModel string `json:"cameraModel,omitempty"` + + // ColorSpace: The color space of the photo. + ColorSpace string `json:"colorSpace,omitempty"` + + // ExposureBias: The exposure bias of the photo (APEX value). + ExposureBias float64 `json:"exposureBias,omitempty"` + + // ExposureMode: The exposure mode used to create the photo. + ExposureMode string `json:"exposureMode,omitempty"` + + // ExposureTime: The length of the exposure, in seconds. + ExposureTime float64 `json:"exposureTime,omitempty"` + + // FlashUsed: Whether a flash was used to create the photo. + FlashUsed bool `json:"flashUsed,omitempty"` + + // FocalLength: The focal length used to create the photo, in + // millimeters. + FocalLength float64 `json:"focalLength,omitempty"` + + // Height: The height of the image in pixels. 
+ Height int64 `json:"height,omitempty"` + + // IsoSpeed: The ISO speed used to create the photo. + IsoSpeed int64 `json:"isoSpeed,omitempty"` + + // Lens: The lens used to create the photo. + Lens string `json:"lens,omitempty"` + + // Location: Geographic location information stored in the image. + Location *FileImageMediaMetadataLocation `json:"location,omitempty"` + + // MaxApertureValue: The smallest f-number of the lens at the focal + // length used to create the photo (APEX value). + MaxApertureValue float64 `json:"maxApertureValue,omitempty"` + + // MeteringMode: The metering mode used to create the photo. + MeteringMode string `json:"meteringMode,omitempty"` + + // Rotation: The rotation in clockwise degrees from the image's original + // orientation. + Rotation int64 `json:"rotation,omitempty"` + + // Sensor: The type of sensor used to create the photo. + Sensor string `json:"sensor,omitempty"` + + // SubjectDistance: The distance to the subject of the photo, in meters. + SubjectDistance int64 `json:"subjectDistance,omitempty"` + + // Time: The date and time the photo was taken (EXIF DateTime). + Time string `json:"time,omitempty"` + + // WhiteBalance: The white balance mode used to create the photo. + WhiteBalance string `json:"whiteBalance,omitempty"` + + // Width: The width of the image in pixels. + Width int64 `json:"width,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Aperture") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. 
+ ForceSendFields []string `json:"-"` +} + +func (s *FileImageMediaMetadata) MarshalJSON() ([]byte, error) { + type noMethod FileImageMediaMetadata + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// FileImageMediaMetadataLocation: Geographic location information +// stored in the image. +type FileImageMediaMetadataLocation struct { + // Altitude: The altitude stored in the image. + Altitude float64 `json:"altitude,omitempty"` + + // Latitude: The latitude stored in the image. + Latitude float64 `json:"latitude,omitempty"` + + // Longitude: The longitude stored in the image. + Longitude float64 `json:"longitude,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Altitude") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *FileImageMediaMetadataLocation) MarshalJSON() ([]byte, error) { + type noMethod FileImageMediaMetadataLocation + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// FileVideoMediaMetadata: Additional metadata about video media. This +// may not be available immediately upon upload. +type FileVideoMediaMetadata struct { + // DurationMillis: The duration of the video in milliseconds. + DurationMillis int64 `json:"durationMillis,omitempty,string"` + + // Height: The height of the video in pixels. + Height int64 `json:"height,omitempty"` + + // Width: The width of the video in pixels. + Width int64 `json:"width,omitempty"` + + // ForceSendFields is a list of field names (e.g. "DurationMillis") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *FileVideoMediaMetadata) MarshalJSON() ([]byte, error) { + type noMethod FileVideoMediaMetadata + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// FileList: A list of files. +type FileList struct { + // Files: The page of files. + Files []*File `json:"files,omitempty"` + + // Kind: This is always drive#fileList. + Kind string `json:"kind,omitempty"` + + // NextPageToken: The page token for the next page of files. This will + // be absent if the end of the files list has been reached. + NextPageToken string `json:"nextPageToken,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Files") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *FileList) MarshalJSON() ([]byte, error) { + type noMethod FileList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// GeneratedIds: A list of generated file IDs which can be provided in +// create requests. +type GeneratedIds struct { + // Ids: The IDs generated for the requesting user in the specified + // space. + Ids []string `json:"ids,omitempty"` + + // Kind: This is always drive#generatedIds + Kind string `json:"kind,omitempty"` + + // Space: The type of file that can be created with these IDs. 
+ Space string `json:"space,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Ids") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *GeneratedIds) MarshalJSON() ([]byte, error) { + type noMethod GeneratedIds + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// Permission: A permission for a file. A permission grants a user, +// group, domain or the world access to a file or a folder hierarchy. +type Permission struct { + // AllowFileDiscovery: Whether the permission allows the file to be + // discovered through search. This is only applicable for permissions of + // type domain or anyone. + AllowFileDiscovery bool `json:"allowFileDiscovery,omitempty"` + + // DisplayName: A displayable name for users, groups or domains. + DisplayName string `json:"displayName,omitempty"` + + // Domain: The domain to which this permission refers. + Domain string `json:"domain,omitempty"` + + // EmailAddress: The email address of the user or group to which this + // permission refers. + EmailAddress string `json:"emailAddress,omitempty"` + + // Id: The ID of this permission. This is a unique identifier for the + // grantee, and is published in User resources as permissionId. + Id string `json:"id,omitempty"` + + // Kind: This is always drive#permission. + Kind string `json:"kind,omitempty"` + + // PhotoLink: A link to the user's profile photo, if available. + PhotoLink string `json:"photoLink,omitempty"` + + // Role: The role granted by this permission. 
Valid values are: + // - owner + // - writer + // - commenter + // - reader + Role string `json:"role,omitempty"` + + // Type: The type of the grantee. Valid values are: + // - user + // - group + // - domain + // - anyone + Type string `json:"type,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "AllowFileDiscovery") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *Permission) MarshalJSON() ([]byte, error) { + type noMethod Permission + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// PermissionList: A list of permissions for a file. +type PermissionList struct { + // Kind: This is always drive#permissionList. + Kind string `json:"kind,omitempty"` + + // Permissions: The full list of permissions. + Permissions []*Permission `json:"permissions,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Kind") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. 
+ ForceSendFields []string `json:"-"` +} + +func (s *PermissionList) MarshalJSON() ([]byte, error) { + type noMethod PermissionList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// Reply: A reply to a comment on a file. +type Reply struct { + // Action: The action the reply performed to the parent comment. Valid + // values are: + // - resolve + // - reopen + Action string `json:"action,omitempty"` + + // Author: The user who created the reply. + Author *User `json:"author,omitempty"` + + // Content: The plain text content of the reply. This field is used for + // setting the content, while htmlContent should be displayed. This is + // required on creates if no action is specified. + Content string `json:"content,omitempty"` + + // CreatedTime: The time at which the reply was created (RFC 3339 + // date-time). + CreatedTime string `json:"createdTime,omitempty"` + + // Deleted: Whether the reply has been deleted. A deleted reply has no + // content. + Deleted bool `json:"deleted,omitempty"` + + // HtmlContent: The content of the reply with HTML formatting. + HtmlContent string `json:"htmlContent,omitempty"` + + // Id: The ID of the reply. + Id string `json:"id,omitempty"` + + // Kind: This is always drive#reply. + Kind string `json:"kind,omitempty"` + + // ModifiedTime: The last time the reply was modified (RFC 3339 + // date-time). + ModifiedTime string `json:"modifiedTime,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Action") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. 
+ ForceSendFields []string `json:"-"` +} + +func (s *Reply) MarshalJSON() ([]byte, error) { + type noMethod Reply + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// ReplyList: A list of replies to a comment on a file. +type ReplyList struct { + // Kind: This is always drive#replyList. + Kind string `json:"kind,omitempty"` + + // NextPageToken: The page token for the next page of replies. This will + // be absent if the end of the replies list has been reached. + NextPageToken string `json:"nextPageToken,omitempty"` + + // Replies: The page of replies. + Replies []*Reply `json:"replies,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Kind") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *ReplyList) MarshalJSON() ([]byte, error) { + type noMethod ReplyList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// Revision: The metadata for a revision to a file. +type Revision struct { + // Id: The ID of the revision. + Id string `json:"id,omitempty"` + + // KeepForever: Whether to keep this revision forever, even if it is no + // longer the head revision. If not set, the revision will be + // automatically purged 30 days after newer content is uploaded. This + // can be set on a maximum of 200 revisions for a file. + // This field is only applicable to files with binary content in Drive. + KeepForever bool `json:"keepForever,omitempty"` + + // Kind: This is always drive#revision. 
+ Kind string `json:"kind,omitempty"` + + // LastModifyingUser: The last user to modify this revision. + LastModifyingUser *User `json:"lastModifyingUser,omitempty"` + + // Md5Checksum: The MD5 checksum of the revision's content. This is only + // applicable to files with binary content in Drive. + Md5Checksum string `json:"md5Checksum,omitempty"` + + // MimeType: The MIME type of the revision. + MimeType string `json:"mimeType,omitempty"` + + // ModifiedTime: The last time the revision was modified (RFC 3339 + // date-time). + ModifiedTime string `json:"modifiedTime,omitempty"` + + // OriginalFilename: The original filename used to create this revision. + // This is only applicable to files with binary content in Drive. + OriginalFilename string `json:"originalFilename,omitempty"` + + // PublishAuto: Whether subsequent revisions will be automatically + // republished. This is only applicable to Google Docs. + PublishAuto bool `json:"publishAuto,omitempty"` + + // Published: Whether this revision is published. This is only + // applicable to Google Docs. + Published bool `json:"published,omitempty"` + + // PublishedOutsideDomain: Whether this revision is published outside + // the domain. This is only applicable to Google Docs. + PublishedOutsideDomain bool `json:"publishedOutsideDomain,omitempty"` + + // Size: The size of the revision's content in bytes. This is only + // applicable to files with binary content in Drive. + Size int64 `json:"size,omitempty,string"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. 
This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *Revision) MarshalJSON() ([]byte, error) { + type noMethod Revision + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// RevisionList: A list of revisions of a file. +type RevisionList struct { + // Kind: This is always drive#revisionList. + Kind string `json:"kind,omitempty"` + + // Revisions: The full list of revisions. + Revisions []*Revision `json:"revisions,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Kind") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *RevisionList) MarshalJSON() ([]byte, error) { + type noMethod RevisionList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type StartPageToken struct { + // Kind: This is always drive#startPageToken. + Kind string `json:"kind,omitempty"` + + // StartPageToken: The starting page token for listing changes. + StartPageToken string `json:"startPageToken,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Kind") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. 
This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *StartPageToken) MarshalJSON() ([]byte, error) { + type noMethod StartPageToken + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// User: Information about a Drive user. +type User struct { + // DisplayName: A plain text displayable name for this user. + DisplayName string `json:"displayName,omitempty"` + + // EmailAddress: The email address of the user. This may not be present + // in certain contexts if the user has not made their email address + // visible to the requester. + EmailAddress string `json:"emailAddress,omitempty"` + + // Kind: This is always drive#user. + Kind string `json:"kind,omitempty"` + + // Me: Whether this user is the requesting user. + Me bool `json:"me,omitempty"` + + // PermissionId: The user's ID as visible in Permission resources. + PermissionId string `json:"permissionId,omitempty"` + + // PhotoLink: A link to the user's profile photo, if available. + PhotoLink string `json:"photoLink,omitempty"` + + // ForceSendFields is a list of field names (e.g. "DisplayName") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *User) MarshalJSON() ([]byte, error) { + type noMethod User + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// method id "drive.about.get": + +type AboutGetCall struct { + s *Service + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// Get: Gets information about the user, the user's Drive, and system +// capabilities. 
+func (r *AboutService) Get() *AboutGetCall { + c := &AboutGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AboutGetCall) Fields(s ...googleapi.Field) *AboutGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *AboutGetCall) IfNoneMatch(entityTag string) *AboutGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AboutGetCall) Context(ctx context.Context) *AboutGetCall { + c.ctx_ = ctx + return c +} + +func (c *AboutGetCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "about") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.SetOpaque(req.URL) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "drive.about.get" call. +// Exactly one of *About or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *About.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *AboutGetCall) Do(opts ...googleapi.CallOption) (*About, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &About{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets information about the user, the user's Drive, and system capabilities.", + // "httpMethod": "GET", + // "id": "drive.about.get", + // "path": "about", + // "response": { + // "$ref": "About" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/drive", + // "https://www.googleapis.com/auth/drive.appdata", + // "https://www.googleapis.com/auth/drive.file", + // "https://www.googleapis.com/auth/drive.metadata", + // "https://www.googleapis.com/auth/drive.metadata.readonly", + // "https://www.googleapis.com/auth/drive.photos.readonly", + // "https://www.googleapis.com/auth/drive.readonly" + // ] + // } + +} + +// method id "drive.changes.getStartPageToken": + +type ChangesGetStartPageTokenCall struct { + s *Service + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// GetStartPageToken: Gets the starting pageToken for listing future +// changes. 
+func (r *ChangesService) GetStartPageToken() *ChangesGetStartPageTokenCall { + c := &ChangesGetStartPageTokenCall{s: r.s, urlParams_: make(gensupport.URLParams)} + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ChangesGetStartPageTokenCall) Fields(s ...googleapi.Field) *ChangesGetStartPageTokenCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ChangesGetStartPageTokenCall) IfNoneMatch(entityTag string) *ChangesGetStartPageTokenCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ChangesGetStartPageTokenCall) Context(ctx context.Context) *ChangesGetStartPageTokenCall { + c.ctx_ = ctx + return c +} + +func (c *ChangesGetStartPageTokenCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "changes/startPageToken") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.SetOpaque(req.URL) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "drive.changes.getStartPageToken" call. +// Exactly one of *StartPageToken or error will be non-nil. 
Any non-2xx +// status code is an error. Response headers are in either +// *StartPageToken.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ChangesGetStartPageTokenCall) Do(opts ...googleapi.CallOption) (*StartPageToken, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &StartPageToken{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets the starting pageToken for listing future changes.", + // "httpMethod": "GET", + // "id": "drive.changes.getStartPageToken", + // "path": "changes/startPageToken", + // "response": { + // "$ref": "StartPageToken" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/drive", + // "https://www.googleapis.com/auth/drive.appdata", + // "https://www.googleapis.com/auth/drive.file", + // "https://www.googleapis.com/auth/drive.metadata", + // "https://www.googleapis.com/auth/drive.metadata.readonly", + // "https://www.googleapis.com/auth/drive.photos.readonly", + // "https://www.googleapis.com/auth/drive.readonly" + // ] + // } + +} + +// method id "drive.changes.list": + +type ChangesListCall struct { + s *Service + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// List: Lists changes for a user. 
+func (r *ChangesService) List(pageToken string) *ChangesListCall { + c := &ChangesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// IncludeRemoved sets the optional parameter "includeRemoved": Whether +// to include changes indicating that items have left the view of the +// changes list, for example by deletion or lost access. +func (c *ChangesListCall) IncludeRemoved(includeRemoved bool) *ChangesListCall { + c.urlParams_.Set("includeRemoved", fmt.Sprint(includeRemoved)) + return c +} + +// PageSize sets the optional parameter "pageSize": The maximum number +// of changes to return per page. +func (c *ChangesListCall) PageSize(pageSize int64) *ChangesListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// RestrictToMyDrive sets the optional parameter "restrictToMyDrive": +// Whether to restrict the results to changes inside the My Drive +// hierarchy. This omits changes to files such as those in the +// Application Data folder or shared files which have not been added to +// My Drive. +func (c *ChangesListCall) RestrictToMyDrive(restrictToMyDrive bool) *ChangesListCall { + c.urlParams_.Set("restrictToMyDrive", fmt.Sprint(restrictToMyDrive)) + return c +} + +// Spaces sets the optional parameter "spaces": A comma-separated list +// of spaces to query within the user corpus. Supported values are +// 'drive', 'appDataFolder' and 'photos'. +func (c *ChangesListCall) Spaces(spaces string) *ChangesListCall { + c.urlParams_.Set("spaces", spaces) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *ChangesListCall) Fields(s ...googleapi.Field) *ChangesListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ChangesListCall) IfNoneMatch(entityTag string) *ChangesListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ChangesListCall) Context(ctx context.Context) *ChangesListCall { + c.ctx_ = ctx + return c +} + +func (c *ChangesListCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "changes") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.SetOpaque(req.URL) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "drive.changes.list" call. +// Exactly one of *ChangeList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *ChangeList.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ChangesListCall) Do(opts ...googleapi.CallOption) (*ChangeList, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ChangeList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists changes for a user.", + // "httpMethod": "GET", + // "id": "drive.changes.list", + // "parameterOrder": [ + // "pageToken" + // ], + // "parameters": { + // "includeRemoved": { + // "default": "true", + // "description": "Whether to include changes indicating that items have left the view of the changes list, for example by deletion or lost access.", + // "location": "query", + // "type": "boolean" + // }, + // "pageSize": { + // "default": "100", + // "description": "The maximum number of changes to return per page.", + // "format": "int32", + // "location": "query", + // "maximum": "1000", + // "minimum": "1", + // "type": "integer" + // }, + // "pageToken": { + // "description": "The token for continuing a previous list request on the next page. This should be set to the value of 'nextPageToken' from the previous response or to the response from the getStartPageToken method.", + // "location": "query", + // "required": true, + // "type": "string" + // }, + // "restrictToMyDrive": { + // "default": "false", + // "description": "Whether to restrict the results to changes inside the My Drive hierarchy. 
This omits changes to files such as those in the Application Data folder or shared files which have not been added to My Drive.", + // "location": "query", + // "type": "boolean" + // }, + // "spaces": { + // "default": "drive", + // "description": "A comma-separated list of spaces to query within the user corpus. Supported values are 'drive', 'appDataFolder' and 'photos'.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "changes", + // "response": { + // "$ref": "ChangeList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/drive", + // "https://www.googleapis.com/auth/drive.appdata", + // "https://www.googleapis.com/auth/drive.file", + // "https://www.googleapis.com/auth/drive.metadata", + // "https://www.googleapis.com/auth/drive.metadata.readonly", + // "https://www.googleapis.com/auth/drive.photos.readonly", + // "https://www.googleapis.com/auth/drive.readonly" + // ], + // "supportsSubscription": true + // } + +} + +// method id "drive.changes.watch": + +type ChangesWatchCall struct { + s *Service + channel *Channel + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Watch: Subscribes to changes for a user. +func (r *ChangesService) Watch(pageToken string, channel *Channel) *ChangesWatchCall { + c := &ChangesWatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.urlParams_.Set("pageToken", pageToken) + c.channel = channel + return c +} + +// IncludeRemoved sets the optional parameter "includeRemoved": Whether +// to include changes indicating that items have left the view of the +// changes list, for example by deletion or lost access. +func (c *ChangesWatchCall) IncludeRemoved(includeRemoved bool) *ChangesWatchCall { + c.urlParams_.Set("includeRemoved", fmt.Sprint(includeRemoved)) + return c +} + +// PageSize sets the optional parameter "pageSize": The maximum number +// of changes to return per page. 
+func (c *ChangesWatchCall) PageSize(pageSize int64) *ChangesWatchCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// RestrictToMyDrive sets the optional parameter "restrictToMyDrive": +// Whether to restrict the results to changes inside the My Drive +// hierarchy. This omits changes to files such as those in the +// Application Data folder or shared files which have not been added to +// My Drive. +func (c *ChangesWatchCall) RestrictToMyDrive(restrictToMyDrive bool) *ChangesWatchCall { + c.urlParams_.Set("restrictToMyDrive", fmt.Sprint(restrictToMyDrive)) + return c +} + +// Spaces sets the optional parameter "spaces": A comma-separated list +// of spaces to query within the user corpus. Supported values are +// 'drive', 'appDataFolder' and 'photos'. +func (c *ChangesWatchCall) Spaces(spaces string) *ChangesWatchCall { + c.urlParams_.Set("spaces", spaces) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ChangesWatchCall) Fields(s ...googleapi.Field) *ChangesWatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ChangesWatchCall) Context(ctx context.Context) *ChangesWatchCall { + c.ctx_ = ctx + return c +} + +func (c *ChangesWatchCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.channel) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "changes/watch") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.SetOpaque(req.URL) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "drive.changes.watch" call. +// Exactly one of *Channel or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Channel.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ChangesWatchCall) Do(opts ...googleapi.CallOption) (*Channel, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Channel{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Subscribes to changes for a user.", + // "httpMethod": "POST", + // "id": "drive.changes.watch", + // "parameterOrder": [ + // "pageToken" + // ], + // "parameters": { + // "includeRemoved": { + // "default": "true", + // "description": "Whether to include changes indicating that items have left the view of the changes list, for example by deletion or lost access.", + // "location": "query", + // "type": "boolean" + // }, + // "pageSize": { + // "default": "100", + // "description": "The maximum number of changes to return 
per page.", + // "format": "int32", + // "location": "query", + // "maximum": "1000", + // "minimum": "1", + // "type": "integer" + // }, + // "pageToken": { + // "description": "The token for continuing a previous list request on the next page. This should be set to the value of 'nextPageToken' from the previous response or to the response from the getStartPageToken method.", + // "location": "query", + // "required": true, + // "type": "string" + // }, + // "restrictToMyDrive": { + // "default": "false", + // "description": "Whether to restrict the results to changes inside the My Drive hierarchy. This omits changes to files such as those in the Application Data folder or shared files which have not been added to My Drive.", + // "location": "query", + // "type": "boolean" + // }, + // "spaces": { + // "default": "drive", + // "description": "A comma-separated list of spaces to query within the user corpus. Supported values are 'drive', 'appDataFolder' and 'photos'.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "changes/watch", + // "request": { + // "$ref": "Channel", + // "parameterName": "resource" + // }, + // "response": { + // "$ref": "Channel" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/drive", + // "https://www.googleapis.com/auth/drive.appdata", + // "https://www.googleapis.com/auth/drive.file", + // "https://www.googleapis.com/auth/drive.metadata", + // "https://www.googleapis.com/auth/drive.metadata.readonly", + // "https://www.googleapis.com/auth/drive.photos.readonly", + // "https://www.googleapis.com/auth/drive.readonly" + // ], + // "supportsSubscription": true + // } + +} + +// method id "drive.channels.stop": + +type ChannelsStopCall struct { + s *Service + channel *Channel + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Stop: Stop watching resources through this channel +func (r *ChannelsService) Stop(channel *Channel) *ChannelsStopCall { + c := &ChannelsStopCall{s: r.s, 
urlParams_: make(gensupport.URLParams)} + c.channel = channel + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ChannelsStopCall) Fields(s ...googleapi.Field) *ChannelsStopCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ChannelsStopCall) Context(ctx context.Context) *ChannelsStopCall { + c.ctx_ = ctx + return c +} + +func (c *ChannelsStopCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.channel) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "channels/stop") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.SetOpaque(req.URL) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "drive.channels.stop" call. +func (c *ChannelsStopCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if err != nil { + return err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return err + } + return nil + // { + // "description": "Stop watching resources through this channel", + // "httpMethod": "POST", + // "id": "drive.channels.stop", + // "path": "channels/stop", + // "request": { + // "$ref": "Channel", + // "parameterName": "resource" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/drive", + // "https://www.googleapis.com/auth/drive.appdata", + // "https://www.googleapis.com/auth/drive.file", + // "https://www.googleapis.com/auth/drive.metadata", + // "https://www.googleapis.com/auth/drive.metadata.readonly", + // "https://www.googleapis.com/auth/drive.photos.readonly", + // "https://www.googleapis.com/auth/drive.readonly" + // ] + // } + +} + +// method id "drive.comments.create": + +type CommentsCreateCall struct { + s *Service + fileId string + comment *Comment + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Create: Creates a new comment on a file. +func (r *CommentsService) Create(fileId string, comment *Comment) *CommentsCreateCall { + c := &CommentsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.fileId = fileId + c.comment = comment + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *CommentsCreateCall) Fields(s ...googleapi.Field) *CommentsCreateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *CommentsCreateCall) Context(ctx context.Context) *CommentsCreateCall { + c.ctx_ = ctx + return c +} + +func (c *CommentsCreateCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.comment) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}/comments") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "fileId": c.fileId, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "drive.comments.create" call. +// Exactly one of *Comment or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Comment.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *CommentsCreateCall) Do(opts ...googleapi.CallOption) (*Comment, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Comment{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a new comment on a file.", + // "httpMethod": "POST", + // "id": "drive.comments.create", + // "parameterOrder": [ + // "fileId" + // ], + // "parameters": { + // "fileId": { + // "description": "The ID of the file.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "files/{fileId}/comments", + // "request": { + // "$ref": "Comment" + // }, + // "response": { + // "$ref": "Comment" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/drive", + // "https://www.googleapis.com/auth/drive.file" + // ] + // } + +} + +// method id "drive.comments.delete": + +type CommentsDeleteCall struct { + s *Service + fileId string + commentId string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Delete: Deletes a comment. +func (r *CommentsService) Delete(fileId string, commentId string) *CommentsDeleteCall { + c := &CommentsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.fileId = fileId + c.commentId = commentId + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *CommentsDeleteCall) Fields(s ...googleapi.Field) *CommentsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *CommentsDeleteCall) Context(ctx context.Context) *CommentsDeleteCall { + c.ctx_ = ctx + return c +} + +func (c *CommentsDeleteCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}/comments/{commentId}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "fileId": c.fileId, + "commentId": c.commentId, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "drive.comments.delete" call. +func (c *CommentsDeleteCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if err != nil { + return err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return err + } + return nil + // { + // "description": "Deletes a comment.", + // "httpMethod": "DELETE", + // "id": "drive.comments.delete", + // "parameterOrder": [ + // "fileId", + // "commentId" + // ], + // "parameters": { + // "commentId": { + // "description": "The ID of the comment.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "fileId": { + // "description": "The ID of the file.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "files/{fileId}/comments/{commentId}", + // "scopes": [ + // "https://www.googleapis.com/auth/drive", + // "https://www.googleapis.com/auth/drive.file" + // ] + // } + +} + +// method id "drive.comments.get": + +type CommentsGetCall struct { + s *Service + fileId string + commentId string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// Get: Gets a comment by ID. +func (r *CommentsService) Get(fileId string, commentId string) *CommentsGetCall { + c := &CommentsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.fileId = fileId + c.commentId = commentId + return c +} + +// IncludeDeleted sets the optional parameter "includeDeleted": Whether +// to return deleted comments. Deleted comments will not include their +// original content. +func (c *CommentsGetCall) IncludeDeleted(includeDeleted bool) *CommentsGetCall { + c.urlParams_.Set("includeDeleted", fmt.Sprint(includeDeleted)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *CommentsGetCall) Fields(s ...googleapi.Field) *CommentsGetCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *CommentsGetCall) IfNoneMatch(entityTag string) *CommentsGetCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *CommentsGetCall) Context(ctx context.Context) *CommentsGetCall {
+	c.ctx_ = ctx
+	return c
+}
+
+func (c *CommentsGetCall) doRequest(alt string) (*http.Response, error) {
+	var body io.Reader = nil
+	c.urlParams_.Set("alt", alt)
+	urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}/comments/{commentId}")
+	urls += "?" + c.urlParams_.Encode()
+	req, _ := http.NewRequest("GET", urls, body)
+	googleapi.Expand(req.URL, map[string]string{
+		"fileId":    c.fileId,
+		"commentId": c.commentId,
+	})
+	req.Header.Set("User-Agent", c.s.userAgent())
+	if c.ifNoneMatch_ != "" {
+		req.Header.Set("If-None-Match", c.ifNoneMatch_)
+	}
+	if c.ctx_ != nil {
+		return ctxhttp.Do(c.ctx_, c.s.client, req)
+	}
+	return c.s.client.Do(req)
+}
+
+// Do executes the "drive.comments.get" call.
+// Exactly one of *Comment or error will be non-nil. Any non-2xx status
+// code is an error. Response headers are in either
+// *Comment.ServerResponse.Header or (if a response was returned at all)
+// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
+// check whether the returned error was because http.StatusNotModified
+// was returned.
+func (c *CommentsGetCall) Do(opts ...googleapi.CallOption) (*Comment, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Comment{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets a comment by ID.", + // "httpMethod": "GET", + // "id": "drive.comments.get", + // "parameterOrder": [ + // "fileId", + // "commentId" + // ], + // "parameters": { + // "commentId": { + // "description": "The ID of the comment.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "fileId": { + // "description": "The ID of the file.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "includeDeleted": { + // "default": "false", + // "description": "Whether to return deleted comments. Deleted comments will not include their original content.", + // "location": "query", + // "type": "boolean" + // } + // }, + // "path": "files/{fileId}/comments/{commentId}", + // "response": { + // "$ref": "Comment" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/drive", + // "https://www.googleapis.com/auth/drive.file", + // "https://www.googleapis.com/auth/drive.readonly" + // ] + // } + +} + +// method id "drive.comments.list": + +type CommentsListCall struct { + s *Service + fileId string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// List: Lists a file's comments. 
+func (r *CommentsService) List(fileId string) *CommentsListCall { + c := &CommentsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.fileId = fileId + return c +} + +// IncludeDeleted sets the optional parameter "includeDeleted": Whether +// to include deleted comments. Deleted comments will not include their +// original content. +func (c *CommentsListCall) IncludeDeleted(includeDeleted bool) *CommentsListCall { + c.urlParams_.Set("includeDeleted", fmt.Sprint(includeDeleted)) + return c +} + +// PageSize sets the optional parameter "pageSize": The maximum number +// of comments to return per page. +func (c *CommentsListCall) PageSize(pageSize int64) *CommentsListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": The token for +// continuing a previous list request on the next page. This should be +// set to the value of 'nextPageToken' from the previous response. +func (c *CommentsListCall) PageToken(pageToken string) *CommentsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// StartModifiedTime sets the optional parameter "startModifiedTime": +// The minimum value of 'modifiedTime' for the result comments (RFC 3339 +// date-time). +func (c *CommentsListCall) StartModifiedTime(startModifiedTime string) *CommentsListCall { + c.urlParams_.Set("startModifiedTime", startModifiedTime) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *CommentsListCall) Fields(s ...googleapi.Field) *CommentsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. 
Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *CommentsListCall) IfNoneMatch(entityTag string) *CommentsListCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *CommentsListCall) Context(ctx context.Context) *CommentsListCall {
+	c.ctx_ = ctx
+	return c
+}
+
+func (c *CommentsListCall) doRequest(alt string) (*http.Response, error) {
+	var body io.Reader = nil
+	c.urlParams_.Set("alt", alt)
+	urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}/comments")
+	urls += "?" + c.urlParams_.Encode()
+	req, _ := http.NewRequest("GET", urls, body)
+	googleapi.Expand(req.URL, map[string]string{
+		"fileId": c.fileId,
+	})
+	req.Header.Set("User-Agent", c.s.userAgent())
+	if c.ifNoneMatch_ != "" {
+		req.Header.Set("If-None-Match", c.ifNoneMatch_)
+	}
+	if c.ctx_ != nil {
+		return ctxhttp.Do(c.ctx_, c.s.client, req)
+	}
+	return c.s.client.Do(req)
+}
+
+// Do executes the "drive.comments.list" call.
+// Exactly one of *CommentList or error will be non-nil. Any non-2xx
+// status code is an error. Response headers are in either
+// *CommentList.ServerResponse.Header or (if a response was returned at
+// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified
+// to check whether the returned error was because
+// http.StatusNotModified was returned.
+func (c *CommentsListCall) Do(opts ...googleapi.CallOption) (*CommentList, error) {
+	gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &CommentList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists a file's comments.", + // "httpMethod": "GET", + // "id": "drive.comments.list", + // "parameterOrder": [ + // "fileId" + // ], + // "parameters": { + // "fileId": { + // "description": "The ID of the file.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "includeDeleted": { + // "default": "false", + // "description": "Whether to include deleted comments. Deleted comments will not include their original content.", + // "location": "query", + // "type": "boolean" + // }, + // "pageSize": { + // "default": "20", + // "description": "The maximum number of comments to return per page.", + // "format": "int32", + // "location": "query", + // "maximum": "100", + // "minimum": "1", + // "type": "integer" + // }, + // "pageToken": { + // "description": "The token for continuing a previous list request on the next page. 
This should be set to the value of 'nextPageToken' from the previous response.", + // "location": "query", + // "type": "string" + // }, + // "startModifiedTime": { + // "description": "The minimum value of 'modifiedTime' for the result comments (RFC 3339 date-time).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "files/{fileId}/comments", + // "response": { + // "$ref": "CommentList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/drive", + // "https://www.googleapis.com/auth/drive.file", + // "https://www.googleapis.com/auth/drive.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *CommentsListCall) Pages(ctx context.Context, f func(*CommentList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "drive.comments.update": + +type CommentsUpdateCall struct { + s *Service + fileId string + commentId string + comment *Comment + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Update: Updates a comment with patch semantics. +func (r *CommentsService) Update(fileId string, commentId string, comment *Comment) *CommentsUpdateCall { + c := &CommentsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.fileId = fileId + c.commentId = commentId + c.comment = comment + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *CommentsUpdateCall) Fields(s ...googleapi.Field) *CommentsUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *CommentsUpdateCall) Context(ctx context.Context) *CommentsUpdateCall { + c.ctx_ = ctx + return c +} + +func (c *CommentsUpdateCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.comment) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}/comments/{commentId}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PATCH", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "fileId": c.fileId, + "commentId": c.commentId, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "drive.comments.update" call. +// Exactly one of *Comment or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Comment.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *CommentsUpdateCall) Do(opts ...googleapi.CallOption) (*Comment, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Comment{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates a comment with patch semantics.", + // "httpMethod": "PATCH", + // "id": "drive.comments.update", + // "parameterOrder": [ + // "fileId", + // "commentId" + // ], + // "parameters": { + // "commentId": { + // "description": "The ID of the comment.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "fileId": { + // "description": "The ID of the file.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "files/{fileId}/comments/{commentId}", + // "request": { + // "$ref": "Comment" + // }, + // "response": { + // "$ref": "Comment" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/drive", + // "https://www.googleapis.com/auth/drive.file" + // ] + // } + +} + +// method id "drive.files.copy": + +type FilesCopyCall struct { + s *Service + fileId string + file *File + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Copy: Creates a copy of a file and applies any requested updates with +// patch semantics. 
+func (r *FilesService) Copy(fileId string, file *File) *FilesCopyCall { + c := &FilesCopyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.fileId = fileId + c.file = file + return c +} + +// IgnoreDefaultVisibility sets the optional parameter +// "ignoreDefaultVisibility": Whether to ignore the domain's default +// visibility settings for the created file. Domain administrators can +// choose to make all uploaded files visible to the domain by default; +// this parameter bypasses that behavior for the request. Permissions +// are still inherited from parent folders. +func (c *FilesCopyCall) IgnoreDefaultVisibility(ignoreDefaultVisibility bool) *FilesCopyCall { + c.urlParams_.Set("ignoreDefaultVisibility", fmt.Sprint(ignoreDefaultVisibility)) + return c +} + +// KeepRevisionForever sets the optional parameter +// "keepRevisionForever": Whether to set the 'keepForever' field in the +// new head revision. This is only applicable to files with binary +// content in Drive. +func (c *FilesCopyCall) KeepRevisionForever(keepRevisionForever bool) *FilesCopyCall { + c.urlParams_.Set("keepRevisionForever", fmt.Sprint(keepRevisionForever)) + return c +} + +// OcrLanguage sets the optional parameter "ocrLanguage": A language +// hint for OCR processing during image import (ISO 639-1 code). +func (c *FilesCopyCall) OcrLanguage(ocrLanguage string) *FilesCopyCall { + c.urlParams_.Set("ocrLanguage", ocrLanguage) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *FilesCopyCall) Fields(s ...googleapi.Field) *FilesCopyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *FilesCopyCall) Context(ctx context.Context) *FilesCopyCall { + c.ctx_ = ctx + return c +} + +func (c *FilesCopyCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.file) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}/copy") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "fileId": c.fileId, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "drive.files.copy" call. +// Exactly one of *File or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *File.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *FilesCopyCall) Do(opts ...googleapi.CallOption) (*File, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &File{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a copy of a file and applies any requested updates with patch semantics.", + // "httpMethod": "POST", + // "id": "drive.files.copy", + // "parameterOrder": [ + // "fileId" + // ], + // "parameters": { + // "fileId": { + // "description": "The ID of the file.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "ignoreDefaultVisibility": { + // "default": "false", + // "description": "Whether to ignore the domain's default visibility settings for the created file. Domain administrators can choose to make all uploaded files visible to the domain by default; this parameter bypasses that behavior for the request. Permissions are still inherited from parent folders.", + // "location": "query", + // "type": "boolean" + // }, + // "keepRevisionForever": { + // "default": "false", + // "description": "Whether to set the 'keepForever' field in the new head revision. 
This is only applicable to files with binary content in Drive.", + // "location": "query", + // "type": "boolean" + // }, + // "ocrLanguage": { + // "description": "A language hint for OCR processing during image import (ISO 639-1 code).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "files/{fileId}/copy", + // "request": { + // "$ref": "File" + // }, + // "response": { + // "$ref": "File" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/drive", + // "https://www.googleapis.com/auth/drive.appdata", + // "https://www.googleapis.com/auth/drive.file", + // "https://www.googleapis.com/auth/drive.photos.readonly" + // ] + // } + +} + +// method id "drive.files.create": + +type FilesCreateCall struct { + s *Service + file *File + urlParams_ gensupport.URLParams + media_ io.Reader + resumableBuffer_ *gensupport.ResumableBuffer + mediaType_ string + mediaSize_ int64 // mediaSize, if known. Used only for calls to progressUpdater_. + progressUpdater_ googleapi.ProgressUpdater + ctx_ context.Context +} + +// Create: Creates a new file. +func (r *FilesService) Create(file *File) *FilesCreateCall { + c := &FilesCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.file = file + return c +} + +// IgnoreDefaultVisibility sets the optional parameter +// "ignoreDefaultVisibility": Whether to ignore the domain's default +// visibility settings for the created file. Domain administrators can +// choose to make all uploaded files visible to the domain by default; +// this parameter bypasses that behavior for the request. Permissions +// are still inherited from parent folders. +func (c *FilesCreateCall) IgnoreDefaultVisibility(ignoreDefaultVisibility bool) *FilesCreateCall { + c.urlParams_.Set("ignoreDefaultVisibility", fmt.Sprint(ignoreDefaultVisibility)) + return c +} + +// KeepRevisionForever sets the optional parameter +// "keepRevisionForever": Whether to set the 'keepForever' field in the +// new head revision. 
This is only applicable to files with binary +// content in Drive. +func (c *FilesCreateCall) KeepRevisionForever(keepRevisionForever bool) *FilesCreateCall { + c.urlParams_.Set("keepRevisionForever", fmt.Sprint(keepRevisionForever)) + return c +} + +// OcrLanguage sets the optional parameter "ocrLanguage": A language +// hint for OCR processing during image import (ISO 639-1 code). +func (c *FilesCreateCall) OcrLanguage(ocrLanguage string) *FilesCreateCall { + c.urlParams_.Set("ocrLanguage", ocrLanguage) + return c +} + +// UseContentAsIndexableText sets the optional parameter +// "useContentAsIndexableText": Whether to use the uploaded content as +// indexable text. +func (c *FilesCreateCall) UseContentAsIndexableText(useContentAsIndexableText bool) *FilesCreateCall { + c.urlParams_.Set("useContentAsIndexableText", fmt.Sprint(useContentAsIndexableText)) + return c +} + +// Media specifies the media to upload in one or more chunks. The chunk +// size may be controlled by supplying a MediaOption generated by +// googleapi.ChunkSize. The chunk size defaults to +// googleapi.DefaultUploadChunkSize.The Content-Type header used in the +// upload request will be determined by sniffing the contents of r, +// unless a MediaOption generated by googleapi.ContentType is +// supplied. +// At most one of Media and ResumableMedia may be set. +func (c *FilesCreateCall) Media(r io.Reader, options ...googleapi.MediaOption) *FilesCreateCall { + opts := googleapi.ProcessMediaOptions(options) + chunkSize := opts.ChunkSize + if !opts.ForceEmptyContentType { + r, c.mediaType_ = gensupport.DetermineContentType(r, opts.ContentType) + } + c.media_, c.resumableBuffer_ = gensupport.PrepareUpload(r, chunkSize) + return c +} + +// ResumableMedia specifies the media to upload in chunks and can be +// canceled with ctx. +// +// Deprecated: use Media instead. +// +// At most one of Media and ResumableMedia may be set. 
mediaType +// identifies the MIME media type of the upload, such as "image/png". If +// mediaType is "", it will be auto-detected. The provided ctx will +// supersede any context previously provided to the Context method. +func (c *FilesCreateCall) ResumableMedia(ctx context.Context, r io.ReaderAt, size int64, mediaType string) *FilesCreateCall { + c.ctx_ = ctx + rdr := gensupport.ReaderAtToReader(r, size) + rdr, c.mediaType_ = gensupport.DetermineContentType(rdr, mediaType) + c.resumableBuffer_ = gensupport.NewResumableBuffer(rdr, googleapi.DefaultUploadChunkSize) + c.media_ = nil + c.mediaSize_ = size + return c +} + +// ProgressUpdater provides a callback function that will be called +// after every chunk. It should be a low-latency function in order to +// not slow down the upload operation. This should only be called when +// using ResumableMedia (as opposed to Media). +func (c *FilesCreateCall) ProgressUpdater(pu googleapi.ProgressUpdater) *FilesCreateCall { + c.progressUpdater_ = pu + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *FilesCreateCall) Fields(s ...googleapi.Field) *FilesCreateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +// This context will supersede any context previously provided to the +// ResumableMedia method. 
+func (c *FilesCreateCall) Context(ctx context.Context) *FilesCreateCall { + c.ctx_ = ctx + return c +} + +func (c *FilesCreateCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.file) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "files") + if c.media_ != nil || c.resumableBuffer_ != nil { + urls = strings.Replace(urls, "https://www.googleapis.com/", "https://www.googleapis.com/upload/", 1) + protocol := "multipart" + if c.resumableBuffer_ != nil { + protocol = "resumable" + } + c.urlParams_.Set("uploadType", protocol) + } + urls += "?" + c.urlParams_.Encode() + if c.media_ != nil { + var combined io.ReadCloser + combined, ctype = gensupport.CombineBodyMedia(body, ctype, c.media_, c.mediaType_) + defer combined.Close() + body = combined + } + req, _ := http.NewRequest("POST", urls, body) + googleapi.SetOpaque(req.URL) + if c.resumableBuffer_ != nil && c.mediaType_ != "" { + req.Header.Set("X-Upload-Content-Type", c.mediaType_) + } + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "drive.files.create" call. +// Exactly one of *File or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *File.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *FilesCreateCall) Do(opts ...googleapi.CallOption) (*File, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + if c.resumableBuffer_ != nil { + loc := res.Header.Get("Location") + rx := &gensupport.ResumableUpload{ + Client: c.s.client, + UserAgent: c.s.userAgent(), + URI: loc, + Media: c.resumableBuffer_, + MediaType: c.mediaType_, + Callback: func(curr int64) { + if c.progressUpdater_ != nil { + c.progressUpdater_(curr, c.mediaSize_) + } + }, + } + ctx := c.ctx_ + if ctx == nil { + ctx = context.TODO() + } + res, err = rx.Upload(ctx) + if err != nil { + return nil, err + } + defer res.Body.Close() + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + } + ret := &File{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a new file.", + // "httpMethod": "POST", + // "id": "drive.files.create", + // "mediaUpload": { + // "accept": [ + // "*/*" + // ], + // "maxSize": "5120GB", + // "protocols": { + // "resumable": { + // "multipart": true, + // "path": "/resumable/upload/drive/v3/files" + // }, + // "simple": { + // "multipart": true, + // "path": "/upload/drive/v3/files" + // } + // } + // }, + // "parameters": { + // "ignoreDefaultVisibility": { + // "default": "false", + // "description": "Whether to ignore the domain's default visibility settings for the created file. Domain administrators can choose to make all uploaded files visible to the domain by default; this parameter bypasses that behavior for the request. 
Permissions are still inherited from parent folders.",
	//       "location": "query",
	//       "type": "boolean"
	//     },
	//     "keepRevisionForever": {
	//       "default": "false",
	//       "description": "Whether to set the 'keepForever' field in the new head revision. This is only applicable to files with binary content in Drive.",
	//       "location": "query",
	//       "type": "boolean"
	//     },
	//     "ocrLanguage": {
	//       "description": "A language hint for OCR processing during image import (ISO 639-1 code).",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "useContentAsIndexableText": {
	//       "default": "false",
	//       "description": "Whether to use the uploaded content as indexable text.",
	//       "location": "query",
	//       "type": "boolean"
	//     }
	//   },
	//   "path": "files",
	//   "request": {
	//     "$ref": "File"
	//   },
	//   "response": {
	//     "$ref": "File"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/drive",
	//     "https://www.googleapis.com/auth/drive.appdata",
	//     "https://www.googleapis.com/auth/drive.file"
	//   ],
	//   "supportsMediaUpload": true,
	//   "supportsSubscription": true
	// }

}

// method id "drive.files.delete":

type FilesDeleteCall struct {
	s          *Service
	fileId     string
	urlParams_ gensupport.URLParams
	ctx_       context.Context
}

// Delete: Permanently deletes a file owned by the user without moving
// it to the trash. If the target is a folder, all descendants owned by
// the user are also deleted.
func (r *FilesService) Delete(fileId string) *FilesDeleteCall {
	c := &FilesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)}
	c.fileId = fileId
	return c
}

// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *FilesDeleteCall) Fields(s ...googleapi.Field) *FilesDeleteCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}

// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *FilesDeleteCall) Context(ctx context.Context) *FilesDeleteCall {
	c.ctx_ = ctx
	return c
}

func (c *FilesDeleteCall) doRequest(alt string) (*http.Response, error) {
	var body io.Reader = nil
	c.urlParams_.Set("alt", alt)
	urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}")
	urls += "?" + c.urlParams_.Encode()
	req, _ := http.NewRequest("DELETE", urls, body)
	googleapi.Expand(req.URL, map[string]string{
		"fileId": c.fileId,
	})
	req.Header.Set("User-Agent", c.s.userAgent())
	if c.ctx_ != nil {
		return ctxhttp.Do(c.ctx_, c.s.client, req)
	}
	return c.s.client.Do(req)
}

// Do executes the "drive.files.delete" call.
func (c *FilesDeleteCall) Do(opts ...googleapi.CallOption) error {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	if err != nil {
		return err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return err
	}
	return nil
	// {
	//   "description": "Permanently deletes a file owned by the user without moving it to the trash. If the target is a folder, all descendants owned by the user are also deleted.",
	//   "httpMethod": "DELETE",
	//   "id": "drive.files.delete",
	//   "parameterOrder": [
	//     "fileId"
	//   ],
	//   "parameters": {
	//     "fileId": {
	//       "description": "The ID of the file.",
	//       "location": "path",
	//       "required": true,
	//       "type": "string"
	//     }
	//   },
	//   "path": "files/{fileId}",
	//   "scopes": [
	//     "https://www.googleapis.com/auth/drive",
	//     "https://www.googleapis.com/auth/drive.appdata",
	//     "https://www.googleapis.com/auth/drive.file"
	//   ]
	// }

}

// method id "drive.files.emptyTrash":

type FilesEmptyTrashCall struct {
	s          *Service
	urlParams_ gensupport.URLParams
	ctx_       context.Context
}

// EmptyTrash: Permanently deletes all of the user's trashed files.
func (r *FilesService) EmptyTrash() *FilesEmptyTrashCall {
	c := &FilesEmptyTrashCall{s: r.s, urlParams_: make(gensupport.URLParams)}
	return c
}

// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *FilesEmptyTrashCall) Fields(s ...googleapi.Field) *FilesEmptyTrashCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}

// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *FilesEmptyTrashCall) Context(ctx context.Context) *FilesEmptyTrashCall {
	c.ctx_ = ctx
	return c
}

func (c *FilesEmptyTrashCall) doRequest(alt string) (*http.Response, error) {
	var body io.Reader = nil
	c.urlParams_.Set("alt", alt)
	urls := googleapi.ResolveRelative(c.s.BasePath, "files/trash")
	urls += "?" + c.urlParams_.Encode()
	req, _ := http.NewRequest("DELETE", urls, body)
	googleapi.SetOpaque(req.URL)
	req.Header.Set("User-Agent", c.s.userAgent())
	if c.ctx_ != nil {
		return ctxhttp.Do(c.ctx_, c.s.client, req)
	}
	return c.s.client.Do(req)
}

// Do executes the "drive.files.emptyTrash" call.
func (c *FilesEmptyTrashCall) Do(opts ...googleapi.CallOption) error {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	if err != nil {
		return err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return err
	}
	return nil
	// {
	//   "description": "Permanently deletes all of the user's trashed files.",
	//   "httpMethod": "DELETE",
	//   "id": "drive.files.emptyTrash",
	//   "path": "files/trash",
	//   "scopes": [
	//     "https://www.googleapis.com/auth/drive"
	//   ]
	// }

}

// method id "drive.files.export":

type FilesExportCall struct {
	s            *Service
	fileId       string
	urlParams_   gensupport.URLParams
	ifNoneMatch_ string
	ctx_         context.Context
}

// Export: Exports a Google Doc to the requested MIME type and returns
// the exported content.
func (r *FilesService) Export(fileId string, mimeType string) *FilesExportCall {
	c := &FilesExportCall{s: r.s, urlParams_: make(gensupport.URLParams)}
	c.fileId = fileId
	c.urlParams_.Set("mimeType", mimeType)
	return c
}

// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *FilesExportCall) Fields(s ...googleapi.Field) *FilesExportCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}

// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *FilesExportCall) IfNoneMatch(entityTag string) *FilesExportCall {
	c.ifNoneMatch_ = entityTag
	return c
}

// Context sets the context to be used in this call's Do and Download
// methods. Any pending HTTP request will be aborted if the provided
// context is canceled.
func (c *FilesExportCall) Context(ctx context.Context) *FilesExportCall {
	c.ctx_ = ctx
	return c
}

func (c *FilesExportCall) doRequest(alt string) (*http.Response, error) {
	var body io.Reader = nil
	c.urlParams_.Set("alt", alt)
	urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}/export")
	urls += "?" + c.urlParams_.Encode()
	req, _ := http.NewRequest("GET", urls, body)
	googleapi.Expand(req.URL, map[string]string{
		"fileId": c.fileId,
	})
	req.Header.Set("User-Agent", c.s.userAgent())
	if c.ifNoneMatch_ != "" {
		req.Header.Set("If-None-Match", c.ifNoneMatch_)
	}
	if c.ctx_ != nil {
		return ctxhttp.Do(c.ctx_, c.s.client, req)
	}
	return c.s.client.Do(req)
}

// Download fetches the API endpoint's "media" value, instead of the normal
// API response value. If the returned error is nil, the Response is guaranteed to
// have a 2xx status code. Callers must close the Response.Body as usual.
func (c *FilesExportCall) Download(opts ...googleapi.CallOption) (*http.Response, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("media")
	if err != nil {
		return nil, err
	}
	if err := googleapi.CheckMediaResponse(res); err != nil {
		res.Body.Close()
		return nil, err
	}
	return res, nil
}

// Do executes the "drive.files.export" call.
func (c *FilesExportCall) Do(opts ...googleapi.CallOption) error {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	if err != nil {
		return err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return err
	}
	return nil
	// {
	//   "description": "Exports a Google Doc to the requested MIME type and returns the exported content.",
	//   "httpMethod": "GET",
	//   "id": "drive.files.export",
	//   "parameterOrder": [
	//     "fileId",
	//     "mimeType"
	//   ],
	//   "parameters": {
	//     "fileId": {
	//       "description": "The ID of the file.",
	//       "location": "path",
	//       "required": true,
	//       "type": "string"
	//     },
	//     "mimeType": {
	//       "description": "The MIME type of the format requested for this export.",
	//       "location": "query",
	//       "required": true,
	//       "type": "string"
	//     }
	//   },
	//   "path": "files/{fileId}/export",
	//   "scopes": [
	//     "https://www.googleapis.com/auth/drive",
	//     "https://www.googleapis.com/auth/drive.file",
	//     "https://www.googleapis.com/auth/drive.readonly"
	//   ],
	//   "supportsMediaDownload": true
	// }

}

// method id "drive.files.generateIds":

type FilesGenerateIdsCall struct {
	s            *Service
	urlParams_   gensupport.URLParams
	ifNoneMatch_ string
	ctx_         context.Context
}

// GenerateIds: Generates a set of file IDs which can be provided in
// create requests.
func (r *FilesService) GenerateIds() *FilesGenerateIdsCall {
	c := &FilesGenerateIdsCall{s: r.s, urlParams_: make(gensupport.URLParams)}
	return c
}

// Count sets the optional parameter "count": The number of IDs to
// return.
func (c *FilesGenerateIdsCall) Count(count int64) *FilesGenerateIdsCall {
	c.urlParams_.Set("count", fmt.Sprint(count))
	return c
}

// Space sets the optional parameter "space": The space in which the IDs
// can be used to create new files. Supported values are 'drive' and
// 'appDataFolder'.
func (c *FilesGenerateIdsCall) Space(space string) *FilesGenerateIdsCall {
	c.urlParams_.Set("space", space)
	return c
}

// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *FilesGenerateIdsCall) Fields(s ...googleapi.Field) *FilesGenerateIdsCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}

// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *FilesGenerateIdsCall) IfNoneMatch(entityTag string) *FilesGenerateIdsCall {
	c.ifNoneMatch_ = entityTag
	return c
}

// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *FilesGenerateIdsCall) Context(ctx context.Context) *FilesGenerateIdsCall {
	c.ctx_ = ctx
	return c
}

func (c *FilesGenerateIdsCall) doRequest(alt string) (*http.Response, error) {
	var body io.Reader = nil
	c.urlParams_.Set("alt", alt)
	urls := googleapi.ResolveRelative(c.s.BasePath, "files/generateIds")
	urls += "?" + c.urlParams_.Encode()
	req, _ := http.NewRequest("GET", urls, body)
	googleapi.SetOpaque(req.URL)
	req.Header.Set("User-Agent", c.s.userAgent())
	if c.ifNoneMatch_ != "" {
		req.Header.Set("If-None-Match", c.ifNoneMatch_)
	}
	if c.ctx_ != nil {
		return ctxhttp.Do(c.ctx_, c.s.client, req)
	}
	return c.s.client.Do(req)
}

// Do executes the "drive.files.generateIds" call.
// Exactly one of *GeneratedIds or error will be non-nil. Any non-2xx
// status code is an error. Response headers are in either
// *GeneratedIds.ServerResponse.Header or (if a response was returned at
// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified
// to check whether the returned error was because
// http.StatusNotModified was returned.
func (c *FilesGenerateIdsCall) Do(opts ...googleapi.CallOption) (*GeneratedIds, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &GeneratedIds{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Generates a set of file IDs which can be provided in create requests.",
	//   "httpMethod": "GET",
	//   "id": "drive.files.generateIds",
	//   "parameters": {
	//     "count": {
	//       "default": "10",
	//       "description": "The number of IDs to return.",
	//       "format": "int32",
	//       "location": "query",
	//       "maximum": "1000",
	//       "minimum": "1",
	//       "type": "integer"
	//     },
	//     "space": {
	//       "default": "drive",
	//       "description": "The space in which the IDs can be used to create new files. Supported values are 'drive' and 'appDataFolder'.",
	//       "location": "query",
	//       "type": "string"
	//     }
	//   },
	//   "path": "files/generateIds",
	//   "response": {
	//     "$ref": "GeneratedIds"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/drive",
	//     "https://www.googleapis.com/auth/drive.appdata",
	//     "https://www.googleapis.com/auth/drive.file"
	//   ]
	// }

}

// method id "drive.files.get":

type FilesGetCall struct {
	s            *Service
	fileId       string
	urlParams_   gensupport.URLParams
	ifNoneMatch_ string
	ctx_         context.Context
}

// Get: Gets a file's metadata or content by ID.
func (r *FilesService) Get(fileId string) *FilesGetCall {
	c := &FilesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
	c.fileId = fileId
	return c
}

// AcknowledgeAbuse sets the optional parameter "acknowledgeAbuse":
// Whether the user is acknowledging the risk of downloading known
// malware or other abusive files. This is only applicable when
// alt=media.
func (c *FilesGetCall) AcknowledgeAbuse(acknowledgeAbuse bool) *FilesGetCall {
	c.urlParams_.Set("acknowledgeAbuse", fmt.Sprint(acknowledgeAbuse))
	return c
}

// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *FilesGetCall) Fields(s ...googleapi.Field) *FilesGetCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}

// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *FilesGetCall) IfNoneMatch(entityTag string) *FilesGetCall {
	c.ifNoneMatch_ = entityTag
	return c
}

// Context sets the context to be used in this call's Do and Download
// methods. Any pending HTTP request will be aborted if the provided
// context is canceled.
func (c *FilesGetCall) Context(ctx context.Context) *FilesGetCall {
	c.ctx_ = ctx
	return c
}

func (c *FilesGetCall) doRequest(alt string) (*http.Response, error) {
	var body io.Reader = nil
	c.urlParams_.Set("alt", alt)
	urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}")
	urls += "?" + c.urlParams_.Encode()
	req, _ := http.NewRequest("GET", urls, body)
	googleapi.Expand(req.URL, map[string]string{
		"fileId": c.fileId,
	})
	req.Header.Set("User-Agent", c.s.userAgent())
	if c.ifNoneMatch_ != "" {
		req.Header.Set("If-None-Match", c.ifNoneMatch_)
	}
	if c.ctx_ != nil {
		return ctxhttp.Do(c.ctx_, c.s.client, req)
	}
	return c.s.client.Do(req)
}

// Download fetches the API endpoint's "media" value, instead of the normal
// API response value. If the returned error is nil, the Response is guaranteed to
// have a 2xx status code. Callers must close the Response.Body as usual.
func (c *FilesGetCall) Download(opts ...googleapi.CallOption) (*http.Response, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("media")
	if err != nil {
		return nil, err
	}
	if err := googleapi.CheckMediaResponse(res); err != nil {
		res.Body.Close()
		return nil, err
	}
	return res, nil
}

// Do executes the "drive.files.get" call.
// Exactly one of *File or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *File.ServerResponse.Header or (if a response was returned at all) in
// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check
// whether the returned error was because http.StatusNotModified was
// returned.
func (c *FilesGetCall) Do(opts ...googleapi.CallOption) (*File, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &File{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Gets a file's metadata or content by ID.",
	//   "httpMethod": "GET",
	//   "id": "drive.files.get",
	//   "parameterOrder": [
	//     "fileId"
	//   ],
	//   "parameters": {
	//     "acknowledgeAbuse": {
	//       "default": "false",
	//       "description": "Whether the user is acknowledging the risk of downloading known malware or other abusive files. This is only applicable when alt=media.",
	//       "location": "query",
	//       "type": "boolean"
	//     },
	//     "fileId": {
	//       "description": "The ID of the file.",
	//       "location": "path",
	//       "required": true,
	//       "type": "string"
	//     }
	//   },
	//   "path": "files/{fileId}",
	//   "response": {
	//     "$ref": "File"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/drive",
	//     "https://www.googleapis.com/auth/drive.appdata",
	//     "https://www.googleapis.com/auth/drive.file",
	//     "https://www.googleapis.com/auth/drive.metadata",
	//     "https://www.googleapis.com/auth/drive.metadata.readonly",
	//     "https://www.googleapis.com/auth/drive.photos.readonly",
	//     "https://www.googleapis.com/auth/drive.readonly"
	//   ],
	//   "supportsMediaDownload": true,
	//   "supportsSubscription": true,
	//   "useMediaDownloadService": true
	// }

}

// method id "drive.files.list":

type FilesListCall struct {
	s            *Service
	urlParams_   gensupport.URLParams
	ifNoneMatch_ string
	ctx_         context.Context
}

// List: Lists or searches files.
func (r *FilesService) List() *FilesListCall {
	c := &FilesListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
	return c
}

// Corpus sets the optional parameter "corpus": The source of files to
// list.
//
// Possible values:
//   "domain" - Files shared to the user's domain.
//   "user" (default) - Files owned by or shared to the user.
func (c *FilesListCall) Corpus(corpus string) *FilesListCall {
	c.urlParams_.Set("corpus", corpus)
	return c
}

// OrderBy sets the optional parameter "orderBy": A comma-separated list
// of sort keys. Valid keys are 'createdTime', 'folder',
// 'modifiedByMeTime', 'modifiedTime', 'name', 'quotaBytesUsed',
// 'recency', 'sharedWithMeTime', 'starred', and 'viewedByMeTime'. Each
// key sorts ascending by default, but may be reversed with the 'desc'
// modifier. Example usage: ?orderBy=folder,modifiedTime desc,name.
// Please note that there is a current limitation for users with
// approximately one million files in which the requested sort order is
// ignored.
func (c *FilesListCall) OrderBy(orderBy string) *FilesListCall {
	c.urlParams_.Set("orderBy", orderBy)
	return c
}

// PageSize sets the optional parameter "pageSize": The maximum number
// of files to return per page.
func (c *FilesListCall) PageSize(pageSize int64) *FilesListCall {
	c.urlParams_.Set("pageSize", fmt.Sprint(pageSize))
	return c
}

// PageToken sets the optional parameter "pageToken": The token for
// continuing a previous list request on the next page. This should be
// set to the value of 'nextPageToken' from the previous response.
func (c *FilesListCall) PageToken(pageToken string) *FilesListCall {
	c.urlParams_.Set("pageToken", pageToken)
	return c
}

// Q sets the optional parameter "q": A query for filtering the file
// results. See the "Search for Files" guide for supported syntax.
func (c *FilesListCall) Q(q string) *FilesListCall {
	c.urlParams_.Set("q", q)
	return c
}

// Spaces sets the optional parameter "spaces": A comma-separated list
// of spaces to query within the corpus. Supported values are 'drive',
// 'appDataFolder' and 'photos'.
func (c *FilesListCall) Spaces(spaces string) *FilesListCall {
	c.urlParams_.Set("spaces", spaces)
	return c
}

// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *FilesListCall) Fields(s ...googleapi.Field) *FilesListCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}

// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *FilesListCall) IfNoneMatch(entityTag string) *FilesListCall {
	c.ifNoneMatch_ = entityTag
	return c
}

// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *FilesListCall) Context(ctx context.Context) *FilesListCall {
	c.ctx_ = ctx
	return c
}

func (c *FilesListCall) doRequest(alt string) (*http.Response, error) {
	var body io.Reader = nil
	c.urlParams_.Set("alt", alt)
	urls := googleapi.ResolveRelative(c.s.BasePath, "files")
	urls += "?" + c.urlParams_.Encode()
	req, _ := http.NewRequest("GET", urls, body)
	googleapi.SetOpaque(req.URL)
	req.Header.Set("User-Agent", c.s.userAgent())
	if c.ifNoneMatch_ != "" {
		req.Header.Set("If-None-Match", c.ifNoneMatch_)
	}
	if c.ctx_ != nil {
		return ctxhttp.Do(c.ctx_, c.s.client, req)
	}
	return c.s.client.Do(req)
}

// Do executes the "drive.files.list" call.
// Exactly one of *FileList or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *FileList.ServerResponse.Header or (if a response was returned at
// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified
// to check whether the returned error was because
// http.StatusNotModified was returned.
func (c *FilesListCall) Do(opts ...googleapi.CallOption) (*FileList, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &FileList{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Lists or searches files.",
	//   "httpMethod": "GET",
	//   "id": "drive.files.list",
	//   "parameters": {
	//     "corpus": {
	//       "default": "user",
	//       "description": "The source of files to list.",
	//       "enum": [
	//         "domain",
	//         "user"
	//       ],
	//       "enumDescriptions": [
	//         "Files shared to the user's domain.",
	//         "Files owned by or shared to the user."
	//       ],
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "orderBy": {
	//       "description": "A comma-separated list of sort keys. Valid keys are 'createdTime', 'folder', 'modifiedByMeTime', 'modifiedTime', 'name', 'quotaBytesUsed', 'recency', 'sharedWithMeTime', 'starred', and 'viewedByMeTime'. Each key sorts ascending by default, but may be reversed with the 'desc' modifier. Example usage: ?orderBy=folder,modifiedTime desc,name. Please note that there is a current limitation for users with approximately one million files in which the requested sort order is ignored.",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "pageSize": {
	//       "default": "100",
	//       "description": "The maximum number of files to return per page.",
	//       "format": "int32",
	//       "location": "query",
	//       "maximum": "1000",
	//       "minimum": "1",
	//       "type": "integer"
	//     },
	//     "pageToken": {
	//       "description": "The token for continuing a previous list request on the next page. This should be set to the value of 'nextPageToken' from the previous response.",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "q": {
	//       "description": "A query for filtering the file results. See the \"Search for Files\" guide for supported syntax.",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "spaces": {
	//       "default": "drive",
	//       "description": "A comma-separated list of spaces to query within the corpus. Supported values are 'drive', 'appDataFolder' and 'photos'.",
	//       "location": "query",
	//       "type": "string"
	//     }
	//   },
	//   "path": "files",
	//   "response": {
	//     "$ref": "FileList"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/drive",
	//     "https://www.googleapis.com/auth/drive.appdata",
	//     "https://www.googleapis.com/auth/drive.file",
	//     "https://www.googleapis.com/auth/drive.metadata",
	//     "https://www.googleapis.com/auth/drive.metadata.readonly",
	//     "https://www.googleapis.com/auth/drive.photos.readonly",
	//     "https://www.googleapis.com/auth/drive.readonly"
	//   ]
	// }

}

// Pages invokes f for each page of results.
// A non-nil error returned from f will halt the iteration.
// The provided context supersedes any context provided to the Context method.
func (c *FilesListCall) Pages(ctx context.Context, f func(*FileList) error) error {
	c.ctx_ = ctx
	defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point
	for {
		x, err := c.Do()
		if err != nil {
			return err
		}
		if err := f(x); err != nil {
			return err
		}
		if x.NextPageToken == "" {
			return nil
		}
		c.PageToken(x.NextPageToken)
	}
}

// method id "drive.files.update":

type FilesUpdateCall struct {
	s                *Service
	fileId           string
	file             *File
	urlParams_       gensupport.URLParams
	media_           io.Reader
	resumableBuffer_ *gensupport.ResumableBuffer
	mediaType_       string
	mediaSize_       int64 // mediaSize, if known. Used only for calls to progressUpdater_.
	progressUpdater_ googleapi.ProgressUpdater
	ctx_             context.Context
}

// Update: Updates a file's metadata and/or content with patch
// semantics.
func (r *FilesService) Update(fileId string, file *File) *FilesUpdateCall {
	c := &FilesUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
	c.fileId = fileId
	c.file = file
	return c
}

// AddParents sets the optional parameter "addParents": A
// comma-separated list of parent IDs to add.
func (c *FilesUpdateCall) AddParents(addParents string) *FilesUpdateCall {
	c.urlParams_.Set("addParents", addParents)
	return c
}

// KeepRevisionForever sets the optional parameter
// "keepRevisionForever": Whether to set the 'keepForever' field in the
// new head revision. This is only applicable to files with binary
// content in Drive.
func (c *FilesUpdateCall) KeepRevisionForever(keepRevisionForever bool) *FilesUpdateCall {
	c.urlParams_.Set("keepRevisionForever", fmt.Sprint(keepRevisionForever))
	return c
}

// OcrLanguage sets the optional parameter "ocrLanguage": A language
// hint for OCR processing during image import (ISO 639-1 code).
func (c *FilesUpdateCall) OcrLanguage(ocrLanguage string) *FilesUpdateCall {
	c.urlParams_.Set("ocrLanguage", ocrLanguage)
	return c
}

// RemoveParents sets the optional parameter "removeParents": A
// comma-separated list of parent IDs to remove.
func (c *FilesUpdateCall) RemoveParents(removeParents string) *FilesUpdateCall {
	c.urlParams_.Set("removeParents", removeParents)
	return c
}

// UseContentAsIndexableText sets the optional parameter
// "useContentAsIndexableText": Whether to use the uploaded content as
// indexable text.
func (c *FilesUpdateCall) UseContentAsIndexableText(useContentAsIndexableText bool) *FilesUpdateCall {
	c.urlParams_.Set("useContentAsIndexableText", fmt.Sprint(useContentAsIndexableText))
	return c
}

// Media specifies the media to upload in one or more chunks. The chunk
// size may be controlled by supplying a MediaOption generated by
// googleapi.ChunkSize. The chunk size defaults to
// googleapi.DefaultUploadChunkSize. The Content-Type header used in the
// upload request will be determined by sniffing the contents of r,
// unless a MediaOption generated by googleapi.ContentType is
// supplied.
// At most one of Media and ResumableMedia may be set.
func (c *FilesUpdateCall) Media(r io.Reader, options ...googleapi.MediaOption) *FilesUpdateCall {
	opts := googleapi.ProcessMediaOptions(options)
	chunkSize := opts.ChunkSize
	if !opts.ForceEmptyContentType {
		r, c.mediaType_ = gensupport.DetermineContentType(r, opts.ContentType)
	}
	c.media_, c.resumableBuffer_ = gensupport.PrepareUpload(r, chunkSize)
	return c
}

// ResumableMedia specifies the media to upload in chunks and can be
// canceled with ctx.
//
// Deprecated: use Media instead.
//
// At most one of Media and ResumableMedia may be set. mediaType
// identifies the MIME media type of the upload, such as "image/png". If
// mediaType is "", it will be auto-detected. The provided ctx will
// supersede any context previously provided to the Context method.
func (c *FilesUpdateCall) ResumableMedia(ctx context.Context, r io.ReaderAt, size int64, mediaType string) *FilesUpdateCall {
	c.ctx_ = ctx
	rdr := gensupport.ReaderAtToReader(r, size)
	rdr, c.mediaType_ = gensupport.DetermineContentType(rdr, mediaType)
	c.resumableBuffer_ = gensupport.NewResumableBuffer(rdr, googleapi.DefaultUploadChunkSize)
	c.media_ = nil
	c.mediaSize_ = size
	return c
}

// ProgressUpdater provides a callback function that will be called
// after every chunk. It should be a low-latency function in order to
// not slow down the upload operation. This should only be called when
// using ResumableMedia (as opposed to Media).
func (c *FilesUpdateCall) ProgressUpdater(pu googleapi.ProgressUpdater) *FilesUpdateCall {
	c.progressUpdater_ = pu
	return c
}

// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *FilesUpdateCall) Fields(s ...googleapi.Field) *FilesUpdateCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}

// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
// This context will supersede any context previously provided to the
// ResumableMedia method.
func (c *FilesUpdateCall) Context(ctx context.Context) *FilesUpdateCall {
	c.ctx_ = ctx
	return c
}

func (c *FilesUpdateCall) doRequest(alt string) (*http.Response, error) {
	var body io.Reader = nil
	body, err := googleapi.WithoutDataWrapper.JSONReader(c.file)
	if err != nil {
		return nil, err
	}
	ctype := "application/json"
	c.urlParams_.Set("alt", alt)
	urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}")
	if c.media_ != nil || c.resumableBuffer_ != nil {
		urls = strings.Replace(urls, "https://www.googleapis.com/", "https://www.googleapis.com/upload/", 1)
		protocol := "multipart"
		if c.resumableBuffer_ != nil {
			protocol = "resumable"
		}
		c.urlParams_.Set("uploadType", protocol)
	}
	urls += "?" + c.urlParams_.Encode()
	if c.media_ != nil {
		var combined io.ReadCloser
		combined, ctype = gensupport.CombineBodyMedia(body, ctype, c.media_, c.mediaType_)
		defer combined.Close()
		body = combined
	}
	req, _ := http.NewRequest("PATCH", urls, body)
	googleapi.Expand(req.URL, map[string]string{
		"fileId": c.fileId,
	})
	if c.resumableBuffer_ != nil && c.mediaType_ != "" {
		req.Header.Set("X-Upload-Content-Type", c.mediaType_)
	}
	req.Header.Set("Content-Type", ctype)
	req.Header.Set("User-Agent", c.s.userAgent())
	if c.ctx_ != nil {
		return ctxhttp.Do(c.ctx_, c.s.client, req)
	}
	return c.s.client.Do(req)
}

// Do executes the "drive.files.update" call.
// Exactly one of *File or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *File.ServerResponse.Header or (if a response was returned at all) in
// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check
// whether the returned error was because http.StatusNotModified was
// returned.
func (c *FilesUpdateCall) Do(opts ...googleapi.CallOption) (*File, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	if c.resumableBuffer_ != nil {
		loc := res.Header.Get("Location")
		rx := &gensupport.ResumableUpload{
			Client:    c.s.client,
			UserAgent: c.s.userAgent(),
			URI:       loc,
			Media:     c.resumableBuffer_,
			MediaType: c.mediaType_,
			Callback: func(curr int64) {
				if c.progressUpdater_ != nil {
					c.progressUpdater_(curr, c.mediaSize_)
				}
			},
		}
		ctx := c.ctx_
		if ctx == nil {
			ctx = context.TODO()
		}
		res, err = rx.Upload(ctx)
		if err != nil {
			return nil, err
		}
		defer res.Body.Close()
		if err := googleapi.CheckResponse(res); err != nil {
			return nil, err
		}
	}
	ret := &File{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Updates a file's metadata and/or content with patch semantics.",
	//   "httpMethod": "PATCH",
	//   "id": "drive.files.update",
	//   "mediaUpload": {
	//     "accept": [
	//       "*/*"
	//     ],
	//     "maxSize": "5120GB",
	//     "protocols": {
	//       "resumable": {
	//         "multipart": true,
	//         "path": "/resumable/upload/drive/v3/files/{fileId}"
	//       },
	//       "simple": {
	//         "multipart": true,
	//         "path": "/upload/drive/v3/files/{fileId}"
	//       }
	//     }
	//   },
	//   "parameterOrder": [
	//     "fileId"
	//   ],
	//   "parameters": {
	//     "addParents": {
	//       "description": "A comma-separated list of parent IDs to add.",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "fileId": {
	//       "description": "The ID of the file.",
	//       "location": "path",
	//       "required": true,
	//       "type": "string"
	//     },
	//     "keepRevisionForever": {
	//       "default": "false",
	//       "description": "Whether to set the 'keepForever' field in the new head revision. This is only applicable to files with binary content in Drive.",
	//       "location": "query",
	//       "type": "boolean"
	//     },
	//     "ocrLanguage": {
	//       "description": "A language hint for OCR processing during image import (ISO 639-1 code).",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "removeParents": {
	//       "description": "A comma-separated list of parent IDs to remove.",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "useContentAsIndexableText": {
	//       "default": "false",
	//       "description": "Whether to use the uploaded content as indexable text.",
	//       "location": "query",
	//       "type": "boolean"
	//     }
	//   },
	//   "path": "files/{fileId}",
	//   "request": {
	//     "$ref": "File"
	//   },
	//   "response": {
	//     "$ref": "File"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/drive",
	//     "https://www.googleapis.com/auth/drive.appdata",
	//     "https://www.googleapis.com/auth/drive.file",
	//     "https://www.googleapis.com/auth/drive.metadata",
	//     "https://www.googleapis.com/auth/drive.scripts"
	//   ],
	//   "supportsMediaUpload": true
	// }

}

// method id "drive.files.watch":

type FilesWatchCall struct {
	s          *Service
	fileId     string
	channel    *Channel
	urlParams_ gensupport.URLParams
	ctx_       context.Context
}

// Watch: Subscribes to changes to a file.
func (r *FilesService) Watch(fileId string, channel *Channel) *FilesWatchCall {
	c := &FilesWatchCall{s: r.s, urlParams_: make(gensupport.URLParams)}
	c.fileId = fileId
	c.channel = channel
	return c
}

// AcknowledgeAbuse sets the optional parameter "acknowledgeAbuse":
// Whether the user is acknowledging the risk of downloading known
// malware or other abusive files. This is only applicable when
// alt=media.
func (c *FilesWatchCall) AcknowledgeAbuse(acknowledgeAbuse bool) *FilesWatchCall {
	c.urlParams_.Set("acknowledgeAbuse", fmt.Sprint(acknowledgeAbuse))
	return c
}

// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *FilesWatchCall) Fields(s ...googleapi.Field) *FilesWatchCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}

// Context sets the context to be used in this call's Do and Download
// methods. Any pending HTTP request will be aborted if the provided
// context is canceled.
func (c *FilesWatchCall) Context(ctx context.Context) *FilesWatchCall {
	c.ctx_ = ctx
	return c
}

func (c *FilesWatchCall) doRequest(alt string) (*http.Response, error) {
	var body io.Reader = nil
	body, err := googleapi.WithoutDataWrapper.JSONReader(c.channel)
	if err != nil {
		return nil, err
	}
	ctype := "application/json"
	c.urlParams_.Set("alt", alt)
	urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}/watch")
	urls += "?" + c.urlParams_.Encode()
	req, _ := http.NewRequest("POST", urls, body)
	googleapi.Expand(req.URL, map[string]string{
		"fileId": c.fileId,
	})
	req.Header.Set("Content-Type", ctype)
	req.Header.Set("User-Agent", c.s.userAgent())
	if c.ctx_ != nil {
		return ctxhttp.Do(c.ctx_, c.s.client, req)
	}
	return c.s.client.Do(req)
}

// Download fetches the API endpoint's "media" value, instead of the normal
// API response value. If the returned error is nil, the Response is guaranteed to
// have a 2xx status code. Callers must close the Response.Body as usual.
func (c *FilesWatchCall) Download(opts ...googleapi.CallOption) (*http.Response, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("media") + if err != nil { + return nil, err + } + if err := googleapi.CheckMediaResponse(res); err != nil { + res.Body.Close() + return nil, err + } + return res, nil +} + +// Do executes the "drive.files.watch" call. +// Exactly one of *Channel or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Channel.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *FilesWatchCall) Do(opts ...googleapi.CallOption) (*Channel, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Channel{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Subscribes to changes to a file", + // "httpMethod": "POST", + // "id": "drive.files.watch", + // "parameterOrder": [ + // "fileId" + // ], + // "parameters": { + // "acknowledgeAbuse": { + // "default": "false", + // "description": "Whether the user is acknowledging the risk of downloading known malware or other abusive files. 
This is only applicable when alt=media.", + // "location": "query", + // "type": "boolean" + // }, + // "fileId": { + // "description": "The ID of the file.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "files/{fileId}/watch", + // "request": { + // "$ref": "Channel", + // "parameterName": "resource" + // }, + // "response": { + // "$ref": "Channel" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/drive", + // "https://www.googleapis.com/auth/drive.appdata", + // "https://www.googleapis.com/auth/drive.file", + // "https://www.googleapis.com/auth/drive.metadata", + // "https://www.googleapis.com/auth/drive.metadata.readonly", + // "https://www.googleapis.com/auth/drive.photos.readonly", + // "https://www.googleapis.com/auth/drive.readonly" + // ], + // "supportsMediaDownload": true, + // "supportsSubscription": true, + // "useMediaDownloadService": true + // } + +} + +// method id "drive.permissions.create": + +type PermissionsCreateCall struct { + s *Service + fileId string + permission *Permission + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Create: Creates a permission for a file. +func (r *PermissionsService) Create(fileId string, permission *Permission) *PermissionsCreateCall { + c := &PermissionsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.fileId = fileId + c.permission = permission + return c +} + +// EmailMessage sets the optional parameter "emailMessage": A custom +// message to include in the notification email. +func (c *PermissionsCreateCall) EmailMessage(emailMessage string) *PermissionsCreateCall { + c.urlParams_.Set("emailMessage", emailMessage) + return c +} + +// SendNotificationEmail sets the optional parameter +// "sendNotificationEmail": Whether to send a notification email when +// sharing to users or groups. This defaults to true for users and +// groups, and is not allowed for other requests. 
It must not be +// disabled for ownership transfers. +func (c *PermissionsCreateCall) SendNotificationEmail(sendNotificationEmail bool) *PermissionsCreateCall { + c.urlParams_.Set("sendNotificationEmail", fmt.Sprint(sendNotificationEmail)) + return c +} + +// TransferOwnership sets the optional parameter "transferOwnership": +// Whether to transfer ownership to the specified user and downgrade the +// current owner to a writer. This parameter is required as an +// acknowledgement of the side effect. +func (c *PermissionsCreateCall) TransferOwnership(transferOwnership bool) *PermissionsCreateCall { + c.urlParams_.Set("transferOwnership", fmt.Sprint(transferOwnership)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *PermissionsCreateCall) Fields(s ...googleapi.Field) *PermissionsCreateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *PermissionsCreateCall) Context(ctx context.Context) *PermissionsCreateCall { + c.ctx_ = ctx + return c +} + +func (c *PermissionsCreateCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.permission) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}/permissions") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "fileId": c.fileId, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "drive.permissions.create" call. +// Exactly one of *Permission or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Permission.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *PermissionsCreateCall) Do(opts ...googleapi.CallOption) (*Permission, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Permission{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a permission for a file.", + // "httpMethod": "POST", + // "id": "drive.permissions.create", + // "parameterOrder": [ + // "fileId" + // ], + // "parameters": { + // "emailMessage": { + // "description": "A custom message to include in the notification email.", + // "location": "query", + // "type": "string" + // }, + // "fileId": { + // "description": "The ID of the file.", + // "location": "path", + // "required": true, + // "type": "string" + // }, 
+ // "sendNotificationEmail": { + // "description": "Whether to send a notification email when sharing to users or groups. This defaults to true for users and groups, and is not allowed for other requests. It must not be disabled for ownership transfers.", + // "location": "query", + // "type": "boolean" + // }, + // "transferOwnership": { + // "default": "false", + // "description": "Whether to transfer ownership to the specified user and downgrade the current owner to a writer. This parameter is required as an acknowledgement of the side effect.", + // "location": "query", + // "type": "boolean" + // } + // }, + // "path": "files/{fileId}/permissions", + // "request": { + // "$ref": "Permission" + // }, + // "response": { + // "$ref": "Permission" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/drive", + // "https://www.googleapis.com/auth/drive.file" + // ] + // } + +} + +// method id "drive.permissions.delete": + +type PermissionsDeleteCall struct { + s *Service + fileId string + permissionId string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Delete: Deletes a permission. +func (r *PermissionsService) Delete(fileId string, permissionId string) *PermissionsDeleteCall { + c := &PermissionsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.fileId = fileId + c.permissionId = permissionId + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *PermissionsDeleteCall) Fields(s ...googleapi.Field) *PermissionsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *PermissionsDeleteCall) Context(ctx context.Context) *PermissionsDeleteCall { + c.ctx_ = ctx + return c +} + +func (c *PermissionsDeleteCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}/permissions/{permissionId}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "fileId": c.fileId, + "permissionId": c.permissionId, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "drive.permissions.delete" call. +func (c *PermissionsDeleteCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if err != nil { + return err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return err + } + return nil + // { + // "description": "Deletes a permission.", + // "httpMethod": "DELETE", + // "id": "drive.permissions.delete", + // "parameterOrder": [ + // "fileId", + // "permissionId" + // ], + // "parameters": { + // "fileId": { + // "description": "The ID of the file.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "permissionId": { + // "description": "The ID of the permission.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "files/{fileId}/permissions/{permissionId}", + // "scopes": [ + // "https://www.googleapis.com/auth/drive", + // "https://www.googleapis.com/auth/drive.file" + // ] + // } + +} + +// method id "drive.permissions.get": + +type PermissionsGetCall struct { + s *Service + fileId string + permissionId string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// Get: Gets a permission by ID. 
+func (r *PermissionsService) Get(fileId string, permissionId string) *PermissionsGetCall { + c := &PermissionsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.fileId = fileId + c.permissionId = permissionId + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *PermissionsGetCall) Fields(s ...googleapi.Field) *PermissionsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *PermissionsGetCall) IfNoneMatch(entityTag string) *PermissionsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *PermissionsGetCall) Context(ctx context.Context) *PermissionsGetCall { + c.ctx_ = ctx + return c +} + +func (c *PermissionsGetCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}/permissions/{permissionId}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "fileId": c.fileId, + "permissionId": c.permissionId, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "drive.permissions.get" call. 
+// Exactly one of *Permission or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Permission.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *PermissionsGetCall) Do(opts ...googleapi.CallOption) (*Permission, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Permission{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets a permission by ID.", + // "httpMethod": "GET", + // "id": "drive.permissions.get", + // "parameterOrder": [ + // "fileId", + // "permissionId" + // ], + // "parameters": { + // "fileId": { + // "description": "The ID of the file.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "permissionId": { + // "description": "The ID of the permission.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "files/{fileId}/permissions/{permissionId}", + // "response": { + // "$ref": "Permission" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/drive", + // "https://www.googleapis.com/auth/drive.file", + // "https://www.googleapis.com/auth/drive.metadata", + // "https://www.googleapis.com/auth/drive.metadata.readonly", + // 
"https://www.googleapis.com/auth/drive.photos.readonly", + // "https://www.googleapis.com/auth/drive.readonly" + // ] + // } + +} + +// method id "drive.permissions.list": + +type PermissionsListCall struct { + s *Service + fileId string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// List: Lists a file's permissions. +func (r *PermissionsService) List(fileId string) *PermissionsListCall { + c := &PermissionsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.fileId = fileId + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *PermissionsListCall) Fields(s ...googleapi.Field) *PermissionsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *PermissionsListCall) IfNoneMatch(entityTag string) *PermissionsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *PermissionsListCall) Context(ctx context.Context) *PermissionsListCall { + c.ctx_ = ctx + return c +} + +func (c *PermissionsListCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}/permissions") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "fileId": c.fileId, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "drive.permissions.list" call. +// Exactly one of *PermissionList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *PermissionList.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *PermissionsListCall) Do(opts ...googleapi.CallOption) (*PermissionList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &PermissionList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists a file's permissions.", + // "httpMethod": "GET", + // "id": "drive.permissions.list", + // "parameterOrder": [ + // "fileId" + // ], + // "parameters": { + // "fileId": { + // "description": "The ID of the file.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "files/{fileId}/permissions", + // "response": { + // "$ref": "PermissionList" + // }, + // 
"scopes": [ + // "https://www.googleapis.com/auth/drive", + // "https://www.googleapis.com/auth/drive.file", + // "https://www.googleapis.com/auth/drive.metadata", + // "https://www.googleapis.com/auth/drive.metadata.readonly", + // "https://www.googleapis.com/auth/drive.photos.readonly", + // "https://www.googleapis.com/auth/drive.readonly" + // ] + // } + +} + +// method id "drive.permissions.update": + +type PermissionsUpdateCall struct { + s *Service + fileId string + permissionId string + permission *Permission + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Update: Updates a permission with patch semantics. +func (r *PermissionsService) Update(fileId string, permissionId string, permission *Permission) *PermissionsUpdateCall { + c := &PermissionsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.fileId = fileId + c.permissionId = permissionId + c.permission = permission + return c +} + +// TransferOwnership sets the optional parameter "transferOwnership": +// Whether to transfer ownership to the specified user and downgrade the +// current owner to a writer. This parameter is required as an +// acknowledgement of the side effect. +func (c *PermissionsUpdateCall) TransferOwnership(transferOwnership bool) *PermissionsUpdateCall { + c.urlParams_.Set("transferOwnership", fmt.Sprint(transferOwnership)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *PermissionsUpdateCall) Fields(s ...googleapi.Field) *PermissionsUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *PermissionsUpdateCall) Context(ctx context.Context) *PermissionsUpdateCall { + c.ctx_ = ctx + return c +} + +func (c *PermissionsUpdateCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.permission) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}/permissions/{permissionId}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PATCH", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "fileId": c.fileId, + "permissionId": c.permissionId, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "drive.permissions.update" call. +// Exactly one of *Permission or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Permission.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *PermissionsUpdateCall) Do(opts ...googleapi.CallOption) (*Permission, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Permission{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates a permission with patch semantics.", + // "httpMethod": "PATCH", + // "id": "drive.permissions.update", + // "parameterOrder": [ + // "fileId", + // "permissionId" + // ], + // "parameters": { + // "fileId": { + // "description": "The ID of the file.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "permissionId": { + // "description": "The ID of the permission.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "transferOwnership": { + // "default": "false", + // "description": "Whether to transfer ownership to the specified user and downgrade the current owner to a writer. This parameter is required as an acknowledgement of the side effect.", + // "location": "query", + // "type": "boolean" + // } + // }, + // "path": "files/{fileId}/permissions/{permissionId}", + // "request": { + // "$ref": "Permission" + // }, + // "response": { + // "$ref": "Permission" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/drive", + // "https://www.googleapis.com/auth/drive.file" + // ] + // } + +} + +// method id "drive.replies.create": + +type RepliesCreateCall struct { + s *Service + fileId string + commentId string + reply *Reply + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Create: Creates a new reply to a comment. 
+func (r *RepliesService) Create(fileId string, commentId string, reply *Reply) *RepliesCreateCall { + c := &RepliesCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.fileId = fileId + c.commentId = commentId + c.reply = reply + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RepliesCreateCall) Fields(s ...googleapi.Field) *RepliesCreateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RepliesCreateCall) Context(ctx context.Context) *RepliesCreateCall { + c.ctx_ = ctx + return c +} + +func (c *RepliesCreateCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.reply) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}/comments/{commentId}/replies") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "fileId": c.fileId, + "commentId": c.commentId, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "drive.replies.create" call. +// Exactly one of *Reply or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Reply.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. 
+func (c *RepliesCreateCall) Do(opts ...googleapi.CallOption) (*Reply, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Reply{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a new reply to a comment.", + // "httpMethod": "POST", + // "id": "drive.replies.create", + // "parameterOrder": [ + // "fileId", + // "commentId" + // ], + // "parameters": { + // "commentId": { + // "description": "The ID of the comment.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "fileId": { + // "description": "The ID of the file.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "files/{fileId}/comments/{commentId}/replies", + // "request": { + // "$ref": "Reply" + // }, + // "response": { + // "$ref": "Reply" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/drive", + // "https://www.googleapis.com/auth/drive.file" + // ] + // } + +} + +// method id "drive.replies.delete": + +type RepliesDeleteCall struct { + s *Service + fileId string + commentId string + replyId string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Delete: Deletes a reply. 
+func (r *RepliesService) Delete(fileId string, commentId string, replyId string) *RepliesDeleteCall { + c := &RepliesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.fileId = fileId + c.commentId = commentId + c.replyId = replyId + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RepliesDeleteCall) Fields(s ...googleapi.Field) *RepliesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RepliesDeleteCall) Context(ctx context.Context) *RepliesDeleteCall { + c.ctx_ = ctx + return c +} + +func (c *RepliesDeleteCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}/comments/{commentId}/replies/{replyId}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "fileId": c.fileId, + "commentId": c.commentId, + "replyId": c.replyId, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "drive.replies.delete" call. +func (c *RepliesDeleteCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if err != nil { + return err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return err + } + return nil + // { + // "description": "Deletes a reply.", + // "httpMethod": "DELETE", + // "id": "drive.replies.delete", + // "parameterOrder": [ + // "fileId", + // "commentId", + // "replyId" + // ], + // "parameters": { + // "commentId": { + // "description": "The ID of the comment.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "fileId": { + // "description": "The ID of the file.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "replyId": { + // "description": "The ID of the reply.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "files/{fileId}/comments/{commentId}/replies/{replyId}", + // "scopes": [ + // "https://www.googleapis.com/auth/drive", + // "https://www.googleapis.com/auth/drive.file" + // ] + // } + +} + +// method id "drive.replies.get": + +type RepliesGetCall struct { + s *Service + fileId string + commentId string + replyId string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// Get: Gets a reply by ID. +func (r *RepliesService) Get(fileId string, commentId string, replyId string) *RepliesGetCall { + c := &RepliesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.fileId = fileId + c.commentId = commentId + c.replyId = replyId + return c +} + +// IncludeDeleted sets the optional parameter "includeDeleted": Whether +// to return deleted replies. Deleted replies will not include their +// original content. +func (c *RepliesGetCall) IncludeDeleted(includeDeleted bool) *RepliesGetCall { + c.urlParams_.Set("includeDeleted", fmt.Sprint(includeDeleted)) + return c +} + +// Fields allows partial responses to be retrieved. 
See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RepliesGetCall) Fields(s ...googleapi.Field) *RepliesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RepliesGetCall) IfNoneMatch(entityTag string) *RepliesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RepliesGetCall) Context(ctx context.Context) *RepliesGetCall { + c.ctx_ = ctx + return c +} + +func (c *RepliesGetCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}/comments/{commentId}/replies/{replyId}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "fileId": c.fileId, + "commentId": c.commentId, + "replyId": c.replyId, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "drive.replies.get" call. +// Exactly one of *Reply or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Reply.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *RepliesGetCall) Do(opts ...googleapi.CallOption) (*Reply, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Reply{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets a reply by ID.", + // "httpMethod": "GET", + // "id": "drive.replies.get", + // "parameterOrder": [ + // "fileId", + // "commentId", + // "replyId" + // ], + // "parameters": { + // "commentId": { + // "description": "The ID of the comment.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "fileId": { + // "description": "The ID of the file.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "includeDeleted": { + // "default": "false", + // "description": "Whether to return deleted replies. 
Deleted replies will not include their original content.", + // "location": "query", + // "type": "boolean" + // }, + // "replyId": { + // "description": "The ID of the reply.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "files/{fileId}/comments/{commentId}/replies/{replyId}", + // "response": { + // "$ref": "Reply" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/drive", + // "https://www.googleapis.com/auth/drive.file", + // "https://www.googleapis.com/auth/drive.readonly" + // ] + // } + +} + +// method id "drive.replies.list": + +type RepliesListCall struct { + s *Service + fileId string + commentId string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// List: Lists a comment's replies. +func (r *RepliesService) List(fileId string, commentId string) *RepliesListCall { + c := &RepliesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.fileId = fileId + c.commentId = commentId + return c +} + +// IncludeDeleted sets the optional parameter "includeDeleted": Whether +// to include deleted replies. Deleted replies will not include their +// original content. +func (c *RepliesListCall) IncludeDeleted(includeDeleted bool) *RepliesListCall { + c.urlParams_.Set("includeDeleted", fmt.Sprint(includeDeleted)) + return c +} + +// PageSize sets the optional parameter "pageSize": The maximum number +// of replies to return per page. +func (c *RepliesListCall) PageSize(pageSize int64) *RepliesListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": The token for +// continuing a previous list request on the next page. This should be +// set to the value of 'nextPageToken' from the previous response. +func (c *RepliesListCall) PageToken(pageToken string) *RepliesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. 
See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RepliesListCall) Fields(s ...googleapi.Field) *RepliesListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RepliesListCall) IfNoneMatch(entityTag string) *RepliesListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RepliesListCall) Context(ctx context.Context) *RepliesListCall { + c.ctx_ = ctx + return c +} + +func (c *RepliesListCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}/comments/{commentId}/replies") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "fileId": c.fileId, + "commentId": c.commentId, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "drive.replies.list" call. +// Exactly one of *ReplyList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *ReplyList.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RepliesListCall) Do(opts ...googleapi.CallOption) (*ReplyList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ReplyList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists a comment's replies.", + // "httpMethod": "GET", + // "id": "drive.replies.list", + // "parameterOrder": [ + // "fileId", + // "commentId" + // ], + // "parameters": { + // "commentId": { + // "description": "The ID of the comment.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "fileId": { + // "description": "The ID of the file.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "includeDeleted": { + // "default": "false", + // "description": "Whether to include deleted replies. Deleted replies will not include their original content.", + // "location": "query", + // "type": "boolean" + // }, + // "pageSize": { + // "default": "20", + // "description": "The maximum number of replies to return per page.", + // "format": "int32", + // "location": "query", + // "maximum": "100", + // "minimum": "1", + // "type": "integer" + // }, + // "pageToken": { + // "description": "The token for continuing a previous list request on the next page. 
This should be set to the value of 'nextPageToken' from the previous response.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "files/{fileId}/comments/{commentId}/replies", + // "response": { + // "$ref": "ReplyList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/drive", + // "https://www.googleapis.com/auth/drive.file", + // "https://www.googleapis.com/auth/drive.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *RepliesListCall) Pages(ctx context.Context, f func(*ReplyList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "drive.replies.update": + +type RepliesUpdateCall struct { + s *Service + fileId string + commentId string + replyId string + reply *Reply + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Update: Updates a reply with patch semantics. +func (r *RepliesService) Update(fileId string, commentId string, replyId string, reply *Reply) *RepliesUpdateCall { + c := &RepliesUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.fileId = fileId + c.commentId = commentId + c.replyId = replyId + c.reply = reply + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RepliesUpdateCall) Fields(s ...googleapi.Field) *RepliesUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. 
Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RepliesUpdateCall) Context(ctx context.Context) *RepliesUpdateCall { + c.ctx_ = ctx + return c +} + +func (c *RepliesUpdateCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.reply) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}/comments/{commentId}/replies/{replyId}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PATCH", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "fileId": c.fileId, + "commentId": c.commentId, + "replyId": c.replyId, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "drive.replies.update" call. +// Exactly one of *Reply or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Reply.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *RepliesUpdateCall) Do(opts ...googleapi.CallOption) (*Reply, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Reply{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates a reply with patch semantics.", + // "httpMethod": "PATCH", + // "id": "drive.replies.update", + // "parameterOrder": [ + // "fileId", + // "commentId", + // "replyId" + // ], + // "parameters": { + // "commentId": { + // "description": "The ID of the comment.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "fileId": { + // "description": "The ID of the file.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "replyId": { + // "description": "The ID of the reply.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "files/{fileId}/comments/{commentId}/replies/{replyId}", + // "request": { + // "$ref": "Reply" + // }, + // "response": { + // "$ref": "Reply" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/drive", + // "https://www.googleapis.com/auth/drive.file" + // ] + // } + +} + +// method id "drive.revisions.delete": + +type RevisionsDeleteCall struct { + s *Service + fileId string + revisionId string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Delete: Permanently deletes a revision. This method is only +// applicable to files with binary content in Drive. 
+func (r *RevisionsService) Delete(fileId string, revisionId string) *RevisionsDeleteCall { + c := &RevisionsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.fileId = fileId + c.revisionId = revisionId + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RevisionsDeleteCall) Fields(s ...googleapi.Field) *RevisionsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RevisionsDeleteCall) Context(ctx context.Context) *RevisionsDeleteCall { + c.ctx_ = ctx + return c +} + +func (c *RevisionsDeleteCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}/revisions/{revisionId}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "fileId": c.fileId, + "revisionId": c.revisionId, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "drive.revisions.delete" call. +func (c *RevisionsDeleteCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if err != nil { + return err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return err + } + return nil + // { + // "description": "Permanently deletes a revision. 
This method is only applicable to files with binary content in Drive.", + // "httpMethod": "DELETE", + // "id": "drive.revisions.delete", + // "parameterOrder": [ + // "fileId", + // "revisionId" + // ], + // "parameters": { + // "fileId": { + // "description": "The ID of the file.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "revisionId": { + // "description": "The ID of the revision.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "files/{fileId}/revisions/{revisionId}", + // "scopes": [ + // "https://www.googleapis.com/auth/drive", + // "https://www.googleapis.com/auth/drive.appdata", + // "https://www.googleapis.com/auth/drive.file" + // ] + // } + +} + +// method id "drive.revisions.get": + +type RevisionsGetCall struct { + s *Service + fileId string + revisionId string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// Get: Gets a revision's metadata or content by ID. +func (r *RevisionsService) Get(fileId string, revisionId string) *RevisionsGetCall { + c := &RevisionsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.fileId = fileId + c.revisionId = revisionId + return c +} + +// AcknowledgeAbuse sets the optional parameter "acknowledgeAbuse": +// Whether the user is acknowledging the risk of downloading known +// malware or other abusive files. This is only applicable when +// alt=media. +func (c *RevisionsGetCall) AcknowledgeAbuse(acknowledgeAbuse bool) *RevisionsGetCall { + c.urlParams_.Set("acknowledgeAbuse", fmt.Sprint(acknowledgeAbuse)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *RevisionsGetCall) Fields(s ...googleapi.Field) *RevisionsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RevisionsGetCall) IfNoneMatch(entityTag string) *RevisionsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do and Download +// methods. Any pending HTTP request will be aborted if the provided +// context is canceled. +func (c *RevisionsGetCall) Context(ctx context.Context) *RevisionsGetCall { + c.ctx_ = ctx + return c +} + +func (c *RevisionsGetCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}/revisions/{revisionId}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "fileId": c.fileId, + "revisionId": c.revisionId, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Download fetches the API endpoint's "media" value, instead of the normal +// API response value. If the returned error is nil, the Response is guaranteed to +// have a 2xx status code. Callers must close the Response.Body as usual. +func (c *RevisionsGetCall) Download(opts ...googleapi.CallOption) (*http.Response, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("media") + if err != nil { + return nil, err + } + if err := googleapi.CheckMediaResponse(res); err != nil { + res.Body.Close() + return nil, err + } + return res, nil +} + +// Do executes the "drive.revisions.get" call. +// Exactly one of *Revision or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Revision.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RevisionsGetCall) Do(opts ...googleapi.CallOption) (*Revision, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Revision{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets a revision's metadata or content by ID.", + // "httpMethod": "GET", + // "id": "drive.revisions.get", + // "parameterOrder": [ + // "fileId", + // "revisionId" + // ], + // "parameters": { + // "acknowledgeAbuse": { + // "default": "false", + // "description": "Whether the user is acknowledging the risk of downloading known malware or other abusive files. 
This is only applicable when alt=media.", + // "location": "query", + // "type": "boolean" + // }, + // "fileId": { + // "description": "The ID of the file.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "revisionId": { + // "description": "The ID of the revision.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "files/{fileId}/revisions/{revisionId}", + // "response": { + // "$ref": "Revision" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/drive", + // "https://www.googleapis.com/auth/drive.appdata", + // "https://www.googleapis.com/auth/drive.file", + // "https://www.googleapis.com/auth/drive.metadata", + // "https://www.googleapis.com/auth/drive.metadata.readonly", + // "https://www.googleapis.com/auth/drive.photos.readonly", + // "https://www.googleapis.com/auth/drive.readonly" + // ], + // "supportsMediaDownload": true, + // "useMediaDownloadService": true + // } + +} + +// method id "drive.revisions.list": + +type RevisionsListCall struct { + s *Service + fileId string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// List: Lists a file's revisions. +func (r *RevisionsService) List(fileId string) *RevisionsListCall { + c := &RevisionsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.fileId = fileId + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RevisionsListCall) Fields(s ...googleapi.Field) *RevisionsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. 
Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RevisionsListCall) IfNoneMatch(entityTag string) *RevisionsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RevisionsListCall) Context(ctx context.Context) *RevisionsListCall { + c.ctx_ = ctx + return c +} + +func (c *RevisionsListCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}/revisions") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "fileId": c.fileId, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "drive.revisions.list" call. +// Exactly one of *RevisionList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *RevisionList.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RevisionsListCall) Do(opts ...googleapi.CallOption) (*RevisionList, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &RevisionList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists a file's revisions.", + // "httpMethod": "GET", + // "id": "drive.revisions.list", + // "parameterOrder": [ + // "fileId" + // ], + // "parameters": { + // "fileId": { + // "description": "The ID of the file.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "files/{fileId}/revisions", + // "response": { + // "$ref": "RevisionList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/drive", + // "https://www.googleapis.com/auth/drive.appdata", + // "https://www.googleapis.com/auth/drive.file", + // "https://www.googleapis.com/auth/drive.metadata", + // "https://www.googleapis.com/auth/drive.metadata.readonly", + // "https://www.googleapis.com/auth/drive.photos.readonly", + // "https://www.googleapis.com/auth/drive.readonly" + // ] + // } + +} + +// method id "drive.revisions.update": + +type RevisionsUpdateCall struct { + s *Service + fileId string + revisionId string + revision *Revision + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Update: Updates a revision with patch semantics. 
+func (r *RevisionsService) Update(fileId string, revisionId string, revision *Revision) *RevisionsUpdateCall { + c := &RevisionsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.fileId = fileId + c.revisionId = revisionId + c.revision = revision + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RevisionsUpdateCall) Fields(s ...googleapi.Field) *RevisionsUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RevisionsUpdateCall) Context(ctx context.Context) *RevisionsUpdateCall { + c.ctx_ = ctx + return c +} + +func (c *RevisionsUpdateCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.revision) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}/revisions/{revisionId}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PATCH", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "fileId": c.fileId, + "revisionId": c.revisionId, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "drive.revisions.update" call. +// Exactly one of *Revision or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Revision.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RevisionsUpdateCall) Do(opts ...googleapi.CallOption) (*Revision, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Revision{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates a revision with patch semantics.", + // "httpMethod": "PATCH", + // "id": "drive.revisions.update", + // "parameterOrder": [ + // "fileId", + // "revisionId" + // ], + // "parameters": { + // "fileId": { + // "description": "The ID of the file.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "revisionId": { + // "description": "The ID of the revision.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "files/{fileId}/revisions/{revisionId}", + // "request": { + // "$ref": "Revision" + // }, + // "response": { + // "$ref": "Revision" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/drive", + // "https://www.googleapis.com/auth/drive.appdata", + // "https://www.googleapis.com/auth/drive.file" + // ] + // } + +} diff --git a/vendor/google.golang.org/api/gensupport/backoff.go b/vendor/google.golang.org/api/gensupport/backoff.go new file mode 100644 index 00000000..13561404 --- /dev/null +++ b/vendor/google.golang.org/api/gensupport/backoff.go @@ -0,0 +1,46 @@ +// Copyright 2016 The Go 
Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gensupport + +import ( + "math/rand" + "time" +) + +type BackoffStrategy interface { + // Pause returns the duration of the next pause and true if the operation should be + // retried, or false if no further retries should be attempted. + Pause() (time.Duration, bool) + + // Reset restores the strategy to its initial state. + Reset() +} + +// ExponentialBackoff performs exponential backoff as per https://en.wikipedia.org/wiki/Exponential_backoff. +// The initial pause time is given by Base. +// Once the total pause time exceeds Max, Pause will indicate no further retries. +type ExponentialBackoff struct { + Base time.Duration + Max time.Duration + total time.Duration + n uint +} + +func (eb *ExponentialBackoff) Pause() (time.Duration, bool) { + if eb.total > eb.Max { + return 0, false + } + + // The next pause is selected from randomly from [0, 2^n * Base). + d := time.Duration(rand.Int63n((1 << eb.n) * int64(eb.Base))) + eb.total += d + eb.n++ + return d, true +} + +func (eb *ExponentialBackoff) Reset() { + eb.n = 0 + eb.total = 0 +} diff --git a/vendor/google.golang.org/api/gensupport/buffer.go b/vendor/google.golang.org/api/gensupport/buffer.go new file mode 100644 index 00000000..4b8ec142 --- /dev/null +++ b/vendor/google.golang.org/api/gensupport/buffer.go @@ -0,0 +1,77 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gensupport + +import ( + "bytes" + "io" + + "google.golang.org/api/googleapi" +) + +// ResumableBuffer buffers data from an io.Reader to support uploading media in retryable chunks. +type ResumableBuffer struct { + media io.Reader + + chunk []byte // The current chunk which is pending upload. The capacity is the chunk size. 
+ err error // Any error generated when populating chunk by reading media. + + // The absolute position of chunk in the underlying media. + off int64 +} + +func NewResumableBuffer(media io.Reader, chunkSize int) *ResumableBuffer { + return &ResumableBuffer{media: media, chunk: make([]byte, 0, chunkSize)} +} + +// Chunk returns the current buffered chunk, the offset in the underlying media +// from which the chunk is drawn, and the size of the chunk. +// Successive calls to Chunk return the same chunk between calls to Next. +func (rb *ResumableBuffer) Chunk() (chunk io.Reader, off int64, size int, err error) { + // There may already be data in chunk if Next has not been called since the previous call to Chunk. + if rb.err == nil && len(rb.chunk) == 0 { + rb.err = rb.loadChunk() + } + return bytes.NewReader(rb.chunk), rb.off, len(rb.chunk), rb.err +} + +// loadChunk will read from media into chunk, up to the capacity of chunk. +func (rb *ResumableBuffer) loadChunk() error { + bufSize := cap(rb.chunk) + rb.chunk = rb.chunk[:bufSize] + + read := 0 + var err error + for err == nil && read < bufSize { + var n int + n, err = rb.media.Read(rb.chunk[read:]) + read += n + } + rb.chunk = rb.chunk[:read] + return err +} + +// Next advances to the next chunk, which will be returned by the next call to Chunk. +// Calls to Next without a corresponding prior call to Chunk will have no effect. +func (rb *ResumableBuffer) Next() { + rb.off += int64(len(rb.chunk)) + rb.chunk = rb.chunk[0:0] +} + +type readerTyper struct { + io.Reader + googleapi.ContentTyper +} + +// ReaderAtToReader adapts a ReaderAt to be used as a Reader. +// If ra implements googleapi.ContentTyper, then the returned reader +// will also implement googleapi.ContentTyper, delegating to ra. 
+func ReaderAtToReader(ra io.ReaderAt, size int64) io.Reader { + r := io.NewSectionReader(ra, 0, size) + if typer, ok := ra.(googleapi.ContentTyper); ok { + return readerTyper{r, typer} + } + return r +} diff --git a/vendor/google.golang.org/api/gensupport/doc.go b/vendor/google.golang.org/api/gensupport/doc.go new file mode 100644 index 00000000..752c4b41 --- /dev/null +++ b/vendor/google.golang.org/api/gensupport/doc.go @@ -0,0 +1,10 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package gensupport is an internal implementation detail used by code +// generated by the google-api-go-generator tool. +// +// This package may be modified at any time without regard for backwards +// compatibility. It should not be used directly by API users. +package gensupport diff --git a/vendor/google.golang.org/api/gensupport/json.go b/vendor/google.golang.org/api/gensupport/json.go new file mode 100644 index 00000000..dd7bcd2e --- /dev/null +++ b/vendor/google.golang.org/api/gensupport/json.go @@ -0,0 +1,172 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gensupport + +import ( + "encoding/json" + "fmt" + "reflect" + "strings" +) + +// MarshalJSON returns a JSON encoding of schema containing only selected fields. +// A field is selected if: +// * it has a non-empty value, or +// * its field name is present in forceSendFields, and +// * it is not a nil pointer or nil interface. +// The JSON key for each selected field is taken from the field's json: struct tag. 
+func MarshalJSON(schema interface{}, forceSendFields []string) ([]byte, error) { + if len(forceSendFields) == 0 { + return json.Marshal(schema) + } + + mustInclude := make(map[string]struct{}) + for _, f := range forceSendFields { + mustInclude[f] = struct{}{} + } + + dataMap, err := schemaToMap(schema, mustInclude) + if err != nil { + return nil, err + } + return json.Marshal(dataMap) +} + +func schemaToMap(schema interface{}, mustInclude map[string]struct{}) (map[string]interface{}, error) { + m := make(map[string]interface{}) + s := reflect.ValueOf(schema) + st := s.Type() + + for i := 0; i < s.NumField(); i++ { + jsonTag := st.Field(i).Tag.Get("json") + if jsonTag == "" { + continue + } + tag, err := parseJSONTag(jsonTag) + if err != nil { + return nil, err + } + if tag.ignore { + continue + } + + v := s.Field(i) + f := st.Field(i) + if !includeField(v, f, mustInclude) { + continue + } + + // nil maps are treated as empty maps. + if f.Type.Kind() == reflect.Map && v.IsNil() { + m[tag.apiName] = map[string]string{} + continue + } + + // nil slices are treated as empty slices. + if f.Type.Kind() == reflect.Slice && v.IsNil() { + m[tag.apiName] = []bool{} + continue + } + + if tag.stringFormat { + m[tag.apiName] = formatAsString(v, f.Type.Kind()) + } else { + m[tag.apiName] = v.Interface() + } + } + return m, nil +} + +// formatAsString returns a string representation of v, dereferencing it first if possible. +func formatAsString(v reflect.Value, kind reflect.Kind) string { + if kind == reflect.Ptr && !v.IsNil() { + v = v.Elem() + } + + return fmt.Sprintf("%v", v.Interface()) +} + +// jsonTag represents a restricted version of the struct tag format used by encoding/json. +// It is used to describe the JSON encoding of fields in a Schema struct. +type jsonTag struct { + apiName string + stringFormat bool + ignore bool +} + +// parseJSONTag parses a restricted version of the struct tag format used by encoding/json. 
+// The format of the tag must match that generated by the Schema.writeSchemaStruct method +// in the api generator. +func parseJSONTag(val string) (jsonTag, error) { + if val == "-" { + return jsonTag{ignore: true}, nil + } + + var tag jsonTag + + i := strings.Index(val, ",") + if i == -1 || val[:i] == "" { + return tag, fmt.Errorf("malformed json tag: %s", val) + } + + tag = jsonTag{ + apiName: val[:i], + } + + switch val[i+1:] { + case "omitempty": + case "omitempty,string": + tag.stringFormat = true + default: + return tag, fmt.Errorf("malformed json tag: %s", val) + } + + return tag, nil +} + +// Reports whether the struct field "f" with value "v" should be included in JSON output. +func includeField(v reflect.Value, f reflect.StructField, mustInclude map[string]struct{}) bool { + // The regular JSON encoding of a nil pointer is "null", which means "delete this field". + // Therefore, we could enable field deletion by honoring pointer fields' presence in the mustInclude set. + // However, many fields are not pointers, so there would be no way to delete these fields. + // Rather than partially supporting field deletion, we ignore mustInclude for nil pointer fields. + // Deletion will be handled by a separate mechanism. + if f.Type.Kind() == reflect.Ptr && v.IsNil() { + return false + } + + // The "any" type is represented as an interface{}. If this interface + // is nil, there is no reasonable representation to send. We ignore + // these fields, for the same reasons as given above for pointers. + if f.Type.Kind() == reflect.Interface && v.IsNil() { + return false + } + + _, ok := mustInclude[f.Name] + return ok || !isEmptyValue(v) +} + +// isEmptyValue reports whether v is the empty value for its type. This +// implementation is based on that of the encoding/json package, but its +// correctness does not depend on it being identical. 
What's important is that +// this function return false in situations where v should not be sent as part +// of a PATCH operation. +func isEmptyValue(v reflect.Value) bool { + switch v.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + } + return false +} diff --git a/vendor/google.golang.org/api/gensupport/media.go b/vendor/google.golang.org/api/gensupport/media.go new file mode 100644 index 00000000..817f46f5 --- /dev/null +++ b/vendor/google.golang.org/api/gensupport/media.go @@ -0,0 +1,200 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gensupport + +import ( + "fmt" + "io" + "io/ioutil" + "mime/multipart" + "net/http" + "net/textproto" + + "google.golang.org/api/googleapi" +) + +const sniffBuffSize = 512 + +func newContentSniffer(r io.Reader) *contentSniffer { + return &contentSniffer{r: r} +} + +// contentSniffer wraps a Reader, and reports the content type determined by sniffing up to 512 bytes from the Reader. +type contentSniffer struct { + r io.Reader + start []byte // buffer for the sniffed bytes. + err error // set to any error encountered while reading bytes to be sniffed. + + ctype string // set on first sniff. + sniffed bool // set to true on first sniff. +} + +func (cs *contentSniffer) Read(p []byte) (n int, err error) { + // Ensure that the content type is sniffed before any data is consumed from Reader. 
+ _, _ = cs.ContentType() + + if len(cs.start) > 0 { + n := copy(p, cs.start) + cs.start = cs.start[n:] + return n, nil + } + + // We may have read some bytes into start while sniffing, even if the read ended in an error. + // We should first return those bytes, then the error. + if cs.err != nil { + return 0, cs.err + } + + // Now we have handled all bytes that were buffered while sniffing. Now just delegate to the underlying reader. + return cs.r.Read(p) +} + +// ContentType returns the sniffed content type, and whether the content type was succesfully sniffed. +func (cs *contentSniffer) ContentType() (string, bool) { + if cs.sniffed { + return cs.ctype, cs.ctype != "" + } + cs.sniffed = true + // If ReadAll hits EOF, it returns err==nil. + cs.start, cs.err = ioutil.ReadAll(io.LimitReader(cs.r, sniffBuffSize)) + + // Don't try to detect the content type based on possibly incomplete data. + if cs.err != nil { + return "", false + } + + cs.ctype = http.DetectContentType(cs.start) + return cs.ctype, true +} + +// DetermineContentType determines the content type of the supplied reader. +// If the content type is already known, it can be specified via ctype. +// Otherwise, the content of media will be sniffed to determine the content type. +// If media implements googleapi.ContentTyper (deprecated), this will be used +// instead of sniffing the content. +// After calling DetectContentType the caller must not perform further reads on +// media, but rather read from the Reader that is returned. +func DetermineContentType(media io.Reader, ctype string) (io.Reader, string) { + // Note: callers could avoid calling DetectContentType if ctype != "", + // but doing the check inside this function reduces the amount of + // generated code. + if ctype != "" { + return media, ctype + } + + // For backwards compatability, allow clients to set content + // type by providing a ContentTyper for media. 
+ if typer, ok := media.(googleapi.ContentTyper); ok { + return media, typer.ContentType() + } + + sniffer := newContentSniffer(media) + if ctype, ok := sniffer.ContentType(); ok { + return sniffer, ctype + } + // If content type could not be sniffed, reads from sniffer will eventually fail with an error. + return sniffer, "" +} + +type typeReader struct { + io.Reader + typ string +} + +// multipartReader combines the contents of multiple readers to creat a multipart/related HTTP body. +// Close must be called if reads from the multipartReader are abandoned before reaching EOF. +type multipartReader struct { + pr *io.PipeReader + pipeOpen bool + ctype string +} + +func newMultipartReader(parts []typeReader) *multipartReader { + mp := &multipartReader{pipeOpen: true} + var pw *io.PipeWriter + mp.pr, pw = io.Pipe() + mpw := multipart.NewWriter(pw) + mp.ctype = "multipart/related; boundary=" + mpw.Boundary() + go func() { + for _, part := range parts { + w, err := mpw.CreatePart(typeHeader(part.typ)) + if err != nil { + mpw.Close() + pw.CloseWithError(fmt.Errorf("googleapi: CreatePart failed: %v", err)) + return + } + _, err = io.Copy(w, part.Reader) + if err != nil { + mpw.Close() + pw.CloseWithError(fmt.Errorf("googleapi: Copy failed: %v", err)) + return + } + } + + mpw.Close() + pw.Close() + }() + return mp +} + +func (mp *multipartReader) Read(data []byte) (n int, err error) { + return mp.pr.Read(data) +} + +func (mp *multipartReader) Close() error { + if !mp.pipeOpen { + return nil + } + mp.pipeOpen = false + return mp.pr.Close() +} + +// CombineBodyMedia combines a json body with media content to create a multipart/related HTTP body. +// It returns a ReadCloser containing the combined body, and the overall "multipart/related" content type, with random boundary. +// +// The caller must call Close on the returned ReadCloser if reads are abandoned before reaching EOF. 
+func CombineBodyMedia(body io.Reader, bodyContentType string, media io.Reader, mediaContentType string) (io.ReadCloser, string) { + mp := newMultipartReader([]typeReader{ + {body, bodyContentType}, + {media, mediaContentType}, + }) + return mp, mp.ctype +} + +func typeHeader(contentType string) textproto.MIMEHeader { + h := make(textproto.MIMEHeader) + if contentType != "" { + h.Set("Content-Type", contentType) + } + return h +} + +// PrepareUpload determines whether the data in the supplied reader should be +// uploaded in a single request, or in sequential chunks. +// chunkSize is the size of the chunk that media should be split into. +// If chunkSize is non-zero and the contents of media do not fit in a single +// chunk (or there is an error reading media), then media will be returned as a +// ResumableBuffer. Otherwise, media will be returned as a Reader. +// +// After PrepareUpload has been called, media should no longer be used: the +// media content should be accessed via one of the return values. +func PrepareUpload(media io.Reader, chunkSize int) (io.Reader, + *ResumableBuffer) { + if chunkSize == 0 { // do not chunk + return media, nil + } + + rb := NewResumableBuffer(media, chunkSize) + rdr, _, _, err := rb.Chunk() + + if err == io.EOF { // we can upload this in a single request + return rdr, nil + } + // err might be a non-EOF error. If it is, the next call to rb.Chunk will + // return the same error. Returning a ResumableBuffer ensures that this error + // will be handled at some point. + + return nil, rb +} diff --git a/vendor/google.golang.org/api/gensupport/params.go b/vendor/google.golang.org/api/gensupport/params.go new file mode 100644 index 00000000..3b3c7439 --- /dev/null +++ b/vendor/google.golang.org/api/gensupport/params.go @@ -0,0 +1,50 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package gensupport + +import ( + "net/url" + + "google.golang.org/api/googleapi" +) + +// URLParams is a simplified replacement for url.Values +// that safely builds up URL parameters for encoding. +type URLParams map[string][]string + +// Get returns the first value for the given key, or "". +func (u URLParams) Get(key string) string { + vs := u[key] + if len(vs) == 0 { + return "" + } + return vs[0] +} + +// Set sets the key to value. +// It replaces any existing values. +func (u URLParams) Set(key, value string) { + u[key] = []string{value} +} + +// SetMulti sets the key to an array of values. +// It replaces any existing values. +// Note that values must not be modified after calling SetMulti +// so the caller is responsible for making a copy if necessary. +func (u URLParams) SetMulti(key string, values []string) { + u[key] = values +} + +// Encode encodes the values into ``URL encoded'' form +// ("bar=baz&foo=quux") sorted by key. +func (u URLParams) Encode() string { + return url.Values(u).Encode() +} + +func SetOptions(u URLParams, opts ...googleapi.CallOption) { + for _, o := range opts { + u.Set(o.Get()) + } +} diff --git a/vendor/google.golang.org/api/gensupport/resumable.go b/vendor/google.golang.org/api/gensupport/resumable.go new file mode 100644 index 00000000..b3e774aa --- /dev/null +++ b/vendor/google.golang.org/api/gensupport/resumable.go @@ -0,0 +1,198 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gensupport + +import ( + "fmt" + "io" + "net/http" + "sync" + "time" + + "golang.org/x/net/context" + "golang.org/x/net/context/ctxhttp" +) + +const ( + // statusResumeIncomplete is the code returned by the Google uploader + // when the transfer is not yet complete. + statusResumeIncomplete = 308 + + // statusTooManyRequests is returned by the storage API if the + // per-project limits have been temporarily exceeded. 
The request + // should be retried. + // https://cloud.google.com/storage/docs/json_api/v1/status-codes#standardcodes + statusTooManyRequests = 429 +) + +// ResumableUpload is used by the generated APIs to provide resumable uploads. +// It is not used by developers directly. +type ResumableUpload struct { + Client *http.Client + // URI is the resumable resource destination provided by the server after specifying "&uploadType=resumable". + URI string + UserAgent string // User-Agent for header of the request + // Media is the object being uploaded. + Media *ResumableBuffer + // MediaType defines the media type, e.g. "image/jpeg". + MediaType string + + mu sync.Mutex // guards progress + progress int64 // number of bytes uploaded so far + + // Callback is an optional function that will be periodically called with the cumulative number of bytes uploaded. + Callback func(int64) + + // If not specified, a default exponential backoff strategy will be used. + Backoff BackoffStrategy +} + +// Progress returns the number of bytes uploaded at this point. +func (rx *ResumableUpload) Progress() int64 { + rx.mu.Lock() + defer rx.mu.Unlock() + return rx.progress +} + +// doUploadRequest performs a single HTTP request to upload data. +// off specifies the offset in rx.Media from which data is drawn. +// size is the number of bytes in data. +// final specifies whether data is the final chunk to be uploaded. 
+func (rx *ResumableUpload) doUploadRequest(ctx context.Context, data io.Reader, off, size int64, final bool) (*http.Response, error) { + req, err := http.NewRequest("POST", rx.URI, data) + if err != nil { + return nil, err + } + + req.ContentLength = size + var contentRange string + if final { + if size == 0 { + contentRange = fmt.Sprintf("bytes */%v", off) + } else { + contentRange = fmt.Sprintf("bytes %v-%v/%v", off, off+size-1, off+size) + } + } else { + contentRange = fmt.Sprintf("bytes %v-%v/*", off, off+size-1) + } + req.Header.Set("Content-Range", contentRange) + req.Header.Set("Content-Type", rx.MediaType) + req.Header.Set("User-Agent", rx.UserAgent) + return ctxhttp.Do(ctx, rx.Client, req) + +} + +// reportProgress calls a user-supplied callback to report upload progress. +// If old==updated, the callback is not called. +func (rx *ResumableUpload) reportProgress(old, updated int64) { + if updated-old == 0 { + return + } + rx.mu.Lock() + rx.progress = updated + rx.mu.Unlock() + if rx.Callback != nil { + rx.Callback(updated) + } +} + +// transferChunk performs a single HTTP request to upload a single chunk from rx.Media. +func (rx *ResumableUpload) transferChunk(ctx context.Context) (*http.Response, error) { + chunk, off, size, err := rx.Media.Chunk() + + done := err == io.EOF + if !done && err != nil { + return nil, err + } + + res, err := rx.doUploadRequest(ctx, chunk, off, int64(size), done) + if err != nil { + return res, err + } + + if res.StatusCode == statusResumeIncomplete || res.StatusCode == http.StatusOK { + rx.reportProgress(off, off+int64(size)) + } + + if res.StatusCode == statusResumeIncomplete { + rx.Media.Next() + } + return res, nil +} + +func contextDone(ctx context.Context) bool { + select { + case <-ctx.Done(): + return true + default: + return false + } +} + +// Upload starts the process of a resumable upload with a cancellable context. 
+// It retries using the provided back off strategy until cancelled or the +// strategy indicates to stop retrying. +// It is called from the auto-generated API code and is not visible to the user. +// rx is private to the auto-generated API code. +// Exactly one of resp or err will be nil. If resp is non-nil, the caller must call resp.Body.Close. +func (rx *ResumableUpload) Upload(ctx context.Context) (resp *http.Response, err error) { + var pause time.Duration + backoff := rx.Backoff + if backoff == nil { + backoff = DefaultBackoffStrategy() + } + + for { + // Ensure that we return in the case of cancelled context, even if pause is 0. + if contextDone(ctx) { + return nil, ctx.Err() + } + select { + case <-ctx.Done(): + return nil, ctx.Err() + case <-time.After(pause): + } + + resp, err = rx.transferChunk(ctx) + + var status int + if resp != nil { + status = resp.StatusCode + } + + // Check if we should retry the request. + if shouldRetry(status, err) { + var retry bool + pause, retry = backoff.Pause() + if retry { + if resp != nil && resp.Body != nil { + resp.Body.Close() + } + continue + } + } + + // If the chunk was uploaded successfully, but there's still + // more to go, upload the next chunk without any delay. + if status == statusResumeIncomplete { + pause = 0 + backoff.Reset() + resp.Body.Close() + continue + } + + // It's possible for err and resp to both be non-nil here, but we expose a simpler + // contract to our callers: exactly one of resp and err will be non-nil. This means + // that any response body must be closed here before returning a non-nil error. 
+ if err != nil { + if resp != nil && resp.Body != nil { + resp.Body.Close() + } + return nil, err + } + + return resp, nil + } +} diff --git a/vendor/google.golang.org/api/gensupport/retry.go b/vendor/google.golang.org/api/gensupport/retry.go new file mode 100644 index 00000000..7f83d1da --- /dev/null +++ b/vendor/google.golang.org/api/gensupport/retry.go @@ -0,0 +1,77 @@ +package gensupport + +import ( + "io" + "net" + "net/http" + "time" + + "golang.org/x/net/context" +) + +// Retry invokes the given function, retrying it multiple times if the connection failed or +// the HTTP status response indicates the request should be attempted again. ctx may be nil. +func Retry(ctx context.Context, f func() (*http.Response, error), backoff BackoffStrategy) (*http.Response, error) { + for { + resp, err := f() + + var status int + if resp != nil { + status = resp.StatusCode + } + + // Return if we shouldn't retry. + pause, retry := backoff.Pause() + if !shouldRetry(status, err) || !retry { + return resp, err + } + + // Ensure the response body is closed, if any. + if resp != nil && resp.Body != nil { + resp.Body.Close() + } + + // Pause, but still listen to ctx.Done if context is not nil. + var done <-chan struct{} + if ctx != nil { + done = ctx.Done() + } + select { + case <-done: + return nil, ctx.Err() + case <-time.After(pause): + } + } +} + +// DefaultBackoffStrategy returns a default strategy to use for retrying failed upload requests. +func DefaultBackoffStrategy() BackoffStrategy { + return &ExponentialBackoff{ + Base: 250 * time.Millisecond, + Max: 16 * time.Second, + } +} + +// shouldRetry returns true if the HTTP response / error indicates that the +// request should be attempted again. +func shouldRetry(status int, err error) bool { + // Retry for 5xx response codes. 
+ if 500 <= status && status < 600 { + return true + } + + // Retry on statusTooManyRequests{ + if status == statusTooManyRequests { + return true + } + + // Retry on unexpected EOFs and temporary network errors. + if err == io.ErrUnexpectedEOF { + return true + } + if err, ok := err.(net.Error); ok { + return err.Temporary() + } + + return false +} diff --git a/vendor/google.golang.org/api/googleapi/googleapi.go b/vendor/google.golang.org/api/googleapi/googleapi.go new file mode 100644 index 00000000..858537e0 --- /dev/null +++ b/vendor/google.golang.org/api/googleapi/googleapi.go @@ -0,0 +1,432 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package googleapi contains the common code shared by all Google API +// libraries. +package googleapi + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strings" + + "google.golang.org/api/googleapi/internal/uritemplates" +) + +// ContentTyper is an interface for Readers which know (or would like +// to override) their Content-Type. If a media body doesn't implement +// ContentTyper, the type is sniffed from the content using +// http.DetectContentType. +type ContentTyper interface { + ContentType() string +} + +// A SizeReaderAt is a ReaderAt with a Size method. +// An io.SectionReader implements SizeReaderAt. +type SizeReaderAt interface { + io.ReaderAt + Size() int64 +} + +// ServerResponse is embedded in each Do response and +// provides the HTTP status code and header sent by the server. +type ServerResponse struct { + // HTTPStatusCode is the server's response status code. + // When using a resource method's Do call, this will always be in the 2xx range. + HTTPStatusCode int + // Header contains the response header fields from the server. + Header http.Header +} + +const ( + Version = "0.5" + + // UserAgent is the header string used to identify this package. 
+ UserAgent = "google-api-go-client/" + Version + + // The default chunk size to use for resumable uplods if not specified by the user. + DefaultUploadChunkSize = 8 * 1024 * 1024 + + // The minimum chunk size that can be used for resumable uploads. All + // user-specified chunk sizes must be multiple of this value. + MinUploadChunkSize = 256 * 1024 +) + +// Error contains an error response from the server. +type Error struct { + // Code is the HTTP response status code and will always be populated. + Code int `json:"code"` + // Message is the server response message and is only populated when + // explicitly referenced by the JSON server response. + Message string `json:"message"` + // Body is the raw response returned by the server. + // It is often but not always JSON, depending on how the request fails. + Body string + // Header contains the response header fields from the server. + Header http.Header + + Errors []ErrorItem +} + +// ErrorItem is a detailed error code & message from the Google API frontend. +type ErrorItem struct { + // Reason is the typed error code. For example: "some_example". + Reason string `json:"reason"` + // Message is the human-readable description of the error. 
+ Message string `json:"message"` +} + +func (e *Error) Error() string { + if len(e.Errors) == 0 && e.Message == "" { + return fmt.Sprintf("googleapi: got HTTP response code %d with body: %v", e.Code, e.Body) + } + var buf bytes.Buffer + fmt.Fprintf(&buf, "googleapi: Error %d: ", e.Code) + if e.Message != "" { + fmt.Fprintf(&buf, "%s", e.Message) + } + if len(e.Errors) == 0 { + return strings.TrimSpace(buf.String()) + } + if len(e.Errors) == 1 && e.Errors[0].Message == e.Message { + fmt.Fprintf(&buf, ", %s", e.Errors[0].Reason) + return buf.String() + } + fmt.Fprintln(&buf, "\nMore details:") + for _, v := range e.Errors { + fmt.Fprintf(&buf, "Reason: %s, Message: %s\n", v.Reason, v.Message) + } + return buf.String() +} + +type errorReply struct { + Error *Error `json:"error"` +} + +// CheckResponse returns an error (of type *Error) if the response +// status code is not 2xx. +func CheckResponse(res *http.Response) error { + if res.StatusCode >= 200 && res.StatusCode <= 299 { + return nil + } + slurp, err := ioutil.ReadAll(res.Body) + if err == nil { + jerr := new(errorReply) + err = json.Unmarshal(slurp, jerr) + if err == nil && jerr.Error != nil { + if jerr.Error.Code == 0 { + jerr.Error.Code = res.StatusCode + } + jerr.Error.Body = string(slurp) + return jerr.Error + } + } + return &Error{ + Code: res.StatusCode, + Body: string(slurp), + Header: res.Header, + } +} + +// IsNotModified reports whether err is the result of the +// server replying with http.StatusNotModified. +// Such error values are sometimes returned by "Do" methods +// on calls when If-None-Match is used. +func IsNotModified(err error) bool { + if err == nil { + return false + } + ae, ok := err.(*Error) + return ok && ae.Code == http.StatusNotModified +} + +// CheckMediaResponse returns an error (of type *Error) if the response +// status code is not 2xx. Unlike CheckResponse it does not assume the +// body is a JSON error document. 
+func CheckMediaResponse(res *http.Response) error { + if res.StatusCode >= 200 && res.StatusCode <= 299 { + return nil + } + slurp, _ := ioutil.ReadAll(io.LimitReader(res.Body, 1<<20)) + res.Body.Close() + return &Error{ + Code: res.StatusCode, + Body: string(slurp), + } +} + +type MarshalStyle bool + +var WithDataWrapper = MarshalStyle(true) +var WithoutDataWrapper = MarshalStyle(false) + +func (wrap MarshalStyle) JSONReader(v interface{}) (io.Reader, error) { + buf := new(bytes.Buffer) + if wrap { + buf.Write([]byte(`{"data": `)) + } + err := json.NewEncoder(buf).Encode(v) + if err != nil { + return nil, err + } + if wrap { + buf.Write([]byte(`}`)) + } + return buf, nil +} + +// endingWithErrorReader from r until it returns an error. If the +// final error from r is io.EOF and e is non-nil, e is used instead. +type endingWithErrorReader struct { + r io.Reader + e error +} + +func (er endingWithErrorReader) Read(p []byte) (n int, err error) { + n, err = er.r.Read(p) + if err == io.EOF && er.e != nil { + err = er.e + } + return +} + +// countingWriter counts the number of bytes it receives to write, but +// discards them. +type countingWriter struct { + n *int64 +} + +func (w countingWriter) Write(p []byte) (int, error) { + *w.n += int64(len(p)) + return len(p), nil +} + +// ProgressUpdater is a function that is called upon every progress update of a resumable upload. +// This is the only part of a resumable upload (from googleapi) that is usable by the developer. +// The remaining usable pieces of resumable uploads is exposed in each auto-generated API. +type ProgressUpdater func(current, total int64) + +type MediaOption interface { + setOptions(o *MediaOptions) +} + +type contentTypeOption string + +func (ct contentTypeOption) setOptions(o *MediaOptions) { + o.ContentType = string(ct) + if o.ContentType == "" { + o.ForceEmptyContentType = true + } +} + +// ContentType returns a MediaOption which sets the Content-Type header for media uploads. 
+// If ctype is empty, the Content-Type header will be omitted. +func ContentType(ctype string) MediaOption { + return contentTypeOption(ctype) +} + +type chunkSizeOption int + +func (cs chunkSizeOption) setOptions(o *MediaOptions) { + size := int(cs) + if size%MinUploadChunkSize != 0 { + size += MinUploadChunkSize - (size % MinUploadChunkSize) + } + o.ChunkSize = size +} + +// ChunkSize returns a MediaOption which sets the chunk size for media uploads. +// size will be rounded up to the nearest multiple of 256K. +// Media which contains fewer than size bytes will be uploaded in a single request. +// Media which contains size bytes or more will be uploaded in separate chunks. +// If size is zero, media will be uploaded in a single request. +func ChunkSize(size int) MediaOption { + return chunkSizeOption(size) +} + +// MediaOptions stores options for customizing media upload. It is not used by developers directly. +type MediaOptions struct { + ContentType string + ForceEmptyContentType bool + + ChunkSize int +} + +// ProcessMediaOptions stores options from opts in a MediaOptions. +// It is not used by developers directly. +func ProcessMediaOptions(opts []MediaOption) *MediaOptions { + mo := &MediaOptions{ChunkSize: DefaultUploadChunkSize} + for _, o := range opts { + o.setOptions(mo) + } + return mo +} + +func ResolveRelative(basestr, relstr string) string { + u, _ := url.Parse(basestr) + rel, _ := url.Parse(relstr) + u = u.ResolveReference(rel) + us := u.String() + us = strings.Replace(us, "%7B", "{", -1) + us = strings.Replace(us, "%7D", "}", -1) + return us +} + +// has4860Fix is whether this Go environment contains the fix for +// http://golang.org/issue/4860 +var has4860Fix bool + +// init initializes has4860Fix by checking the behavior of the net/http package. 
+func init() { + r := http.Request{ + URL: &url.URL{ + Scheme: "http", + Opaque: "//opaque", + }, + } + b := &bytes.Buffer{} + r.Write(b) + has4860Fix = bytes.HasPrefix(b.Bytes(), []byte("GET http")) +} + +// SetOpaque sets u.Opaque from u.Path such that HTTP requests to it +// don't alter any hex-escaped characters in u.Path. +func SetOpaque(u *url.URL) { + u.Opaque = "//" + u.Host + u.Path + if !has4860Fix { + u.Opaque = u.Scheme + ":" + u.Opaque + } +} + +// Expand subsitutes any {encoded} strings in the URL passed in using +// the map supplied. +// +// This calls SetOpaque to avoid encoding of the parameters in the URL path. +func Expand(u *url.URL, expansions map[string]string) { + expanded, err := uritemplates.Expand(u.Path, expansions) + if err == nil { + u.Path = expanded + SetOpaque(u) + } +} + +// CloseBody is used to close res.Body. +// Prior to calling Close, it also tries to Read a small amount to see an EOF. +// Not seeing an EOF can prevent HTTP Transports from reusing connections. +func CloseBody(res *http.Response) { + if res == nil || res.Body == nil { + return + } + // Justification for 3 byte reads: two for up to "\r\n" after + // a JSON/XML document, and then 1 to see EOF if we haven't yet. + // TODO(bradfitz): detect Go 1.3+ and skip these reads. + // See https://codereview.appspot.com/58240043 + // and https://codereview.appspot.com/49570044 + buf := make([]byte, 1) + for i := 0; i < 3; i++ { + _, err := res.Body.Read(buf) + if err != nil { + break + } + } + res.Body.Close() + +} + +// VariantType returns the type name of the given variant. +// If the map doesn't contain the named key or the value is not a []interface{}, "" is returned. +// This is used to support "variant" APIs that can return one of a number of different types. +func VariantType(t map[string]interface{}) string { + s, _ := t["type"].(string) + return s +} + +// ConvertVariant uses the JSON encoder/decoder to fill in the struct 'dst' with the fields found in variant 'v'. 
// This is used to support "variant" APIs that can return one of a number of different types.
// It reports whether the conversion was successful.
func ConvertVariant(v map[string]interface{}, dst interface{}) bool {
	encoded, err := json.Marshal(v)
	if err != nil {
		return false
	}
	return json.Unmarshal(encoded, dst) == nil
}

// A Field names a field to be retrieved with a partial response.
// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
//
// Partial responses can dramatically reduce the amount of data that must be sent to your application.
// In order to request partial responses, you can specify the full list of fields
// that your application needs by adding the Fields option to your request.
//
// Field strings use camelCase with leading lower-case characters to identify fields within the response.
//
// For example, if your response has a "NextPageToken" and a slice of "Items" with "Id" fields,
// you could request just those fields like this:
//
//	svc.Events.List().Fields("nextPageToken", "items/id").Do()
//
// or if you were also interested in each Item's "Updated" field, you can combine them like this:
//
//	svc.Events.List().Fields("nextPageToken", "items(id,updated)").Do()
//
// More information about field formatting can be found here:
// https://developers.google.com/+/api/#fields-syntax
//
// Another way to find field names is through the Google API explorer:
// https://developers.google.com/apis-explorer/#p/
type Field string

// CombineFields joins the fields into a single comma-separated string.
func CombineFields(s []Field) string {
	parts := make([]string, 0, len(s))
	for _, f := range s {
		parts = append(parts, string(f))
	}
	return strings.Join(parts, ",")
}

// A CallOption is an optional argument to an API call.
// It should be treated as an opaque value by users of Google APIs.
//
// A CallOption is something that configures an API call in a way that is
// not specific to that API; for instance, controlling the quota user for
// an API call is common across many APIs, and is thus a CallOption.
type CallOption interface {
	Get() (key, value string)
}

// QuotaUser returns a CallOption that will set the quota user for a call.
// The quota user can be used by server-side applications to control accounting.
// It can be an arbitrary string up to 40 characters, and will override UserIP
// if both are provided.
func QuotaUser(u string) CallOption {
	return quotaUser(u)
}

// quotaUser implements CallOption for the "quotaUser" parameter.
type quotaUser string

func (qu quotaUser) Get() (string, string) {
	return "quotaUser", string(qu)
}

// UserIP returns a CallOption that will set the "userIp" parameter of a call.
// This should be the IP address of the originating request.
func UserIP(ip string) CallOption {
	return userIP(ip)
}

// userIP implements CallOption for the "userIp" parameter.
type userIP string

func (ip userIP) Get() (string, string) {
	return "userIp", string(ip)
}

// Trace returns a CallOption that enables diagnostic tracing for a call.
// traceToken is an ID supplied by Google support.
+func Trace(traceToken string) CallOption { return traceTok(traceToken) } + +type traceTok string + +func (t traceTok) Get() (string, string) { return "trace", "token:" + string(t) } + +// TODO: Fields too diff --git a/vendor/google.golang.org/api/googleapi/internal/uritemplates/LICENSE b/vendor/google.golang.org/api/googleapi/internal/uritemplates/LICENSE new file mode 100644 index 00000000..de9c88cb --- /dev/null +++ b/vendor/google.golang.org/api/googleapi/internal/uritemplates/LICENSE @@ -0,0 +1,18 @@ +Copyright (c) 2013 Joshua Tacoma + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/google.golang.org/api/googleapi/internal/uritemplates/uritemplates.go b/vendor/google.golang.org/api/googleapi/internal/uritemplates/uritemplates.go new file mode 100644 index 00000000..7c103ba1 --- /dev/null +++ b/vendor/google.golang.org/api/googleapi/internal/uritemplates/uritemplates.go @@ -0,0 +1,220 @@ +// Copyright 2013 Joshua Tacoma. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package uritemplates is a level 3 implementation of RFC 6570 (URI +// Template, http://tools.ietf.org/html/rfc6570). +// uritemplates does not support composite values (in Go: slices or maps) +// and so does not qualify as a level 4 implementation. +package uritemplates + +import ( + "bytes" + "errors" + "regexp" + "strconv" + "strings" +) + +var ( + unreserved = regexp.MustCompile("[^A-Za-z0-9\\-._~]") + reserved = regexp.MustCompile("[^A-Za-z0-9\\-._~:/?#[\\]@!$&'()*+,;=]") + validname = regexp.MustCompile("^([A-Za-z0-9_\\.]|%[0-9A-Fa-f][0-9A-Fa-f])+$") + hex = []byte("0123456789ABCDEF") +) + +func pctEncode(src []byte) []byte { + dst := make([]byte, len(src)*3) + for i, b := range src { + buf := dst[i*3 : i*3+3] + buf[0] = 0x25 + buf[1] = hex[b/16] + buf[2] = hex[b%16] + } + return dst +} + +func escape(s string, allowReserved bool) string { + if allowReserved { + return string(reserved.ReplaceAllFunc([]byte(s), pctEncode)) + } + return string(unreserved.ReplaceAllFunc([]byte(s), pctEncode)) +} + +// A uriTemplate is a parsed representation of a URI template. +type uriTemplate struct { + raw string + parts []templatePart +} + +// parse parses a URI template string into a uriTemplate object. 
+func parse(rawTemplate string) (*uriTemplate, error) { + split := strings.Split(rawTemplate, "{") + parts := make([]templatePart, len(split)*2-1) + for i, s := range split { + if i == 0 { + if strings.Contains(s, "}") { + return nil, errors.New("unexpected }") + } + parts[i].raw = s + continue + } + subsplit := strings.Split(s, "}") + if len(subsplit) != 2 { + return nil, errors.New("malformed template") + } + expression := subsplit[0] + var err error + parts[i*2-1], err = parseExpression(expression) + if err != nil { + return nil, err + } + parts[i*2].raw = subsplit[1] + } + return &uriTemplate{ + raw: rawTemplate, + parts: parts, + }, nil +} + +type templatePart struct { + raw string + terms []templateTerm + first string + sep string + named bool + ifemp string + allowReserved bool +} + +type templateTerm struct { + name string + explode bool + truncate int +} + +func parseExpression(expression string) (result templatePart, err error) { + switch expression[0] { + case '+': + result.sep = "," + result.allowReserved = true + expression = expression[1:] + case '.': + result.first = "." + result.sep = "." + expression = expression[1:] + case '/': + result.first = "/" + result.sep = "/" + expression = expression[1:] + case ';': + result.first = ";" + result.sep = ";" + result.named = true + expression = expression[1:] + case '?': + result.first = "?" 
+ result.sep = "&" + result.named = true + result.ifemp = "=" + expression = expression[1:] + case '&': + result.first = "&" + result.sep = "&" + result.named = true + result.ifemp = "=" + expression = expression[1:] + case '#': + result.first = "#" + result.sep = "," + result.allowReserved = true + expression = expression[1:] + default: + result.sep = "," + } + rawterms := strings.Split(expression, ",") + result.terms = make([]templateTerm, len(rawterms)) + for i, raw := range rawterms { + result.terms[i], err = parseTerm(raw) + if err != nil { + break + } + } + return result, err +} + +func parseTerm(term string) (result templateTerm, err error) { + // TODO(djd): Remove "*" suffix parsing once we check that no APIs have + // mistakenly used that attribute. + if strings.HasSuffix(term, "*") { + result.explode = true + term = term[:len(term)-1] + } + split := strings.Split(term, ":") + if len(split) == 1 { + result.name = term + } else if len(split) == 2 { + result.name = split[0] + var parsed int64 + parsed, err = strconv.ParseInt(split[1], 10, 0) + result.truncate = int(parsed) + } else { + err = errors.New("multiple colons in same term") + } + if !validname.MatchString(result.name) { + err = errors.New("not a valid name: " + result.name) + } + if result.explode && result.truncate > 0 { + err = errors.New("both explode and prefix modifers on same term") + } + return result, err +} + +// Expand expands a URI template with a set of values to produce a string. 
+func (t *uriTemplate) Expand(values map[string]string) string { + var buf bytes.Buffer + for _, p := range t.parts { + p.expand(&buf, values) + } + return buf.String() +} + +func (tp *templatePart) expand(buf *bytes.Buffer, values map[string]string) { + if len(tp.raw) > 0 { + buf.WriteString(tp.raw) + return + } + var first = true + for _, term := range tp.terms { + value, exists := values[term.name] + if !exists { + continue + } + if first { + buf.WriteString(tp.first) + first = false + } else { + buf.WriteString(tp.sep) + } + tp.expandString(buf, term, value) + } +} + +func (tp *templatePart) expandName(buf *bytes.Buffer, name string, empty bool) { + if tp.named { + buf.WriteString(name) + if empty { + buf.WriteString(tp.ifemp) + } else { + buf.WriteString("=") + } + } +} + +func (tp *templatePart) expandString(buf *bytes.Buffer, t templateTerm, s string) { + if len(s) > t.truncate && t.truncate > 0 { + s = s[:t.truncate] + } + tp.expandName(buf, t.name, len(s) == 0) + buf.WriteString(escape(s, tp.allowReserved)) +} diff --git a/vendor/google.golang.org/api/googleapi/internal/uritemplates/utils.go b/vendor/google.golang.org/api/googleapi/internal/uritemplates/utils.go new file mode 100644 index 00000000..eff260a6 --- /dev/null +++ b/vendor/google.golang.org/api/googleapi/internal/uritemplates/utils.go @@ -0,0 +1,13 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uritemplates + +func Expand(path string, values map[string]string) (string, error) { + template, err := parse(path) + if err != nil { + return "", err + } + return template.Expand(values), nil +} diff --git a/vendor/google.golang.org/api/googleapi/types.go b/vendor/google.golang.org/api/googleapi/types.go new file mode 100644 index 00000000..a02b4b07 --- /dev/null +++ b/vendor/google.golang.org/api/googleapi/types.go @@ -0,0 +1,182 @@ +// Copyright 2013 Google Inc. 
// All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package googleapi

import (
	"encoding/json"
	"strconv"
)

// Int64s is a slice of int64s that marshal as quoted strings in JSON.
type Int64s []int64

// UnmarshalJSON decodes a JSON array of quoted decimal strings into the slice.
func (s *Int64s) UnmarshalJSON(raw []byte) error {
	*s = (*s)[:0] // reuse the existing backing array
	var quoted []string
	if err := json.Unmarshal(raw, &quoted); err != nil {
		return err
	}
	for _, q := range quoted {
		v, err := strconv.ParseInt(q, 10, 64)
		if err != nil {
			return err
		}
		*s = append(*s, v)
	}
	return nil
}

// Int32s is a slice of int32s that marshal as quoted strings in JSON.
type Int32s []int32

// UnmarshalJSON decodes a JSON array of quoted decimal strings into the slice.
func (s *Int32s) UnmarshalJSON(raw []byte) error {
	*s = (*s)[:0] // reuse the existing backing array
	var quoted []string
	if err := json.Unmarshal(raw, &quoted); err != nil {
		return err
	}
	for _, q := range quoted {
		v, err := strconv.ParseInt(q, 10, 32)
		if err != nil {
			return err
		}
		*s = append(*s, int32(v))
	}
	return nil
}

// Uint64s is a slice of uint64s that marshal as quoted strings in JSON.
type Uint64s []uint64

// UnmarshalJSON decodes a JSON array of quoted decimal strings into the slice.
func (s *Uint64s) UnmarshalJSON(raw []byte) error {
	*s = (*s)[:0] // reuse the existing backing array
	var quoted []string
	if err := json.Unmarshal(raw, &quoted); err != nil {
		return err
	}
	for _, q := range quoted {
		v, err := strconv.ParseUint(q, 10, 64)
		if err != nil {
			return err
		}
		*s = append(*s, v)
	}
	return nil
}

// Uint32s is a slice of uint32s that marshal as quoted strings in JSON.
type Uint32s []uint32

// UnmarshalJSON decodes a JSON array of quoted decimal strings into the slice.
func (s *Uint32s) UnmarshalJSON(raw []byte) error {
	*s = (*s)[:0] // reuse the existing backing array
	var quoted []string
	if err := json.Unmarshal(raw, &quoted); err != nil {
		return err
	}
	for _, q := range quoted {
		v, err := strconv.ParseUint(q, 10, 32)
		if err != nil {
			return err
		}
		*s = append(*s, uint32(v))
	}
	return nil
}

// Float64s is a slice of float64s that marshal as quoted strings in JSON.
+type Float64s []float64 + +func (q *Float64s) UnmarshalJSON(raw []byte) error { + *q = (*q)[:0] + var ss []string + if err := json.Unmarshal(raw, &ss); err != nil { + return err + } + for _, s := range ss { + v, err := strconv.ParseFloat(s, 64) + if err != nil { + return err + } + *q = append(*q, float64(v)) + } + return nil +} + +func quotedList(n int, fn func(dst []byte, i int) []byte) ([]byte, error) { + dst := make([]byte, 0, 2+n*10) // somewhat arbitrary + dst = append(dst, '[') + for i := 0; i < n; i++ { + if i > 0 { + dst = append(dst, ',') + } + dst = append(dst, '"') + dst = fn(dst, i) + dst = append(dst, '"') + } + dst = append(dst, ']') + return dst, nil +} + +func (s Int64s) MarshalJSON() ([]byte, error) { + return quotedList(len(s), func(dst []byte, i int) []byte { + return strconv.AppendInt(dst, s[i], 10) + }) +} + +func (s Int32s) MarshalJSON() ([]byte, error) { + return quotedList(len(s), func(dst []byte, i int) []byte { + return strconv.AppendInt(dst, int64(s[i]), 10) + }) +} + +func (s Uint64s) MarshalJSON() ([]byte, error) { + return quotedList(len(s), func(dst []byte, i int) []byte { + return strconv.AppendUint(dst, s[i], 10) + }) +} + +func (s Uint32s) MarshalJSON() ([]byte, error) { + return quotedList(len(s), func(dst []byte, i int) []byte { + return strconv.AppendUint(dst, uint64(s[i]), 10) + }) +} + +func (s Float64s) MarshalJSON() ([]byte, error) { + return quotedList(len(s), func(dst []byte, i int) []byte { + return strconv.AppendFloat(dst, s[i], 'g', -1, 64) + }) +} + +/* + * Helper routines for simplifying the creation of optional fields of basic type. + */ + +// Bool is a helper routine that allocates a new bool value +// to store v and returns a pointer to it. +func Bool(v bool) *bool { return &v } + +// Int32 is a helper routine that allocates a new int32 value +// to store v and returns a pointer to it. 
func Int32(v int32) *int32 {
	return &v
}

// Int64 returns a pointer to a freshly allocated int64 holding v.
func Int64(v int64) *int64 {
	return &v
}

// Float64 returns a pointer to a freshly allocated float64 holding v.
func Float64(v float64) *float64 {
	return &v
}

// Uint32 returns a pointer to a freshly allocated uint32 holding v.
func Uint32(v uint32) *uint32 {
	return &v
}

// Uint64 returns a pointer to a freshly allocated uint64 holding v.
func Uint64(v uint64) *uint64 {
	return &v
}

// String returns a pointer to a freshly allocated string holding v.
func String(v string) *string {
	return &v
}