diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json deleted file mode 100644 index 2566322147ba..000000000000 --- a/Godeps/Godeps.json +++ /dev/null @@ -1,451 +0,0 @@ -{ - "ImportPath": "k8s.io/test-infra", - "GoVersion": "go1.9", - "GodepVersion": "v79", - "Packages": [ - "./prow/...", - "./gcsweb/...", - "./kubetest/...", - "./mungegithub/...", - "./testgrid/...", - "./velodrome/..." - ], - "Deps": [ - { - "ImportPath": "bitbucket.org/ww/goautoneg", - "Comment": "null-5", - "Rev": "'75cd24fc2f2c2a2088577d12123ddee5f54e0675'" - }, - { - "ImportPath": "cloud.google.com/go/compute/metadata", - "Comment": "v0.1.0-125-g64568e4", - "Rev": "64568e4898e813ad748cf9523d56a65209c7dc18" - }, - { - "ImportPath": "cloud.google.com/go/internal", - "Comment": "v0.1.0-125-g64568e4", - "Rev": "64568e4898e813ad748cf9523d56a65209c7dc18" - }, - { - "ImportPath": "cloud.google.com/go/pubsub", - "Comment": "v0.1.0-125-g64568e4", - "Rev": "64568e4898e813ad748cf9523d56a65209c7dc18" - }, - { - "ImportPath": "github.com/NYTimes/gziphandler", - "Rev": "63027b26b87e2ae2ce3810393d4b81021cfd3a35" - }, - { - "ImportPath": "github.com/sirupsen/logrus", - "Comment": "v0.10.0-28-ga283a10", - "Rev": "a283a10442df8dc09befd873fab202bf8a253d6a" - }, - { - "ImportPath": "github.com/arbovm/levenshtein", - "Rev": "48b4e1c0c4d0b8b1864f1bd2cd31bb20147e4636" - }, - { - "ImportPath": "github.com/beorn7/perks/quantile", - "Rev": "3ac7bf7a47d159a033b107610db8a1b6575507a4" - }, - { - "ImportPath": "github.com/bwmarrin/snowflake", - "Rev": "02cc386c183a1bf89dcac8c8008bb465f69d3340" - }, - { - "ImportPath": "github.com/ghodss/yaml", - "Rev": "73d445a93680fa1a78ae23a5839bad48f32ba1ee" - }, - { - "ImportPath": "github.com/golang/lint", - "Rev": "c5fb716d6688a859aae56d26d3e6070808df29f7" - }, - { - "ImportPath": "github.com/go-sql-driver/mysql", - "Comment": "v1.2-194-g7ebe0a5", - "Rev": "7ebe0a500653eeb1859664bed5e48dec1e164e73" - }, - { - "ImportPath": "github.com/golang/glog", - "Rev": "44145f04b68cf362d9c4df2182967c2275eaefed" - }, - { - "ImportPath": "github.com/golang/mock/gomock", - "Rev": "bd3c8e81be01eef76d4b503f5e687d2d1354d2d9" - }, - { - "ImportPath": "github.com/golang/protobuf/proto", - "Rev": "8ee79997227bf9b34611aee7946ae64735e6fd93" - }, - { - "ImportPath": "github.com/google/btree", - "Rev": "cc6329d4279e3f025a53a83c397d2339b5705c45" - }, - { - "ImportPath": "github.com/google/go-github/github", - "Rev": "7a51fb928f52a196d5f31daefb8a489453ef54ff" - }, - { - "ImportPath": "github.com/google/go-querystring/query", - "Rev": "547ef5ac979778feb2f760cdb5f4eae1a2207b86" - }, - { - "ImportPath": "github.com/google/gofuzz", - "Rev": "bbcb9da2d746f8bdbd6a936686a0a6067ada0ec5" - }, - { - "ImportPath": "github.com/gregjones/httpcache", - "Rev": "16db777d8ebea60a3e746a7e0703eaaa53f85f7f" - }, - { - "ImportPath": "github.com/gregjones/httpcache/diskcache", - "Rev": "16db777d8ebea60a3e746a7e0703eaaa53f85f7f" - }, - { - "ImportPath": "github.com/inconshreveable/mousetrap", - "Rev": "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75" - }, - { - "ImportPath": "github.com/influxdata/influxdb/client/v2", - "Comment": "v1.1.0-118-g049f9b42", - "Rev": "049f9b42e9a5ca4c0bcb4439a9f33fd83b485077" - }, - { - "ImportPath": "github.com/influxdata/influxdb/models", - "Comment": "v1.1.0-118-g049f9b42", - "Rev": "049f9b42e9a5ca4c0bcb4439a9f33fd83b485077" - }, - { - "ImportPath": "github.com/influxdata/influxdb/pkg/escape", - "Comment": "v1.1.0-118-g049f9b42", - "Rev": "049f9b42e9a5ca4c0bcb4439a9f33fd83b485077" - }, - { - "ImportPath": "github.com/jinzhu/gorm", 
- "Comment": "v1.0-132-g572d0a0", - "Rev": "572d0a0ab1eb75410a2729e96239152d1da7a91f" - }, - { - "ImportPath": "github.com/jinzhu/gorm/dialects/mysql", - "Comment": "v1.0-132-g572d0a0", - "Rev": "572d0a0ab1eb75410a2729e96239152d1da7a91f" - }, - { - "ImportPath": "github.com/jinzhu/gorm/dialects/sqlite", - "Comment": "v1.0-132-g572d0a0", - "Rev": "572d0a0ab1eb75410a2729e96239152d1da7a91f" - }, - { - "ImportPath": "github.com/jinzhu/inflection", - "Rev": "3272df6c21d04180007eb3349844c89a3856bc25" - }, - { - "ImportPath": "github.com/mattn/go-sqlite3", - "Comment": "v1.1.0-69-g38ee283", - "Rev": "38ee283dabf11c9cbdb968eebd79b1fa7acbabe6" - }, - { - "ImportPath": "github.com/matttproud/golang_protobuf_extensions/pbutil", - "Rev": "fc2b8d3a73c4867e51861bbdd5ae3c1f0869dd6a" - }, - { - "ImportPath": "github.com/peterbourgon/diskv", - "Comment": "v2.0.0-2-g5dfcb07", - "Rev": "5dfcb07a075adbaaa4094cddfd160b1e1c77a043" - }, - { - "ImportPath": "github.com/prometheus/client_golang/prometheus", - "Comment": "v0.8.0", - "Rev": "c5b7fccd204277076155f10851dad72b76a49317" - }, - { - "ImportPath": "github.com/prometheus/client_golang/prometheus/promhttp", - "Comment": "v0.8.0", - "Rev": "c5b7fccd204277076155f10851dad72b76a49317" - }, - { - "ImportPath": "github.com/prometheus/client_golang/prometheus/push", - "Comment": "v0.8.0", - "Rev": "c5b7fccd204277076155f10851dad72b76a49317" - }, - { - "ImportPath": "github.com/prometheus/client_model/go", - "Comment": "model-0.0.2-12-gfa8ad6f", - "Rev": "fa8ad6fec33561be4280a8f0514318c79d7f6cb6" - }, - { - "ImportPath": "github.com/prometheus/common/expfmt", - "Rev": "ffe929a3f4c4faeaa10f2b9535c2b1be3ad15650" - }, - { - "ImportPath": "github.com/prometheus/common/model", - "Rev": "ffe929a3f4c4faeaa10f2b9535c2b1be3ad15650" - }, - { - "ImportPath": "github.com/prometheus/procfs", - "Rev": "454a56f35412459b5e684fd5ec0f9211b94f002a" - }, - { - "ImportPath": "github.com/satori/go.uuid", - "Comment": "v1.1.0-1-g0aa62d5", - "Rev": "0aa62d5ddceb50dbcb909d790b5345affd3669b6" - }, - { - "ImportPath": "github.com/shurcooL/githubql", - "Rev": "71882d9f5f43fe50be204fe37db8994bf70072bb" - }, - { - "ImportPath": "github.com/shurcooL/go/ctxhttp", - "Rev": "5924e6132c2f184e262d4bc779e5745fa297d093" - }, - { - "ImportPath": "github.com/shurcooL/graphql", - "Rev": "68e5b1b3ab56633e42939506bc61f0b9fb2b7a79" - }, - { - "ImportPath": "github.com/shurcooL/graphql/ident", - "Rev": "68e5b1b3ab56633e42939506bc61f0b9fb2b7a79" - }, - { - "ImportPath": "github.com/shurcooL/graphql/internal/jsonutil", - "Rev": "68e5b1b3ab56633e42939506bc61f0b9fb2b7a79" - }, - { - "ImportPath": "github.com/spf13/cobra", - "Rev": "4c05eb1145f16d0e6bb4a3e1b6d769f4713cb41f" - }, - { - "ImportPath": "github.com/spf13/pflag", - "Comment": "v1.0.0-3-g7aff26d", - "Rev": "7aff26db30c1be810f9de5038ec5ef96ac41fd7c" - }, - { - "ImportPath": "golang.org/x/crypto/curve25519", - "Rev": "81e90905daefcd6fd217b62423c0908922eadb30" - }, - { - "ImportPath": "golang.org/x/crypto/ed25519", - "Rev": "81e90905daefcd6fd217b62423c0908922eadb30" - }, - { - "ImportPath": "golang.org/x/crypto/ed25519/internal/edwards25519", - "Rev": "81e90905daefcd6fd217b62423c0908922eadb30" - }, - { - "ImportPath": "golang.org/x/crypto/ssh", - "Rev": "81e90905daefcd6fd217b62423c0908922eadb30" - }, - { - "ImportPath": "golang.org/x/net/context", - "Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d" - }, - { - "ImportPath": "golang.org/x/net/context/ctxhttp", - "Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d" - }, - { - "ImportPath": "golang.org/x/net/http2", 
- "Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d" - }, - { - "ImportPath": "golang.org/x/net/http2/hpack", - "Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d" - }, - { - "ImportPath": "golang.org/x/net/idna", - "Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d" - }, - { - "ImportPath": "golang.org/x/net/internal/timeseries", - "Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d" - }, - { - "ImportPath": "golang.org/x/net/lex/httplex", - "Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d" - }, - { - "ImportPath": "golang.org/x/net/trace", - "Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d" - }, - { - "ImportPath": "golang.org/x/oauth2", - "Rev": "3c3a985cb79f52a3190fbc056984415ca6763d01" - }, - { - "ImportPath": "golang.org/x/oauth2/google", - "Rev": "3c3a985cb79f52a3190fbc056984415ca6763d01" - }, - { - "ImportPath": "golang.org/x/oauth2/internal", - "Rev": "3c3a985cb79f52a3190fbc056984415ca6763d01" - }, - { - "ImportPath": "golang.org/x/oauth2/jws", - "Rev": "3c3a985cb79f52a3190fbc056984415ca6763d01" - }, - { - "ImportPath": "golang.org/x/oauth2/jwt", - "Rev": "3c3a985cb79f52a3190fbc056984415ca6763d01" - }, - { - "ImportPath": "golang.org/x/sys/unix", - "Rev": "a646d33e2ee3172a661fc09bca23bb4889a41bc8" - }, - { - "ImportPath": "golang.org/x/tools/go/gcexportdata", - "Rev": "1f0f7f68c993a0ff4e0fe1793c08cdd14cc243cb" - }, - { - "ImportPath": "golang.org/x/tools/go/gcimporter15", - "Rev": "1f0f7f68c993a0ff4e0fe1793c08cdd14cc243cb" - }, - { - "ImportPath": "google.golang.org/api/gensupport", - "Rev": "a69f0f19d246419bb931b0ac8f4f8d3f3e6d4feb" - }, - { - "ImportPath": "google.golang.org/api/googleapi", - "Rev": "a69f0f19d246419bb931b0ac8f4f8d3f3e6d4feb" - }, - { - "ImportPath": "google.golang.org/api/googleapi/internal/uritemplates", - "Rev": "a69f0f19d246419bb931b0ac8f4f8d3f3e6d4feb" - }, - { - "ImportPath": "google.golang.org/api/internal", - "Rev": "a69f0f19d246419bb931b0ac8f4f8d3f3e6d4feb" - }, - { - "ImportPath": "google.golang.org/api/iterator", - "Rev": "a69f0f19d246419bb931b0ac8f4f8d3f3e6d4feb" - }, - { - "ImportPath": "google.golang.org/api/option", - "Rev": "a69f0f19d246419bb931b0ac8f4f8d3f3e6d4feb" - }, - { - "ImportPath": "google.golang.org/api/pubsub/v1", - "Rev": "a69f0f19d246419bb931b0ac8f4f8d3f3e6d4feb" - }, - { - "ImportPath": "google.golang.org/api/transport", - "Rev": "a69f0f19d246419bb931b0ac8f4f8d3f3e6d4feb" - }, - { - "ImportPath": "google.golang.org/appengine", - "Rev": "78199dcb0669fc381c22e919e1e97eba879e8f60" - }, - { - "ImportPath": "google.golang.org/appengine/internal", - "Rev": "78199dcb0669fc381c22e919e1e97eba879e8f60" - }, - { - "ImportPath": "google.golang.org/appengine/internal/app_identity", - "Rev": "78199dcb0669fc381c22e919e1e97eba879e8f60" - }, - { - "ImportPath": "google.golang.org/appengine/internal/base", - "Rev": "78199dcb0669fc381c22e919e1e97eba879e8f60" - }, - { - "ImportPath": "google.golang.org/appengine/internal/datastore", - "Rev": "78199dcb0669fc381c22e919e1e97eba879e8f60" - }, - { - "ImportPath": "google.golang.org/appengine/internal/log", - "Rev": "78199dcb0669fc381c22e919e1e97eba879e8f60" - }, - { - "ImportPath": "google.golang.org/appengine/internal/modules", - "Rev": "78199dcb0669fc381c22e919e1e97eba879e8f60" - }, - { - "ImportPath": "google.golang.org/appengine/internal/remote_api", - "Rev": "78199dcb0669fc381c22e919e1e97eba879e8f60" - }, - { - "ImportPath": "google.golang.org/grpc", - "Comment": "v1.0.1-GA-55-g71d2ea4", - "Rev": "71d2ea4f75286a63b606aca2422cd17ff37fd5b8" - }, - { - "ImportPath": "google.golang.org/grpc/codes", - "Comment": 
"v1.0.1-GA-55-g71d2ea4", - "Rev": "71d2ea4f75286a63b606aca2422cd17ff37fd5b8" - }, - { - "ImportPath": "google.golang.org/grpc/credentials", - "Comment": "v1.0.1-GA-55-g71d2ea4", - "Rev": "71d2ea4f75286a63b606aca2422cd17ff37fd5b8" - }, - { - "ImportPath": "google.golang.org/grpc/credentials/oauth", - "Comment": "v1.0.1-GA-55-g71d2ea4", - "Rev": "71d2ea4f75286a63b606aca2422cd17ff37fd5b8" - }, - { - "ImportPath": "google.golang.org/grpc/grpclog", - "Comment": "v1.0.1-GA-55-g71d2ea4", - "Rev": "71d2ea4f75286a63b606aca2422cd17ff37fd5b8" - }, - { - "ImportPath": "google.golang.org/grpc/internal", - "Comment": "v1.0.1-GA-55-g71d2ea4", - "Rev": "71d2ea4f75286a63b606aca2422cd17ff37fd5b8" - }, - { - "ImportPath": "google.golang.org/grpc/metadata", - "Comment": "v1.0.1-GA-55-g71d2ea4", - "Rev": "71d2ea4f75286a63b606aca2422cd17ff37fd5b8" - }, - { - "ImportPath": "google.golang.org/grpc/naming", - "Comment": "v1.0.1-GA-55-g71d2ea4", - "Rev": "71d2ea4f75286a63b606aca2422cd17ff37fd5b8" - }, - { - "ImportPath": "google.golang.org/grpc/peer", - "Comment": "v1.0.1-GA-55-g71d2ea4", - "Rev": "71d2ea4f75286a63b606aca2422cd17ff37fd5b8" - }, - { - "ImportPath": "google.golang.org/grpc/transport", - "Comment": "v1.0.1-GA-55-g71d2ea4", - "Rev": "71d2ea4f75286a63b606aca2422cd17ff37fd5b8" - }, - { - "ImportPath": "gopkg.in/yaml.v2", - "Rev": "a83829b6f1293c91addabc89d0571c246397bbf4" - }, - { - "ImportPath": "k8s.io/contrib/test-utils/utils", - "Comment": "cluster-autoscaler-0.3.0-138-g8f0f928c", - "Rev": "8f0f928c68f2c76ade9c1ee0249ae5cea5673cac" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/util/clock", - "Comment": "v1.5.0-alpha.0-1022-g87b40782c9", - "Rev": "87b40782c9c1cc82ba42f75de4cf23126297e0a3" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/util/errors", - "Comment": "v1.5.0-alpha.0-1022-g87b40782c9", - "Rev": "87b40782c9c1cc82ba42f75de4cf23126297e0a3" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/util/flag", - "Comment": "v1.5.0-alpha.0-1022-g87b40782c9", - "Rev": "87b40782c9c1cc82ba42f75de4cf23126297e0a3" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/util/sets", - "Comment": "v1.5.0-alpha.0-1022-g87b40782c9", - "Rev": "87b40782c9c1cc82ba42f75de4cf23126297e0a3" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/util/yaml", - "Comment": "v1.5.0-alpha.0-1022-g87b40782c9", - "Rev": "87b40782c9c1cc82ba42f75de4cf23126297e0a3" - } - ] -} diff --git a/Godeps/Readme b/Godeps/Readme deleted file mode 100644 index 4cdaa53d56d7..000000000000 --- a/Godeps/Readme +++ /dev/null @@ -1,5 +0,0 @@ -This directory tree is generated automatically by godep. - -Please do not edit. - -See https://github.com/tools/godep for more information. diff --git a/Gopkg.lock b/Gopkg.lock new file mode 100644 index 000000000000..87c83f21087a --- /dev/null +++ b/Gopkg.lock @@ -0,0 +1,227 @@ +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. 
+ + +[[projects]] + name = "bitbucket.org/ww/goautoneg" + packages = ["."] + revision = "'75cd24fc2f2c2a2088577d12123ddee5f54e0675'" + +[[projects]] + name = "github.com/NYTimes/gziphandler" + packages = ["."] + revision = "63027b26b87e2ae2ce3810393d4b81021cfd3a35" + +[[projects]] + name = "github.com/beorn7/perks" + packages = ["quantile"] + revision = "3ac7bf7a47d159a033b107610db8a1b6575507a4" + +[[projects]] + name = "github.com/bwmarrin/snowflake" + packages = ["."] + revision = "02cc386c183a1bf89dcac8c8008bb465f69d3340" + +[[projects]] + name = "github.com/ghodss/yaml" + packages = ["."] + revision = "73d445a93680fa1a78ae23a5839bad48f32ba1ee" + +[[projects]] + name = "github.com/go-sql-driver/mysql" + packages = ["."] + revision = "7ebe0a500653eeb1859664bed5e48dec1e164e73" + +[[projects]] + name = "github.com/golang/glog" + packages = ["."] + revision = "44145f04b68cf362d9c4df2182967c2275eaefed" + +[[projects]] + name = "github.com/golang/lint" + packages = ["."] + revision = "c5fb716d6688a859aae56d26d3e6070808df29f7" + +[[projects]] + name = "github.com/golang/mock" + packages = ["gomock"] + revision = "bd3c8e81be01eef76d4b503f5e687d2d1354d2d9" + +[[projects]] + name = "github.com/golang/protobuf" + packages = ["proto"] + revision = "8ee79997227bf9b34611aee7946ae64735e6fd93" + +[[projects]] + name = "github.com/google/btree" + packages = ["."] + revision = "cc6329d4279e3f025a53a83c397d2339b5705c45" + +[[projects]] + name = "github.com/google/go-github" + packages = ["github"] + revision = "7a51fb928f52a196d5f31daefb8a489453ef54ff" + +[[projects]] + name = "github.com/google/go-querystring" + packages = ["query"] + revision = "547ef5ac979778feb2f760cdb5f4eae1a2207b86" + +[[projects]] + name = "github.com/google/gofuzz" + packages = ["."] + revision = "bbcb9da2d746f8bdbd6a936686a0a6067ada0ec5" + +[[projects]] + name = "github.com/gregjones/httpcache" + packages = [".","diskcache"] + revision = "16db777d8ebea60a3e746a7e0703eaaa53f85f7f" + +[[projects]] + name = "github.com/inconshreveable/mousetrap" + packages = ["."] + revision = "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75" + version = "v1.0" + +[[projects]] + name = "github.com/influxdata/influxdb" + packages = ["client/v2","models","pkg/escape"] + revision = "049f9b42e9a5ca4c0bcb4439a9f33fd83b485077" + +[[projects]] + name = "github.com/jinzhu/gorm" + packages = [".","dialects/mysql","dialects/sqlite"] + revision = "572d0a0ab1eb75410a2729e96239152d1da7a91f" + +[[projects]] + name = "github.com/jinzhu/inflection" + packages = ["."] + revision = "3272df6c21d04180007eb3349844c89a3856bc25" + +[[projects]] + name = "github.com/mattn/go-sqlite3" + packages = ["."] + revision = "38ee283dabf11c9cbdb968eebd79b1fa7acbabe6" + +[[projects]] + name = "github.com/matttproud/golang_protobuf_extensions" + packages = ["pbutil"] + revision = "fc2b8d3a73c4867e51861bbdd5ae3c1f0869dd6a" + +[[projects]] + branch = "master" + name = "github.com/petar/GoLLRB" + packages = ["llrb"] + revision = "53be0d36a84c2a886ca057d34b6aa4468df9ccb4" + +[[projects]] + name = "github.com/peterbourgon/diskv" + packages = ["."] + revision = "5dfcb07a075adbaaa4094cddfd160b1e1c77a043" + +[[projects]] + name = "github.com/prometheus/client_golang" + packages = ["prometheus","prometheus/promhttp","prometheus/push"] + revision = "c5b7fccd204277076155f10851dad72b76a49317" + version = "v0.8.0" + +[[projects]] + name = "github.com/prometheus/client_model" + packages = ["go"] + revision = "fa8ad6fec33561be4280a8f0514318c79d7f6cb6" + +[[projects]] + name = "github.com/prometheus/common" 
+ packages = ["expfmt","model"] + revision = "ffe929a3f4c4faeaa10f2b9535c2b1be3ad15650" + +[[projects]] + name = "github.com/prometheus/procfs" + packages = ["."] + revision = "454a56f35412459b5e684fd5ec0f9211b94f002a" + +[[projects]] + name = "github.com/satori/go.uuid" + packages = ["."] + revision = "0aa62d5ddceb50dbcb909d790b5345affd3669b6" + +[[projects]] + name = "github.com/shurcooL/githubql" + packages = ["."] + revision = "71882d9f5f43fe50be204fe37db8994bf70072bb" + +[[projects]] + name = "github.com/shurcooL/go" + packages = ["ctxhttp"] + revision = "5924e6132c2f184e262d4bc779e5745fa297d093" + +[[projects]] + name = "github.com/shurcooL/graphql" + packages = [".","ident","internal/jsonutil"] + revision = "68e5b1b3ab56633e42939506bc61f0b9fb2b7a79" + +[[projects]] + name = "github.com/sirupsen/logrus" + packages = ["."] + revision = "a283a10442df8dc09befd873fab202bf8a253d6a" + +[[projects]] + name = "github.com/spf13/cobra" + packages = ["."] + revision = "4c05eb1145f16d0e6bb4a3e1b6d769f4713cb41f" + +[[projects]] + name = "github.com/spf13/pflag" + packages = ["."] + revision = "7aff26db30c1be810f9de5038ec5ef96ac41fd7c" + +[[projects]] + name = "golang.org/x/crypto" + packages = ["curve25519","ed25519","ed25519/internal/edwards25519","ssh"] + revision = "81e90905daefcd6fd217b62423c0908922eadb30" + +[[projects]] + name = "golang.org/x/net" + packages = ["context"] + revision = "f2499483f923065a842d38eb4c7f1927e6fc6e6d" + +[[projects]] + name = "golang.org/x/oauth2" + packages = [".","internal"] + revision = "3c3a985cb79f52a3190fbc056984415ca6763d01" + +[[projects]] + name = "golang.org/x/sys" + packages = ["unix"] + revision = "a646d33e2ee3172a661fc09bca23bb4889a41bc8" + +[[projects]] + name = "golang.org/x/tools" + packages = ["go/gcexportdata","go/gcimporter15"] + revision = "1f0f7f68c993a0ff4e0fe1793c08cdd14cc243cb" + +[[projects]] + name = "google.golang.org/appengine" + packages = [".","internal","internal/app_identity","internal/base","internal/datastore","internal/log","internal/modules","internal/remote_api","internal/urlfetch","urlfetch"] + revision = "78199dcb0669fc381c22e919e1e97eba879e8f60" + +[[projects]] + name = "gopkg.in/yaml.v2" + packages = ["."] + revision = "a83829b6f1293c91addabc89d0571c246397bbf4" + +[[projects]] + name = "k8s.io/contrib" + packages = ["test-utils/utils"] + revision = "8f0f928c68f2c76ade9c1ee0249ae5cea5673cac" + +[[projects]] + name = "k8s.io/kubernetes" + packages = ["pkg/util/clock","pkg/util/flag","pkg/util/sets","pkg/util/yaml"] + revision = "87b40782c9c1cc82ba42f75de4cf23126297e0a3" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + inputs-digest = "7bad0bf5607b1377510ca52c90243799c63f4cc44260b92383695e0def26991e" + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/Gopkg.toml b/Gopkg.toml new file mode 100644 index 000000000000..e3e8a40bb59f --- /dev/null +++ b/Gopkg.toml @@ -0,0 +1,77 @@ +# Gopkg.toml for k8s.io/test-infra + +# Ignore AWS SDK used by jenkins/aws-janitor +ignored = ["github.com/aws/aws-sdk-go*"] + +[[constraint]] + name = "github.com/NYTimes/gziphandler" + +[[constraint]] + name = "github.com/bwmarrin/snowflake" + +[[constraint]] + name = "github.com/ghodss/yaml" + +[[constraint]] + name = "github.com/golang/glog" + +[[constraint]] + name = "github.com/golang/lint" + +[[constraint]] + name = "github.com/golang/mock" + +[[constraint]] + name = "github.com/golang/protobuf" + +[[constraint]] + name = "github.com/google/go-github" + +[[constraint]] + name = "github.com/google/gofuzz" + +[[constraint]] + 
name = "github.com/gregjones/httpcache" + +[[constraint]] + name = "github.com/influxdata/influxdb" + +[[constraint]] + name = "github.com/jinzhu/gorm" + +[[constraint]] + name = "github.com/peterbourgon/diskv" + +[[constraint]] + name = "github.com/prometheus/client_golang" + version = "0.8.0" + +[[constraint]] + name = "github.com/satori/go.uuid" + +[[constraint]] + name = "github.com/shurcooL/githubql" + +[[constraint]] + name = "github.com/sirupsen/logrus" + +[[constraint]] + name = "github.com/spf13/cobra" + +[[constraint]] + name = "github.com/spf13/pflag" + +[[constraint]] + name = "golang.org/x/crypto" + +[[constraint]] + name = "golang.org/x/oauth2" + +[[constraint]] + name = "gopkg.in/yaml.v2" + +[[constraint]] + name = "k8s.io/contrib" + +[[constraint]] + name = "k8s.io/kubernetes" diff --git a/README.md b/README.md index b5b24b217857..a682a207ccd2 100644 --- a/README.md +++ b/README.md @@ -118,6 +118,10 @@ test jobs for a variety of platforms (e.g., Azure, rktnetes). The test-history scripts gather e2e results from these federated jobs. For information about how to contribute test results, see [Federated Testing](docs/federated_testing.md). +## Other Docs + +* [kubernetes/test-infra dependency management](docs/dep.md) + [`jobs/config.json`]: https://github.com/kubernetes/test-infra/blob/master/jobs/config.json [`prow/config.yaml`]: https://github.com/kubernetes/test-infra/blob/master/prow/config.yaml diff --git a/docs/dep.md b/docs/dep.md new file mode 100644 index 000000000000..70a0f12244e8 --- /dev/null +++ b/docs/dep.md @@ -0,0 +1,26 @@ +# kubernetes/test-infra dependency management + +test-infra uses [`dep`](https://github.com/golang/dep) for Go dependency +management. `dep` is a prototype dependency management tool for Go. It requires +Go 1.8 or newer to compile. + + +## Setup + +You can follow the [setup instructions](https://github.com/golang/dep#setup) to +set up `dep` in your local environment. + + +## Changing dependencies + +You can use the `dep` instructions for [adding](https://github.com/golang/dep#adding-a-dependency), +[updating](https://github.com/golang/dep#updating-dependencies) or +[removing](https://github.com/golang/dep#removing-dependencies) a dependency. + +Once you've updated, make sure to run: +``` +dep prune +verify/update-bazel.sh +``` + +To prune unneeded deps, and then update all the bazel files that `dep` blows away. 
diff --git a/vendor/BUILD b/vendor/BUILD index 15d40330f6de..8f95158fb805 100644 --- a/vendor/BUILD +++ b/vendor/BUILD @@ -1,5 +1,3 @@ -package(default_visibility = ["//visibility:public"]) - filegroup( name = "package-srcs", srcs = glob(["**"]), @@ -12,11 +10,7 @@ filegroup( srcs = [ ":package-srcs", "//vendor/bitbucket.org/ww/goautoneg:all-srcs", - "//vendor/cloud.google.com/go/compute/metadata:all-srcs", - "//vendor/cloud.google.com/go/internal:all-srcs", - "//vendor/cloud.google.com/go/pubsub:all-srcs", "//vendor/github.com/NYTimes/gziphandler:all-srcs", - "//vendor/github.com/arbovm/levenshtein:all-srcs", "//vendor/github.com/beorn7/perks/quantile:all-srcs", "//vendor/github.com/bwmarrin/snowflake:all-srcs", "//vendor/github.com/ghodss/yaml:all-srcs", @@ -31,13 +25,12 @@ filegroup( "//vendor/github.com/google/gofuzz:all-srcs", "//vendor/github.com/gregjones/httpcache:all-srcs", "//vendor/github.com/inconshreveable/mousetrap:all-srcs", - "//vendor/github.com/influxdata/influxdb/client/v2:all-srcs", - "//vendor/github.com/influxdata/influxdb/models:all-srcs", - "//vendor/github.com/influxdata/influxdb/pkg/escape:all-srcs", + "//vendor/github.com/influxdata/influxdb:all-srcs", "//vendor/github.com/jinzhu/gorm:all-srcs", "//vendor/github.com/jinzhu/inflection:all-srcs", "//vendor/github.com/mattn/go-sqlite3:all-srcs", "//vendor/github.com/matttproud/golang_protobuf_extensions/pbutil:all-srcs", + "//vendor/github.com/petar/GoLLRB/llrb:all-srcs", "//vendor/github.com/peterbourgon/diskv:all-srcs", "//vendor/github.com/prometheus/client_golang/prometheus:all-srcs", "//vendor/github.com/prometheus/client_model/go:all-srcs", @@ -55,31 +48,15 @@ filegroup( "//vendor/golang.org/x/crypto/ed25519:all-srcs", "//vendor/golang.org/x/crypto/ssh:all-srcs", "//vendor/golang.org/x/net/context:all-srcs", - "//vendor/golang.org/x/net/http2:all-srcs", - "//vendor/golang.org/x/net/idna:all-srcs", - "//vendor/golang.org/x/net/internal/timeseries:all-srcs", - "//vendor/golang.org/x/net/lex/httplex:all-srcs", - "//vendor/golang.org/x/net/trace:all-srcs", "//vendor/golang.org/x/oauth2:all-srcs", "//vendor/golang.org/x/sys/unix:all-srcs", "//vendor/golang.org/x/tools/go/gcexportdata:all-srcs", "//vendor/golang.org/x/tools/go/gcimporter15:all-srcs", - "//vendor/google.golang.org/api/gensupport:all-srcs", - "//vendor/google.golang.org/api/googleapi:all-srcs", - "//vendor/google.golang.org/api/internal:all-srcs", - "//vendor/google.golang.org/api/iterator:all-srcs", - "//vendor/google.golang.org/api/option:all-srcs", - "//vendor/google.golang.org/api/pubsub/v1:all-srcs", - "//vendor/google.golang.org/api/transport:all-srcs", "//vendor/google.golang.org/appengine:all-srcs", - "//vendor/google.golang.org/grpc:all-srcs", "//vendor/gopkg.in/yaml.v2:all-srcs", "//vendor/k8s.io/contrib/test-utils/utils:all-srcs", - "//vendor/k8s.io/kubernetes/pkg/util/clock:all-srcs", - "//vendor/k8s.io/kubernetes/pkg/util/errors:all-srcs", - "//vendor/k8s.io/kubernetes/pkg/util/flag:all-srcs", - "//vendor/k8s.io/kubernetes/pkg/util/sets:all-srcs", - "//vendor/k8s.io/kubernetes/pkg/util/yaml:all-srcs", + "//vendor/k8s.io/kubernetes/pkg/util:all-srcs", ], tags = ["automanaged"], + visibility = ["//visibility:public"], ) diff --git a/vendor/bitbucket.org/ww/goautoneg/BUILD b/vendor/bitbucket.org/ww/goautoneg/BUILD index 3564545ba967..8616afc6eee1 100644 --- a/vendor/bitbucket.org/ww/goautoneg/BUILD +++ b/vendor/bitbucket.org/ww/goautoneg/BUILD @@ -1,14 +1,17 @@ -package(default_visibility = ["//visibility:public"]) - -load( - 
"@io_bazel_rules_go//go:def.bzl", - "go_library", -) +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "go_default_library", srcs = ["autoneg.go"], importpath = "bitbucket.org/ww/goautoneg", + visibility = ["//visibility:public"], +) + +go_test( + name = "go_default_test", + srcs = ["autoneg_test.go"], + importpath = "bitbucket.org/ww/goautoneg", + library = ":go_default_library", ) filegroup( @@ -22,4 +25,5 @@ filegroup( name = "all-srcs", srcs = [":package-srcs"], tags = ["automanaged"], + visibility = ["//visibility:public"], ) diff --git a/vendor/bitbucket.org/ww/goautoneg/autoneg_test.go b/vendor/bitbucket.org/ww/goautoneg/autoneg_test.go new file mode 100644 index 000000000000..41d328f1d5f9 --- /dev/null +++ b/vendor/bitbucket.org/ww/goautoneg/autoneg_test.go @@ -0,0 +1,33 @@ +package goautoneg + +import ( + "testing" +) + +var chrome = "application/xml,application/xhtml+xml,text/html;q=0.9,text/plain;q=0.8,image/png,*/*;q=0.5" + +func TestParseAccept(t *testing.T) { + alternatives := []string{"text/html", "image/png"} + content_type := Negotiate(chrome, alternatives) + if content_type != "image/png" { + t.Errorf("got %s expected image/png", content_type) + } + + alternatives = []string{"text/html", "text/plain", "text/n3"} + content_type = Negotiate(chrome, alternatives) + if content_type != "text/html" { + t.Errorf("got %s expected text/html", content_type) + } + + alternatives = []string{"text/n3", "text/plain"} + content_type = Negotiate(chrome, alternatives) + if content_type != "text/plain" { + t.Errorf("got %s expected text/plain", content_type) + } + + alternatives = []string{"text/n3", "application/rdf+xml"} + content_type = Negotiate(chrome, alternatives) + if content_type != "text/n3" { + t.Errorf("got %s expected text/n3", content_type) + } +} diff --git a/vendor/cloud.google.com/go/LICENSE b/vendor/cloud.google.com/go/LICENSE deleted file mode 100644 index a4c5efd822fb..000000000000 --- a/vendor/cloud.google.com/go/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright 2014 Google Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/vendor/cloud.google.com/go/compute/metadata/BUILD b/vendor/cloud.google.com/go/compute/metadata/BUILD deleted file mode 100644 index dcfe259bde2a..000000000000 --- a/vendor/cloud.google.com/go/compute/metadata/BUILD +++ /dev/null @@ -1,30 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) - -go_library( - name = "go_default_library", - srcs = ["metadata.go"], - importpath = "cloud.google.com/go/compute/metadata", - deps = [ - "//vendor/cloud.google.com/go/internal:go_default_library", - "//vendor/golang.org/x/net/context:go_default_library", - "//vendor/golang.org/x/net/context/ctxhttp:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], -) diff --git a/vendor/cloud.google.com/go/compute/metadata/metadata.go b/vendor/cloud.google.com/go/compute/metadata/metadata.go deleted file mode 100644 index 5c6f3bf382e9..000000000000 --- a/vendor/cloud.google.com/go/compute/metadata/metadata.go +++ /dev/null @@ -1,438 +0,0 @@ -// Copyright 2014 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package metadata provides access to Google Compute Engine (GCE) -// metadata and API service accounts. -// -// This package is a wrapper around the GCE metadata service, -// as documented at https://developers.google.com/compute/docs/metadata. -package metadata - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "net" - "net/http" - "net/url" - "os" - "runtime" - "strings" - "sync" - "time" - - "golang.org/x/net/context" - "golang.org/x/net/context/ctxhttp" - - "cloud.google.com/go/internal" -) - -const ( - // metadataIP is the documented metadata server IP address. - metadataIP = "169.254.169.254" - - // metadataHostEnv is the environment variable specifying the - // GCE metadata hostname. If empty, the default value of - // metadataIP ("169.254.169.254") is used instead. - // This is variable name is not defined by any spec, as far as - // I know; it was made up for the Go package. 
- metadataHostEnv = "GCE_METADATA_HOST" -) - -type cachedValue struct { - k string - trim bool - mu sync.Mutex - v string -} - -var ( - projID = &cachedValue{k: "project/project-id", trim: true} - projNum = &cachedValue{k: "project/numeric-project-id", trim: true} - instID = &cachedValue{k: "instance/id", trim: true} -) - -var ( - metaClient = &http.Client{ - Transport: &internal.Transport{ - Base: &http.Transport{ - Dial: (&net.Dialer{ - Timeout: 2 * time.Second, - KeepAlive: 30 * time.Second, - }).Dial, - ResponseHeaderTimeout: 2 * time.Second, - }, - }, - } - subscribeClient = &http.Client{ - Transport: &internal.Transport{ - Base: &http.Transport{ - Dial: (&net.Dialer{ - Timeout: 2 * time.Second, - KeepAlive: 30 * time.Second, - }).Dial, - }, - }, - } -) - -// NotDefinedError is returned when requested metadata is not defined. -// -// The underlying string is the suffix after "/computeMetadata/v1/". -// -// This error is not returned if the value is defined to be the empty -// string. -type NotDefinedError string - -func (suffix NotDefinedError) Error() string { - return fmt.Sprintf("metadata: GCE metadata %q not defined", string(suffix)) -} - -// Get returns a value from the metadata service. -// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/". -// -// If the GCE_METADATA_HOST environment variable is not defined, a default of -// 169.254.169.254 will be used instead. -// -// If the requested metadata is not defined, the returned error will -// be of type NotDefinedError. -func Get(suffix string) (string, error) { - val, _, err := getETag(metaClient, suffix) - return val, err -} - -// getETag returns a value from the metadata service as well as the associated -// ETag using the provided client. This func is otherwise equivalent to Get. -func getETag(client *http.Client, suffix string) (value, etag string, err error) { - // Using a fixed IP makes it very difficult to spoof the metadata service in - // a container, which is an important use-case for local testing of cloud - // deployments. To enable spoofing of the metadata service, the environment - // variable GCE_METADATA_HOST is first inspected to decide where metadata - // requests shall go. - host := os.Getenv(metadataHostEnv) - if host == "" { - // Using 169.254.169.254 instead of "metadata" here because Go - // binaries built with the "netgo" tag and without cgo won't - // know the search suffix for "metadata" is - // ".google.internal", and this IP address is documented as - // being stable anyway. 
- host = metadataIP - } - url := "http://" + host + "/computeMetadata/v1/" + suffix - req, _ := http.NewRequest("GET", url, nil) - req.Header.Set("Metadata-Flavor", "Google") - res, err := client.Do(req) - if err != nil { - return "", "", err - } - defer res.Body.Close() - if res.StatusCode == http.StatusNotFound { - return "", "", NotDefinedError(suffix) - } - if res.StatusCode != 200 { - return "", "", fmt.Errorf("status code %d trying to fetch %s", res.StatusCode, url) - } - all, err := ioutil.ReadAll(res.Body) - if err != nil { - return "", "", err - } - return string(all), res.Header.Get("Etag"), nil -} - -func getTrimmed(suffix string) (s string, err error) { - s, err = Get(suffix) - s = strings.TrimSpace(s) - return -} - -func (c *cachedValue) get() (v string, err error) { - defer c.mu.Unlock() - c.mu.Lock() - if c.v != "" { - return c.v, nil - } - if c.trim { - v, err = getTrimmed(c.k) - } else { - v, err = Get(c.k) - } - if err == nil { - c.v = v - } - return -} - -var ( - onGCEOnce sync.Once - onGCE bool -) - -// OnGCE reports whether this process is running on Google Compute Engine. -func OnGCE() bool { - onGCEOnce.Do(initOnGCE) - return onGCE -} - -func initOnGCE() { - onGCE = testOnGCE() -} - -func testOnGCE() bool { - // The user explicitly said they're on GCE, so trust them. - if os.Getenv(metadataHostEnv) != "" { - return true - } - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - resc := make(chan bool, 2) - - // Try two strategies in parallel. - // See https://github.com/GoogleCloudPlatform/google-cloud-go/issues/194 - go func() { - res, err := ctxhttp.Get(ctx, metaClient, "http://"+metadataIP) - if err != nil { - resc <- false - return - } - defer res.Body.Close() - resc <- res.Header.Get("Metadata-Flavor") == "Google" - }() - - go func() { - addrs, err := net.LookupHost("metadata.google.internal") - if err != nil || len(addrs) == 0 { - resc <- false - return - } - resc <- strsContains(addrs, metadataIP) - }() - - tryHarder := systemInfoSuggestsGCE() - if tryHarder { - res := <-resc - if res { - // The first strategy succeeded, so let's use it. - return true - } - // Wait for either the DNS or metadata server probe to - // contradict the other one and say we are running on - // GCE. Give it a lot of time to do so, since the system - // info already suggests we're running on a GCE BIOS. - timer := time.NewTimer(5 * time.Second) - defer timer.Stop() - select { - case res = <-resc: - return res - case <-timer.C: - // Too slow. Who knows what this system is. - return false - } - } - - // There's no hint from the system info that we're running on - // GCE, so use the first probe's result as truth, whether it's - // true or false. The goal here is to optimize for speed for - // users who are NOT running on GCE. We can't assume that - // either a DNS lookup or an HTTP request to a blackholed IP - // address is fast. Worst case this should return when the - // metaClient's Transport.ResponseHeaderTimeout or - // Transport.Dial.Timeout fires (in two seconds). - return <-resc -} - -// systemInfoSuggestsGCE reports whether the local system (without -// doing network requests) suggests that we're running on GCE. If this -// returns true, testOnGCE tries a bit harder to reach its metadata -// server. -func systemInfoSuggestsGCE() bool { - if runtime.GOOS != "linux" { - // We don't have any non-Linux clues available, at least yet. 
- return false - } - slurp, _ := ioutil.ReadFile("/sys/class/dmi/id/product_name") - name := strings.TrimSpace(string(slurp)) - return name == "Google" || name == "Google Compute Engine" -} - -// Subscribe subscribes to a value from the metadata service. -// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/". -// The suffix may contain query parameters. -// -// Subscribe calls fn with the latest metadata value indicated by the provided -// suffix. If the metadata value is deleted, fn is called with the empty string -// and ok false. Subscribe blocks until fn returns a non-nil error or the value -// is deleted. Subscribe returns the error value returned from the last call to -// fn, which may be nil when ok == false. -func Subscribe(suffix string, fn func(v string, ok bool) error) error { - const failedSubscribeSleep = time.Second * 5 - - // First check to see if the metadata value exists at all. - val, lastETag, err := getETag(subscribeClient, suffix) - if err != nil { - return err - } - - if err := fn(val, true); err != nil { - return err - } - - ok := true - if strings.ContainsRune(suffix, '?') { - suffix += "&wait_for_change=true&last_etag=" - } else { - suffix += "?wait_for_change=true&last_etag=" - } - for { - val, etag, err := getETag(subscribeClient, suffix+url.QueryEscape(lastETag)) - if err != nil { - if _, deleted := err.(NotDefinedError); !deleted { - time.Sleep(failedSubscribeSleep) - continue // Retry on other errors. - } - ok = false - } - lastETag = etag - - if err := fn(val, ok); err != nil || !ok { - return err - } - } -} - -// ProjectID returns the current instance's project ID string. -func ProjectID() (string, error) { return projID.get() } - -// NumericProjectID returns the current instance's numeric project ID. -func NumericProjectID() (string, error) { return projNum.get() } - -// InternalIP returns the instance's primary internal IP address. -func InternalIP() (string, error) { - return getTrimmed("instance/network-interfaces/0/ip") -} - -// ExternalIP returns the instance's primary external (public) IP address. -func ExternalIP() (string, error) { - return getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip") -} - -// Hostname returns the instance's hostname. This will be of the form -// ".c..internal". -func Hostname() (string, error) { - return getTrimmed("instance/hostname") -} - -// InstanceTags returns the list of user-defined instance tags, -// assigned when initially creating a GCE instance. -func InstanceTags() ([]string, error) { - var s []string - j, err := Get("instance/tags") - if err != nil { - return nil, err - } - if err := json.NewDecoder(strings.NewReader(j)).Decode(&s); err != nil { - return nil, err - } - return s, nil -} - -// InstanceID returns the current VM's numeric instance ID. -func InstanceID() (string, error) { - return instID.get() -} - -// InstanceName returns the current VM's instance ID string. -func InstanceName() (string, error) { - host, err := Hostname() - if err != nil { - return "", err - } - return strings.Split(host, ".")[0], nil -} - -// Zone returns the current VM's zone, such as "us-central1-b". -func Zone() (string, error) { - zone, err := getTrimmed("instance/zone") - // zone is of the form "projects//zones/". - if err != nil { - return "", err - } - return zone[strings.LastIndex(zone, "/")+1:], nil -} - -// InstanceAttributes returns the list of user-defined attributes, -// assigned when initially creating a GCE VM instance. 
The value of an -// attribute can be obtained with InstanceAttributeValue. -func InstanceAttributes() ([]string, error) { return lines("instance/attributes/") } - -// ProjectAttributes returns the list of user-defined attributes -// applying to the project as a whole, not just this VM. The value of -// an attribute can be obtained with ProjectAttributeValue. -func ProjectAttributes() ([]string, error) { return lines("project/attributes/") } - -func lines(suffix string) ([]string, error) { - j, err := Get(suffix) - if err != nil { - return nil, err - } - s := strings.Split(strings.TrimSpace(j), "\n") - for i := range s { - s[i] = strings.TrimSpace(s[i]) - } - return s, nil -} - -// InstanceAttributeValue returns the value of the provided VM -// instance attribute. -// -// If the requested attribute is not defined, the returned error will -// be of type NotDefinedError. -// -// InstanceAttributeValue may return ("", nil) if the attribute was -// defined to be the empty string. -func InstanceAttributeValue(attr string) (string, error) { - return Get("instance/attributes/" + attr) -} - -// ProjectAttributeValue returns the value of the provided -// project attribute. -// -// If the requested attribute is not defined, the returned error will -// be of type NotDefinedError. -// -// ProjectAttributeValue may return ("", nil) if the attribute was -// defined to be the empty string. -func ProjectAttributeValue(attr string) (string, error) { - return Get("project/attributes/" + attr) -} - -// Scopes returns the service account scopes for the given account. -// The account may be empty or the string "default" to use the instance's -// main account. -func Scopes(serviceAccount string) ([]string, error) { - if serviceAccount == "" { - serviceAccount = "default" - } - return lines("instance/service-accounts/" + serviceAccount + "/scopes") -} - -func strsContains(ss []string, s string) bool { - for _, v := range ss { - if v == s { - return true - } - } - return false -} diff --git a/vendor/cloud.google.com/go/internal/BUILD b/vendor/cloud.google.com/go/internal/BUILD deleted file mode 100644 index e29350746747..000000000000 --- a/vendor/cloud.google.com/go/internal/BUILD +++ /dev/null @@ -1,25 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) - -go_library( - name = "go_default_library", - srcs = ["cloud.go"], - importpath = "cloud.google.com/go/internal", -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], -) diff --git a/vendor/cloud.google.com/go/internal/cloud.go b/vendor/cloud.google.com/go/internal/cloud.go deleted file mode 100644 index 8e0c8f8e525c..000000000000 --- a/vendor/cloud.google.com/go/internal/cloud.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2014 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -// Package internal provides support for the cloud packages. -// -// Users should not import this package directly. -package internal - -import ( - "fmt" - "net/http" -) - -const userAgent = "gcloud-golang/0.1" - -// Transport is an http.RoundTripper that appends Google Cloud client's -// user-agent to the original request's user-agent header. -type Transport struct { - // TODO(bradfitz): delete internal.Transport. It's too wrappy for what it does. - // Do User-Agent some other way. - - // Base is the actual http.RoundTripper - // requests will use. It must not be nil. - Base http.RoundTripper -} - -// RoundTrip appends a user-agent to the existing user-agent -// header and delegates the request to the base http.RoundTripper. -func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { - req = cloneRequest(req) - ua := req.Header.Get("User-Agent") - if ua == "" { - ua = userAgent - } else { - ua = fmt.Sprintf("%s %s", ua, userAgent) - } - req.Header.Set("User-Agent", ua) - return t.Base.RoundTrip(req) -} - -// cloneRequest returns a clone of the provided *http.Request. -// The clone is a shallow copy of the struct and its Header map. -func cloneRequest(r *http.Request) *http.Request { - // shallow copy of the struct - r2 := new(http.Request) - *r2 = *r - // deep copy of the Header - r2.Header = make(http.Header) - for k, s := range r.Header { - r2.Header[k] = s - } - return r2 -} diff --git a/vendor/cloud.google.com/go/pubsub/BUILD b/vendor/cloud.google.com/go/pubsub/BUILD deleted file mode 100644 index 3dfb1dfd7fd6..000000000000 --- a/vendor/cloud.google.com/go/pubsub/BUILD +++ /dev/null @@ -1,44 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "acker.go", - "doc.go", - "iterator.go", - "keepalive.go", - "message.go", - "pubsub.go", - "puller.go", - "service.go", - "subscription.go", - "topic.go", - ], - importpath = "cloud.google.com/go/pubsub", - deps = [ - "//vendor/golang.org/x/net/context:go_default_library", - "//vendor/google.golang.org/api/googleapi:go_default_library", - "//vendor/google.golang.org/api/iterator:go_default_library", - "//vendor/google.golang.org/api/option:go_default_library", - "//vendor/google.golang.org/api/pubsub/v1:go_default_library", - "//vendor/google.golang.org/api/transport:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], -) diff --git a/vendor/cloud.google.com/go/pubsub/acker.go b/vendor/cloud.google.com/go/pubsub/acker.go deleted file mode 100644 index 088ed79cf952..000000000000 --- a/vendor/cloud.google.com/go/pubsub/acker.go +++ /dev/null @@ -1,159 +0,0 @@ -// Copyright 2016 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package pubsub - -import ( - "sync" - "time" - - "golang.org/x/net/context" -) - -// ackBuffer stores the pending ack IDs and notifies the Dirty channel when it becomes non-empty. -type ackBuffer struct { - Dirty chan struct{} - // Close done when ackBuffer is no longer needed. - Done chan struct{} - - mu sync.Mutex - pending []string - send bool -} - -// Add adds ackID to the buffer. -func (buf *ackBuffer) Add(ackID string) { - buf.mu.Lock() - defer buf.mu.Unlock() - buf.pending = append(buf.pending, ackID) - - // If we are transitioning into a non-empty notification state. - if buf.send && len(buf.pending) == 1 { - buf.notify() - } -} - -// RemoveAll removes all ackIDs from the buffer and returns them. -func (buf *ackBuffer) RemoveAll() []string { - buf.mu.Lock() - defer buf.mu.Unlock() - - ret := buf.pending - buf.pending = nil - return ret -} - -// SendNotifications enables sending dirty notification on empty -> non-empty transitions. -// If the buffer is already non-empty, a notification will be sent immediately. -func (buf *ackBuffer) SendNotifications() { - buf.mu.Lock() - defer buf.mu.Unlock() - - buf.send = true - // If we are transitioning into a non-empty notification state. - if len(buf.pending) > 0 { - buf.notify() - } -} - -func (buf *ackBuffer) notify() { - go func() { - select { - case buf.Dirty <- struct{}{}: - case <-buf.Done: - } - }() -} - -// acker acks messages in batches. -type acker struct { - s service - Ctx context.Context // The context to use when acknowledging messages. - Sub string // The full name of the subscription. - AckTick <-chan time.Time // AckTick supplies the frequency with which to make ack requests. - - // Notify is called with an ack ID after the message with that ack ID - // has been processed. An ackID is considered to have been processed - // if at least one attempt has been made to acknowledge it. - Notify func(string) - - ackBuffer - - wg sync.WaitGroup - done chan struct{} -} - -// Start initiates processing of ackIDs which are added via Add. -// Notify is called with each ackID once it has been processed. -func (a *acker) Start() { - a.done = make(chan struct{}) - a.ackBuffer.Dirty = make(chan struct{}) - a.ackBuffer.Done = a.done - - a.wg.Add(1) - go func() { - defer a.wg.Done() - for { - select { - case <-a.ackBuffer.Dirty: - a.ack(a.ackBuffer.RemoveAll()) - case <-a.AckTick: - a.ack(a.ackBuffer.RemoveAll()) - case <-a.done: - return - } - } - - }() -} - -// Ack adds an ack id to be acked in the next batch. -func (a *acker) Ack(ackID string) { - a.ackBuffer.Add(ackID) -} - -// FastMode switches acker into a mode which acks messages as they arrive, rather than waiting -// for a.AckTick. -func (a *acker) FastMode() { - a.ackBuffer.SendNotifications() -} - -// Stop drops all pending messages, and releases resources before returning. -func (a *acker) Stop() { - close(a.done) - a.wg.Wait() -} - -const maxAckAttempts = 2 - -// ack acknowledges the supplied ackIDs. -// After the acknowledgement request has completed (regardless of its success -// or failure), ids will be passed to a.Notify. -func (a *acker) ack(ids []string) { - head, tail := a.s.splitAckIDs(ids) - for len(head) > 0 { - for i := 0; i < maxAckAttempts; i++ { - if a.s.acknowledge(a.Ctx, a.Sub, head) == nil { - break - } - } - // NOTE: if retry gives up and returns an error, we simply drop - // those ack IDs. The messages will be redelivered and this is - // a documented behaviour of the API.
- head, tail = a.s.splitAckIDs(tail) - } - for _, id := range ids { - a.Notify(id) - } -} diff --git a/vendor/cloud.google.com/go/pubsub/doc.go b/vendor/cloud.google.com/go/pubsub/doc.go deleted file mode 100644 index e1be84a8a308..000000000000 --- a/vendor/cloud.google.com/go/pubsub/doc.go +++ /dev/null @@ -1,115 +0,0 @@ -// Copyright 2016 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/* -Package pubsub provides an easy way to publish and receive Google Cloud Pub/Sub -messages, hiding the details of the underlying server RPCs. Google Cloud -Pub/Sub is a many-to-many, asynchronous messaging system that decouples senders -and receivers. - -Note: This package is experimental and may make backwards-incompatible changes. - -More information about Google Cloud Pub/Sub is available at -https://cloud.google.com/pubsub/docs - -Publishing - -Google Cloud Pub/Sub messages are published to topics. Topics may be created -using the pubsub package like so: - - topic, err := pubsubClient.CreateTopic(context.Background(), "topic-name") - -Messages may then be published to a topic: - - msgIDs, err := topic.Publish(ctx, &pubsub.Message{ - Data: []byte("payload"), - }) - -Receiving - -To receive messages published to a topic, clients create subscriptions -to the topic. There may be more than one subscription per topic; each message -that is published to the topic will be delivered to all of its subscriptions. - -Subscriptions may be created like so: - - sub, err := pubsubClient.CreateSubscription(context.Background(), "sub-name", topic, 0, nil) - -Messages are then consumed from a subscription via an iterator: - - // Construct the iterator - it, err := sub.Pull(context.Background()) - if err != nil { - // handle err ... - } - defer it.Stop() - - // Consume N messages - for i := 0; i < N; i++ { - msg, err := it.Next() - if err == pubsub.Done { - break - } - if err != nil { - // handle err ... - break - } - - log.Print("got message: ", string(msg.Data)) - msg.Done(true) - } - -The message iterator returns messages one at a time, fetching batches of -messages behind the scenes as needed. Once client code has processed the -message, it must call Message.Done, otherwise the message will eventually be -redelivered. For more information and configuration options, see "Deadlines" -below. - -Note: It is possible for Messages to be redelivered, even if Message.Done has -been called. Client code must be robust to multiple deliveries of messages. - -Deadlines - -The default pubsub deadlines are suitable for most use cases, but may be -overridden. This section describes the tradeoffs that should be considered -when overriding the defaults. - -Behind the scenes, each message returned by the Pub/Sub server has an -associated lease, known as an "ACK deadline". -Unless a message is acknowledged within the ACK deadline, or the client requests that -the ACK deadline be extended, the message will become eligible for redelivery.
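-For example, both knobs discussed in this section may be set explicitly, using -the CreateSubscription and Pull functions defined in this package (a sketch; -error handling elided): - - // 30s ACK deadline; stop auto-extending each message after one minute. - sub, err := pubsubClient.CreateSubscription(ctx, "sub-name", topic, 30*time.Second, nil) - it, err := sub.Pull(ctx, pubsub.MaxExtension(time.Minute)) -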
-As a convenience, the pubsub package will automatically extend deadlines until -either: - * Message.Done is called, or - * the "MaxExtension" period elapses from the time the message is fetched from the server. - -The initial ACK deadline given to each message defaults to 10 seconds, but may -be overridden during subscription creation. Selecting an ACK deadline is a -tradeoff between message redelivery latency and RPC volume. If the pubsub -package fails to acknowledge or extend a message (e.g. due to unexpected -termination of the process), a shorter ACK deadline will generally result in -faster message redelivery by the Pub/Sub system. However, a short ACK deadline -may also increase the number of deadline extension RPCs that the pubsub package -sends to the server. - -The default max extension period is DefaultMaxExtension, and can be overridden -by passing a MaxExtension option to Subscription.Pull. Selecting a max -extension period is a tradeoff between the speed at which client code must -process messages, and the redelivery delay if messages fail to be acknowledged -(e.g. because client code neglects to do so). Using a large MaxExtension -increases the available time for client code to process messages. However, if -the client code neglects to call Message.Done, a large MaxExtension will -increase the delay before the message is redelivered. -*/ -package pubsub diff --git a/vendor/cloud.google.com/go/pubsub/iterator.go b/vendor/cloud.google.com/go/pubsub/iterator.go deleted file mode 100644 index 672103309dc4..000000000000 --- a/vendor/cloud.google.com/go/pubsub/iterator.go +++ /dev/null @@ -1,165 +0,0 @@ -// Copyright 2016 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package pubsub - -import ( - "sync" - "time" - - "golang.org/x/net/context" - "google.golang.org/api/iterator" -) - -// Done is returned when an iteration is complete. -var Done = iterator.Done - -type Iterator struct { - // kaTicker controls how often we send an ack deadline extension request. - kaTicker *time.Ticker - // ackTicker controls how often we acknowledge a batch of messages. - ackTicker *time.Ticker - - ka *keepAlive - acker *acker - puller *puller - - // mu ensures that cleanup only happens once, and concurrent Stop - // invocations block until cleanup completes. - mu sync.Mutex - - // closed is used to signal that Stop has been called. - closed chan struct{} -} - -// newIterator starts a new Iterator. Stop must be called on the Iterator -// when it is no longer needed. -// subName is the full name of the subscription to pull messages from. -// ctx is the context to use for acking messages and extending message deadlines. -func newIterator(ctx context.Context, s service, subName string, po *pullOptions) *Iterator { - // TODO: make kaTicker frequency more configurable. - // (ackDeadline - 5s) is a reasonable default for now, because the minimum ack period is 10s. This gives us 5s grace.
- keepAlivePeriod := po.ackDeadline - 5*time.Second - kaTicker := time.NewTicker(keepAlivePeriod) // Stopped in it.Stop - - // TODO: make ackTicker more configurable. Something less than - // kaTicker is a reasonable default (there's no point extending - // messages when they could be acked instead). - ackTicker := time.NewTicker(keepAlivePeriod / 2) // Stopped in it.Stop - - ka := &keepAlive{ - s: s, - Ctx: ctx, - Sub: subName, - ExtensionTick: kaTicker.C, - Deadline: po.ackDeadline, - MaxExtension: po.maxExtension, - } - - ack := &acker{ - s: s, - Ctx: ctx, - Sub: subName, - AckTick: ackTicker.C, - Notify: ka.Remove, - } - - pull := newPuller(s, subName, ctx, int64(po.maxPrefetch), ka.Add, ka.Remove) - - ka.Start() - ack.Start() - return &Iterator{ - kaTicker: kaTicker, - ackTicker: ackTicker, - ka: ka, - acker: ack, - puller: pull, - closed: make(chan struct{}), - } -} - -// Next returns the next Message to be processed. The caller must call -// Message.Done when finished with it. -// Once Stop has been called, calls to Next will return Done. -func (it *Iterator) Next() (*Message, error) { - m, err := it.puller.Next() - - if err == nil { - m.it = it - return m, nil - } - - select { - // If Stop has been called, we return Done regardless of the value of err. - case <-it.closed: - return nil, Done - default: - return nil, err - } -} - -// Client code must call Stop on an Iterator when finished with it. -// Stop will block until Done has been called on all Messages that have been -// returned by Next, or until the context with which the Iterator was created -// is cancelled or exceeds its deadline. -// Stop need only be called once, but may be called multiple times from -// multiple goroutines. -func (it *Iterator) Stop() { - it.mu.Lock() - defer it.mu.Unlock() - - select { - case <-it.closed: - // Cleanup has already been performed. - return - default: - } - - // We close this channel before calling it.puller.Stop to ensure that we - // reliably return Done from Next. - close(it.closed) - - // Stop the puller. Once this completes, no more messages will be added - // to it.ka. - it.puller.Stop() - - // Start acking messages as they arrive, ignoring ackTicker. This will - // result in it.ka.Stop, below, returning as soon as possible. - it.acker.FastMode() - - // This will block until - // (a) it.ka.Ctx is done, or - // (b) all messages have been removed from keepAlive. - // (b) will happen once all outstanding messages have been either ACKed or NACKed. - it.ka.Stop() - - // There are no more live messages, so kill off the acker. - it.acker.Stop() - - it.kaTicker.Stop() - it.ackTicker.Stop() -} - -func (it *Iterator) done(ackID string, ack bool) { - if ack { - it.acker.Ack(ackID) - // There's no need to call it.ka.Remove here, as acker will - // call it via its Notify function. - } else { - // TODO: explicitly NACK the message by sending a - // ModifyAckDeadline request with 0s deadline, to make the - // message immediately available for redelivery. - it.ka.Remove(ackID) - } -} diff --git a/vendor/cloud.google.com/go/pubsub/keepalive.go b/vendor/cloud.google.com/go/pubsub/keepalive.go deleted file mode 100644 index 11b5739f4ae1..000000000000 --- a/vendor/cloud.google.com/go/pubsub/keepalive.go +++ /dev/null @@ -1,179 +0,0 @@ -// Copyright 2016 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package pubsub - -import ( - "sync" - "time" - - "golang.org/x/net/context" -) - -// keepAlive keeps track of which Messages need to have their deadline extended, and -// periodically extends them. -// Messages are tracked by Ack ID. -type keepAlive struct { - s service - Ctx context.Context // The context to use when extending deadlines. - Sub string // The full name of the subscription. - ExtensionTick <-chan time.Time // ExtensionTick supplies the frequency with which to make extension requests. - Deadline time.Duration // How long to extend messages for each time they are extended. Should be greater than ExtensionTick frequency. - MaxExtension time.Duration // How long to keep extending each message's ack deadline before automatically removing it. - - mu sync.Mutex - // key: ackID; value: time at which ack deadline extension should cease. - items map[string]time.Time - dr drain - - wg sync.WaitGroup -} - -// Start initiates the deadline extension loop. Stop must be called once keepAlive is no longer needed. -func (ka *keepAlive) Start() { - ka.items = make(map[string]time.Time) - ka.dr = drain{Drained: make(chan struct{})} - ka.wg.Add(1) - go func() { - defer ka.wg.Done() - for { - select { - case <-ka.Ctx.Done(): - // Don't bother waiting for items to be removed: we can't extend them any more. - return - case <-ka.dr.Drained: - return - case <-ka.ExtensionTick: - live, expired := ka.getAckIDs() - ka.wg.Add(1) - go func() { - defer ka.wg.Done() - ka.extendDeadlines(live) - }() - - for _, id := range expired { - ka.Remove(id) - } - } - } - }() -} - -// Add adds an ack id to be kept alive. -// It should not be called after Stop. -func (ka *keepAlive) Add(ackID string) { - ka.mu.Lock() - defer ka.mu.Unlock() - - ka.items[ackID] = time.Now().Add(ka.MaxExtension) - ka.dr.SetPending(true) -} - -// Remove removes ackID from the list to be kept alive. -func (ka *keepAlive) Remove(ackID string) { - ka.mu.Lock() - defer ka.mu.Unlock() - - // Note: If a user NACKs a message after it has been removed due to - // expiring, Remove will be called twice with the same ack id. This is OK. - delete(ka.items, ackID) - ka.dr.SetPending(len(ka.items) != 0) -} - -// Stop waits until all added ackIDs have been removed, and cleans up resources. -// Stop may only be called once. -func (ka *keepAlive) Stop() { - ka.mu.Lock() - ka.dr.Drain() - ka.mu.Unlock() - - ka.wg.Wait() -} - -// getAckIDs returns the set of ackIDs that are being kept alive. -// The set is divided into two lists: one with IDs that should continue to be kept alive, -// and the other with IDs that should be dropped.
-func (ka *keepAlive) getAckIDs() (live, expired []string) { - ka.mu.Lock() - defer ka.mu.Unlock() - - now := time.Now() - for id, expiry := range ka.items { - if expiry.Before(now) { - expired = append(expired, id) - } else { - live = append(live, id) - } - } - return live, expired -} - -const maxExtensionAttempts = 2 - -func (ka *keepAlive) extendDeadlines(ackIDs []string) { - head, tail := ka.s.splitAckIDs(ackIDs) - for len(head) > 0 { - for i := 0; i < maxExtensionAttempts; i++ { - if ka.s.modifyAckDeadline(ka.Ctx, ka.Sub, ka.Deadline, head) == nil { - break - } - } - // NOTE: Messages whose deadlines we fail to extend will - // eventually be redelivered and this is a documented behaviour - // of the API. - // - // NOTE: If we fail to extend deadlines here, this - // implementation will continue to attempt extending the - // deadlines for those ack IDs the next time the extension - // ticker ticks. By then the deadline will have expired. - // Re-extending them is harmless, however. - // - // TODO: call Remove for ids which fail to be extended. - - head, tail = ka.s.splitAckIDs(tail) - } -} - -// A drain (once started) indicates via a channel when there is no work pending. -type drain struct { - started bool - pending bool - - // Drained is closed once there are no items outstanding if Drain has been called. - Drained chan struct{} -} - -// Drain starts the drain process. This cannot be undone. -func (d *drain) Drain() { - d.started = true - d.closeIfDrained() -} - -// SetPending sets whether there is work pending or not. It may be called multiple times before or after Drain. -func (d *drain) SetPending(pending bool) { - d.pending = pending - d.closeIfDrained() -} - -func (d *drain) closeIfDrained() { - if !d.pending && d.started { - // Check to see if d.Drained is closed before closing it. - // This allows SetPending(false) to be safely called multiple times. - select { - case <-d.Drained: - default: - close(d.Drained) - } - } -} diff --git a/vendor/cloud.google.com/go/pubsub/message.go b/vendor/cloud.google.com/go/pubsub/message.go deleted file mode 100644 index 7a4c0cb72ccb..000000000000 --- a/vendor/cloud.google.com/go/pubsub/message.go +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright 2016 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package pubsub - -import ( - "encoding/base64" - - raw "google.golang.org/api/pubsub/v1" -) - -// Message represents a Pub/Sub message. -type Message struct { - // ID identifies this message. - // This ID is assigned by the server and is populated for Messages obtained from a subscription. - // It is otherwise ignored. - ID string - - // Data is the actual data in the message. - Data []byte - - // Attributes represents the key-value pairs the current message - // is labelled with. - Attributes map[string]string - - // ackID is the identifier to acknowledge this message. - ackID string - - // TODO(mcgreevy): add publish time. - - calledDone bool - - // The iterator that created this Message. 
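- // It is set by Iterator.Next and used by Message.Done to route - // the ack or nack back through the Iterator that produced it.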
- it *Iterator -} - -func toMessage(resp *raw.ReceivedMessage) (*Message, error) { - if resp.Message == nil { - return &Message{ackID: resp.AckId}, nil - } - data, err := base64.StdEncoding.DecodeString(resp.Message.Data) - if err != nil { - return nil, err - } - return &Message{ - ackID: resp.AckId, - Data: data, - Attributes: resp.Message.Attributes, - ID: resp.Message.MessageId, - }, nil -} - -// Done completes the processing of a Message that was returned from an Iterator. -// ack indicates whether the message should be acknowledged. -// Client code must call Done when finished for each Message returned by an iterator. -// Done may only be called on Messages returned by an iterator. -// If message acknowledgement fails, the Message will be redelivered. -// Calls to Done have no effect after the first call. -// -// See Iterator.Next for an example. -func (m *Message) Done(ack bool) { - if m.calledDone { - return - } - m.calledDone = true - m.it.done(m.ackID, ack) -} diff --git a/vendor/cloud.google.com/go/pubsub/pubsub.go b/vendor/cloud.google.com/go/pubsub/pubsub.go deleted file mode 100644 index b68adc3a07e0..000000000000 --- a/vendor/cloud.google.com/go/pubsub/pubsub.go +++ /dev/null @@ -1,136 +0,0 @@ -// Copyright 2014 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package pubsub - -import ( - "fmt" - "net/http" - "os" - - "google.golang.org/api/option" - raw "google.golang.org/api/pubsub/v1" - "google.golang.org/api/transport" - - "golang.org/x/net/context" -) - -const ( - // ScopePubSub grants permissions to view and manage Pub/Sub - // topics and subscriptions. - ScopePubSub = "https://www.googleapis.com/auth/pubsub" - - // ScopeCloudPlatform grants permissions to view and manage your data - // across Google Cloud Platform services. - ScopeCloudPlatform = "https://www.googleapis.com/auth/cloud-platform" -) - -const prodAddr = "https://pubsub.googleapis.com/" -const userAgent = "gcloud-golang-pubsub/20151008" - -// Client is a Google Pub/Sub client, which may be used to perform Pub/Sub operations with a project. -// It must be constructed via NewClient. -type Client struct { - projectID string - s service -} - -// NewClient creates a new PubSub client. -func NewClient(ctx context.Context, projectID string, opts ...option.ClientOption) (*Client, error) { - var o []option.ClientOption - // Environment variables for gcloud emulator: - // https://cloud.google.com/sdk/gcloud/reference/beta/emulators/pubsub/ - if addr := os.Getenv("PUBSUB_EMULATOR_HOST"); addr != "" { - o = []option.ClientOption{ - option.WithEndpoint("http://" + addr + "/"), - option.WithHTTPClient(http.DefaultClient), - } - } else { - o = []option.ClientOption{ - option.WithEndpoint(prodAddr), - option.WithScopes(raw.PubsubScope, raw.CloudPlatformScope), - option.WithUserAgent(userAgent), - } - } - o = append(o, opts...) - httpClient, endpoint, err := transport.NewHTTPClient(ctx, o...)
- if err != nil { - return nil, fmt.Errorf("dialing: %v", err) - } - - s, err := newPubSubService(httpClient, endpoint) - if err != nil { - return nil, fmt.Errorf("constructing pubsub client: %v", err) - } - - c := &Client{ - projectID: projectID, - s: s, - } - - return c, nil -} - -func (c *Client) fullyQualifiedProjectName() string { - return fmt.Sprintf("projects/%s", c.projectID) -} - -// pageToken stores the next page token for a server response which is split over multiple pages. -type pageToken struct { - tok string - explicit bool -} - -func (pt *pageToken) set(tok string) { - pt.tok = tok - pt.explicit = true -} - -func (pt *pageToken) get() string { - return pt.tok -} - -// more returns whether further pages should be fetched from the server. -func (pt *pageToken) more() bool { - return pt.tok != "" || !pt.explicit -} - -// stringsIterator provides an iterator API for a sequence of API page fetches that return lists of strings. -type stringsIterator struct { - ctx context.Context - strings []string - token pageToken - fetch func(ctx context.Context, tok string) (*stringsPage, error) -} - -// Next returns the next string. If there are no more strings, Done will be returned. -func (si *stringsIterator) Next() (string, error) { - for len(si.strings) == 0 && si.token.more() { - page, err := si.fetch(si.ctx, si.token.get()) - if err != nil { - return "", err - } - si.token.set(page.tok) - si.strings = page.strings - } - - if len(si.strings) == 0 { - return "", Done - } - - s := si.strings[0] - si.strings = si.strings[1:] - - return s, nil -} diff --git a/vendor/cloud.google.com/go/pubsub/puller.go b/vendor/cloud.google.com/go/pubsub/puller.go deleted file mode 100644 index bc2ea8aad4be..000000000000 --- a/vendor/cloud.google.com/go/pubsub/puller.go +++ /dev/null @@ -1,115 +0,0 @@ -// Copyright 2016 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package pubsub - -import ( - "sync" - - "golang.org/x/net/context" -) - -// puller fetches messages from the server in a batch. -type puller struct { - ctx context.Context - cancel context.CancelFunc - - // keepAlive takes ownership of the lifetime of the message identified - // by ackID, ensuring that its ack deadline does not expire. It should - // be called each time a new message is fetched from the server, even - // if it is not yet returned from Next. - keepAlive func(ackID string) - - // abandon should be called for each message which has previously been - // passed to keepAlive, but will never be returned by Next. - abandon func(ackID string) - - // fetch fetches a batch of messages from the server. - fetch func() ([]*Message, error) - - mu sync.Mutex - buf []*Message -} - -// newPuller constructs a new puller. -// batchSize is the maximum number of messages to fetch at once. -// No more than batchSize messages will be outstanding at any time. 
-func newPuller(s service, subName string, ctx context.Context, batchSize int64, keepAlive, abandon func(ackID string)) *puller { - ctx, cancel := context.WithCancel(ctx) - return &puller{ - cancel: cancel, - keepAlive: keepAlive, - abandon: abandon, - ctx: ctx, - fetch: func() ([]*Message, error) { return s.fetchMessages(ctx, subName, batchSize) }, - } -} - -const maxPullAttempts = 2 - -// Next returns the next message from the server, fetching a new batch if necessary. -// keepAlive is called with the ackIDs of newly fetched messages. -// If p.ctx has already been cancelled before Next is called, no new messages -// will be fetched. -func (p *puller) Next() (*Message, error) { - p.mu.Lock() - defer p.mu.Unlock() - - // If ctx has been cancelled, return straight away (even if there are buffered messages available). - select { - case <-p.ctx.Done(): - return nil, p.ctx.Err() - default: - } - - for len(p.buf) == 0 { - var buf []*Message - var err error - - for i := 0; i < maxPullAttempts; i++ { - // Once Stop has completed, all future calls to Next will immediately fail at this point. - buf, err = p.fetch() - if err == nil || err == context.Canceled || err == context.DeadlineExceeded { - break - } - } - if err != nil { - return nil, err - } - - for _, m := range buf { - p.keepAlive(m.ackID) - } - p.buf = buf - } - - m := p.buf[0] - p.buf = p.buf[1:] - return m, nil -} - -// Stop aborts any pending calls to Next, and prevents any future ones from succeeding. -// Stop also abandons any messages that have been pre-fetched. -// Once Stop completes, no calls to Next will succeed. -func (p *puller) Stop() { - // Next may be executing in another goroutine. Cancel it, and then wait until it terminates. - p.cancel() - p.mu.Lock() - defer p.mu.Unlock() - - for _, m := range p.buf { - p.abandon(m.ackID) - } - p.buf = nil -} diff --git a/vendor/cloud.google.com/go/pubsub/service.go b/vendor/cloud.google.com/go/pubsub/service.go deleted file mode 100644 index e894ec2d03ec..000000000000 --- a/vendor/cloud.google.com/go/pubsub/service.go +++ /dev/null @@ -1,283 +0,0 @@ -// Copyright 2016 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package pubsub - -import ( - "encoding/base64" - "fmt" - "net/http" - "time" - - "golang.org/x/net/context" - "google.golang.org/api/googleapi" - raw "google.golang.org/api/pubsub/v1" -) - -// service provides an internal abstraction to isolate the generated -// PubSub API; most of this package uses this interface instead. -// The single implementation, *apiService, contains all the knowledge -// of the generated PubSub API (except for that present in legacy code).
-type service interface { - createSubscription(ctx context.Context, topicName, subName string, ackDeadline time.Duration, pushConfig *PushConfig) error - getSubscriptionConfig(ctx context.Context, subName string) (*SubscriptionConfig, string, error) - listProjectSubscriptions(ctx context.Context, projName, pageTok string) (*stringsPage, error) - deleteSubscription(ctx context.Context, name string) error - subscriptionExists(ctx context.Context, name string) (bool, error) - modifyPushConfig(ctx context.Context, subName string, conf *PushConfig) error - - createTopic(ctx context.Context, name string) error - deleteTopic(ctx context.Context, name string) error - topicExists(ctx context.Context, name string) (bool, error) - listProjectTopics(ctx context.Context, projName, pageTok string) (*stringsPage, error) - listTopicSubscriptions(ctx context.Context, topicName, pageTok string) (*stringsPage, error) - - modifyAckDeadline(ctx context.Context, subName string, deadline time.Duration, ackIDs []string) error - fetchMessages(ctx context.Context, subName string, maxMessages int64) ([]*Message, error) - publishMessages(ctx context.Context, topicName string, msgs []*Message) ([]string, error) - - // splitAckIDs divides ackIDs into - // * a batch of a size which is suitable for passing to acknowledge or - // modifyAckDeadline, and - // * the rest. - splitAckIDs(ackIDs []string) ([]string, []string) - - // acknowledge ACKs the IDs in ackIDs. - acknowledge(ctx context.Context, subName string, ackIDs []string) error -} - -type apiService struct { - s *raw.Service -} - -func newPubSubService(client *http.Client, endpoint string) (*apiService, error) { - s, err := raw.New(client) - if err != nil { - return nil, err - } - s.BasePath = endpoint - - return &apiService{s: s}, nil -} - -func (s *apiService) createSubscription(ctx context.Context, topicName, subName string, ackDeadline time.Duration, pushConfig *PushConfig) error { - var rawPushConfig *raw.PushConfig - if pushConfig != nil { - rawPushConfig = &raw.PushConfig{ - Attributes: pushConfig.Attributes, - PushEndpoint: pushConfig.Endpoint, - } - } - rawSub := &raw.Subscription{ - AckDeadlineSeconds: int64(ackDeadline.Seconds()), - PushConfig: rawPushConfig, - Topic: topicName, - } - _, err := s.s.Projects.Subscriptions.Create(subName, rawSub).Context(ctx).Do() - return err -} - -func (s *apiService) getSubscriptionConfig(ctx context.Context, subName string) (*SubscriptionConfig, string, error) { - rawSub, err := s.s.Projects.Subscriptions.Get(subName).Context(ctx).Do() - if err != nil { - return nil, "", err - } - sub := &SubscriptionConfig{ - AckDeadline: time.Second * time.Duration(rawSub.AckDeadlineSeconds), - PushConfig: PushConfig{ - Endpoint: rawSub.PushConfig.PushEndpoint, - Attributes: rawSub.PushConfig.Attributes, - }, - } - return sub, rawSub.Topic, err -} - -// stringsPage contains a list of strings and a token for fetching the next page. 
-type stringsPage struct { - strings []string - tok string -} - -func (s *apiService) listProjectSubscriptions(ctx context.Context, projName, pageTok string) (*stringsPage, error) { - resp, err := s.s.Projects.Subscriptions.List(projName).PageToken(pageTok).Context(ctx).Do() - if err != nil { - return nil, err - } - subs := []string{} - for _, sub := range resp.Subscriptions { - subs = append(subs, sub.Name) - } - return &stringsPage{subs, resp.NextPageToken}, nil -} - -func (s *apiService) deleteSubscription(ctx context.Context, name string) error { - _, err := s.s.Projects.Subscriptions.Delete(name).Context(ctx).Do() - return err -} - -func (s *apiService) subscriptionExists(ctx context.Context, name string) (bool, error) { - _, err := s.s.Projects.Subscriptions.Get(name).Context(ctx).Do() - if err == nil { - return true, nil - } - if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound { - return false, nil - } - return false, err -} - -func (s *apiService) createTopic(ctx context.Context, name string) error { - // Note: The raw API expects a Topic body, but ignores it. - _, err := s.s.Projects.Topics.Create(name, &raw.Topic{}). - Context(ctx). - Do() - return err -} - -func (s *apiService) listProjectTopics(ctx context.Context, projName, pageTok string) (*stringsPage, error) { - resp, err := s.s.Projects.Topics.List(projName).PageToken(pageTok).Context(ctx).Do() - if err != nil { - return nil, err - } - topics := []string{} - for _, topic := range resp.Topics { - topics = append(topics, topic.Name) - } - return &stringsPage{topics, resp.NextPageToken}, nil -} - -func (s *apiService) deleteTopic(ctx context.Context, name string) error { - _, err := s.s.Projects.Topics.Delete(name).Context(ctx).Do() - return err -} - -func (s *apiService) topicExists(ctx context.Context, name string) (bool, error) { - _, err := s.s.Projects.Topics.Get(name).Context(ctx).Do() - if err == nil { - return true, nil - } - if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound { - return false, nil - } - return false, err -} - -func (s *apiService) listTopicSubscriptions(ctx context.Context, topicName, pageTok string) (*stringsPage, error) { - resp, err := s.s.Projects.Topics.Subscriptions.List(topicName).PageToken(pageTok).Context(ctx).Do() - if err != nil { - return nil, err - } - subs := []string{} - for _, sub := range resp.Subscriptions { - subs = append(subs, sub) - } - return &stringsPage{subs, resp.NextPageToken}, nil -} - -func (s *apiService) modifyAckDeadline(ctx context.Context, subName string, deadline time.Duration, ackIDs []string) error { - req := &raw.ModifyAckDeadlineRequest{ - AckDeadlineSeconds: int64(deadline.Seconds()), - AckIds: ackIDs, - } - _, err := s.s.Projects.Subscriptions.ModifyAckDeadline(subName, req). - Context(ctx). - Do() - return err -} - -// maxPayload is the maximum number of bytes to devote to actual ids in -// acknowledgement or modifyAckDeadline requests. Note that there is ~1K of -// constant overhead, plus 3 bytes per ID (two quotes and a comma). The total -// payload size may not exceed 512K. -const maxPayload = 500 * 1024 -const overheadPerID = 3 // 3 bytes of JSON - -// splitAckIDs splits ids into two slices, the first of which contains at most maxPayload bytes of ackID data. 
-func (s *apiService) splitAckIDs(ids []string) ([]string, []string) { - total := 0 - for i, id := range ids { - total += len(id) + overheadPerID - if total > maxPayload { - return ids[:i], ids[i:] - } - } - return ids, nil -} - -func (s *apiService) acknowledge(ctx context.Context, subName string, ackIDs []string) error { - req := &raw.AcknowledgeRequest{ - AckIds: ackIDs, - } - _, err := s.s.Projects.Subscriptions.Acknowledge(subName, req). - Context(ctx). - Do() - return err -} - -func (s *apiService) fetchMessages(ctx context.Context, subName string, maxMessages int64) ([]*Message, error) { - req := &raw.PullRequest{ - MaxMessages: maxMessages, - } - resp, err := s.s.Projects.Subscriptions.Pull(subName, req). - Context(ctx). - Do() - - if err != nil { - return nil, err - } - - msgs := make([]*Message, 0, len(resp.ReceivedMessages)) - for i, m := range resp.ReceivedMessages { - msg, err := toMessage(m) - if err != nil { - return nil, fmt.Errorf("pubsub: cannot decode the retrieved message at index: %d, message: %+v", i, m) - } - msgs = append(msgs, msg) - } - - return msgs, nil -} - -func (s *apiService) publishMessages(ctx context.Context, topicName string, msgs []*Message) ([]string, error) { - rawMsgs := make([]*raw.PubsubMessage, len(msgs)) - for i, msg := range msgs { - rawMsgs[i] = &raw.PubsubMessage{ - Data: base64.StdEncoding.EncodeToString(msg.Data), - Attributes: msg.Attributes, - } - } - - req := &raw.PublishRequest{Messages: rawMsgs} - resp, err := s.s.Projects.Topics.Publish(topicName, req). - Context(ctx). - Do() - - if err != nil { - return nil, err - } - return resp.MessageIds, nil -} - -func (s *apiService) modifyPushConfig(ctx context.Context, subName string, conf *PushConfig) error { - req := &raw.ModifyPushConfigRequest{ - PushConfig: &raw.PushConfig{ - Attributes: conf.Attributes, - PushEndpoint: conf.Endpoint, - }, - } - _, err := s.s.Projects.Subscriptions.ModifyPushConfig(subName, req). - Context(ctx). - Do() - return err -} diff --git a/vendor/cloud.google.com/go/pubsub/subscription.go b/vendor/cloud.google.com/go/pubsub/subscription.go deleted file mode 100644 index 61bbec93fcf1..000000000000 --- a/vendor/cloud.google.com/go/pubsub/subscription.go +++ /dev/null @@ -1,265 +0,0 @@ -// Copyright 2016 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package pubsub - -import ( - "errors" - "fmt" - "strings" - "time" - - "golang.org/x/net/context" -) - -// The default period for which to automatically extend Message acknowledgement deadlines. -const DefaultMaxExtension = 10 * time.Minute - -// The default maximum number of messages that are prefetched from the server. -const DefaultMaxPrefetch = 100 - -// Subscription is a reference to a PubSub subscription. -type Subscription struct { - s service - - // The fully qualified identifier for the subscription, in the format "projects/<projid>/subscriptions/<name>" - name string -} - -// Subscription creates a reference to a subscription.
-func (c *Client) Subscription(id string) *Subscription { - return &Subscription{ - s: c.s, - name: fmt.Sprintf("projects/%s/subscriptions/%s", c.projectID, id), - } -} - -// String returns the globally unique printable name of the subscription. -func (s *Subscription) String() string { - return s.name -} - -// ID returns the unique identifier of the subscription within its project. -func (s *Subscription) ID() string { - slash := strings.LastIndex(s.name, "/") - if slash == -1 { - // name is not a fully-qualified name. - panic("bad subscription name") - } - return s.name[slash+1:] -} - -// Subscriptions returns an iterator which returns all of the subscriptions for the client's project. -func (c *Client) Subscriptions(ctx context.Context) *SubscriptionIterator { - return &SubscriptionIterator{ - s: c.s, - stringsIterator: stringsIterator{ - ctx: ctx, - fetch: func(ctx context.Context, tok string) (*stringsPage, error) { - return c.s.listProjectSubscriptions(ctx, c.fullyQualifiedProjectName(), tok) - }, - }, - } -} - -// SubscriptionIterator is an iterator that returns a series of subscriptions. -type SubscriptionIterator struct { - s service - stringsIterator -} - -// Next returns the next subscription. If there are no more subscriptions, Done will be returned. -func (subs *SubscriptionIterator) Next() (*Subscription, error) { - subName, err := subs.stringsIterator.Next() - if err != nil { - return nil, err - } - return &Subscription{s: subs.s, name: subName}, nil -} - -// PushConfig contains configuration for subscriptions that operate in push mode. -type PushConfig struct { - // A URL locating the endpoint to which messages should be pushed. - Endpoint string - - // Endpoint configuration attributes. See https://cloud.google.com/pubsub/reference/rest/v1/projects.subscriptions#PushConfig.FIELDS.attributes for more details. - Attributes map[string]string -} - -// SubscriptionConfig contains the configuration of a subscription. -type SubscriptionConfig struct { - Topic *Topic - PushConfig PushConfig - - // The default maximum time after a subscriber receives a message - // before the subscriber should acknowledge the message. Note: - // messages which are obtained via an Iterator need not be acknowledged - // within this deadline, as the deadline will be automatically - // extended. - AckDeadline time.Duration -} - -// Delete deletes the subscription. -func (s *Subscription) Delete(ctx context.Context) error { - return s.s.deleteSubscription(ctx, s.name) -} - -// Exists reports whether the subscription exists on the server. -func (s *Subscription) Exists(ctx context.Context) (bool, error) { - return s.s.subscriptionExists(ctx, s.name) -} - -// Config fetches the current configuration for the subscription. -func (s *Subscription) Config(ctx context.Context) (*SubscriptionConfig, error) { - conf, topicName, err := s.s.getSubscriptionConfig(ctx, s.name) - if err != nil { - return nil, err - } - conf.Topic = &Topic{ - s: s.s, - name: topicName, - } - return conf, nil -} - -// Pull returns an Iterator that can be used to fetch Messages. The Iterator -// will automatically extend the ack deadline of all fetched Messages, for the -// period specified by DefaultMaxExtension. This may be overridden by supplying -// a MaxExtension pull option. -// -// If ctx is cancelled or exceeds its deadline, outstanding acks or deadline -// extensions will fail. -// -// The caller must call Stop on the Iterator once finished with it.
-func (s *Subscription) Pull(ctx context.Context, opts ...PullOption) (*Iterator, error) { - config, err := s.Config(ctx) - if err != nil { - return nil, err - } - po := processPullOptions(opts) - po.ackDeadline = config.AckDeadline - return newIterator(ctx, s.s, s.name, po), nil -} - -// ModifyPushConfig updates the endpoint URL and other attributes of a push subscription. -func (s *Subscription) ModifyPushConfig(ctx context.Context, conf *PushConfig) error { - if conf == nil { - return errors.New("must supply non-nil PushConfig") - } - - return s.s.modifyPushConfig(ctx, s.name, conf) -} - -// A PullOption is an optional argument to Subscription.Pull. -type PullOption interface { - setOptions(o *pullOptions) -} - -type pullOptions struct { - // maxExtension is the maximum period for which the iterator should - // automatically extend the ack deadline for each message. - maxExtension time.Duration - - // maxPrefetch is the maximum number of Messages to have in flight, to - // be returned by Iterator.Next. - maxPrefetch int - - // ackDeadline is the default ack deadline for the subscription. Not - // configurable via a PullOption. - ackDeadline time.Duration -} - -func processPullOptions(opts []PullOption) *pullOptions { - po := &pullOptions{ - maxExtension: DefaultMaxExtension, - maxPrefetch: DefaultMaxPrefetch, - } - - for _, o := range opts { - o.setOptions(po) - } - - return po -} - -type maxPrefetch int - -func (max maxPrefetch) setOptions(o *pullOptions) { - if o.maxPrefetch = int(max); o.maxPrefetch < 1 { - o.maxPrefetch = 1 - } -} - -// MaxPrefetch returns a PullOption that limits Message prefetching. -// -// For performance reasons, the pubsub library may prefetch a pool of Messages -// to be returned serially from Iterator.Next. MaxPrefetch is used to limit the -// size of this pool. -// -// If num is less than 1, it will be treated as if it were 1. -func MaxPrefetch(num int) PullOption { - return maxPrefetch(num) -} - -type maxExtension time.Duration - -func (max maxExtension) setOptions(o *pullOptions) { - if o.maxExtension = time.Duration(max); o.maxExtension < 0 { - o.maxExtension = 0 - } -} - -// MaxExtension returns a PullOption that limits how long ack deadlines are -// extended for. -// -// An Iterator will automatically extend the ack deadline of all fetched -// Messages for the duration specified. Automatic deadline extension may be -// disabled by specifying a duration of 0. -func MaxExtension(duration time.Duration) PullOption { - return maxExtension(duration) -} - -// CreateSubscription creates a new subscription on a topic. -// -// name is the name of the subscription to create. It must start with a letter, -// and contain only letters ([A-Za-z]), numbers ([0-9]), dashes (-), -// underscores (_), periods (.), tildes (~), plus (+) or percent signs (%). It -// must be between 3 and 255 characters in length, and must not start with -// "goog". -// -// topic is the topic from which the subscription should receive messages. It -// need not belong to the same project as the subscription. -// -// ackDeadline is the maximum time after a subscriber receives a message before -// the subscriber should acknowledge the message. It must be between 10 and 600 -// seconds (inclusive), and is rounded down to the nearest second. If the -// provided ackDeadline is 0, then the default value of 10 seconds is used. -// Note: messages which are obtained via an Iterator need not be acknowledged -// within this deadline, as the deadline will be automatically extended.
-// -// pushConfig may be set to configure this subscription for push delivery. -// -// If the subscription already exists an error will be returned. -func (c *Client) CreateSubscription(ctx context.Context, id string, topic *Topic, ackDeadline time.Duration, pushConfig *PushConfig) (*Subscription, error) { - if ackDeadline == 0 { - ackDeadline = 10 * time.Second - } - if d := ackDeadline.Seconds(); d < 10 || d > 600 { - return nil, fmt.Errorf("ack deadline must be between 10 and 600 seconds; got: %v", d) - } - - sub := c.Subscription(id) - err := c.s.createSubscription(ctx, topic.name, sub.name, ackDeadline, pushConfig) - return sub, err -} diff --git a/vendor/cloud.google.com/go/pubsub/topic.go b/vendor/cloud.google.com/go/pubsub/topic.go deleted file mode 100644 index 3e85104ad6fc..000000000000 --- a/vendor/cloud.google.com/go/pubsub/topic.go +++ /dev/null @@ -1,137 +0,0 @@ -// Copyright 2016 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package pubsub - -import ( - "fmt" - "strings" - - "golang.org/x/net/context" -) - -const MaxPublishBatchSize = 1000 - -// Topic is a reference to a PubSub topic. -type Topic struct { - s service - - // The fully qualified identifier for the topic, in the format "projects/<projid>/topics/<name>" - name string -} - -// CreateTopic creates a new topic. -// The specified topic ID must start with a letter, and contain only letters -// ([A-Za-z]), numbers ([0-9]), dashes (-), underscores (_), periods (.), -// tildes (~), plus (+) or percent signs (%). It must be between 3 and 255 -// characters in length, and must not start with "goog". -// If the topic already exists an error will be returned. -func (c *Client) CreateTopic(ctx context.Context, id string) (*Topic, error) { - t := c.Topic(id) - err := c.s.createTopic(ctx, t.name) - return t, err -} - -// Topic creates a reference to a topic. -func (c *Client) Topic(id string) *Topic { - return &Topic{ - s: c.s, - name: fmt.Sprintf("projects/%s/topics/%s", c.projectID, id), - } -} - -// Topics returns an iterator which returns all of the topics for the client's project. -func (c *Client) Topics(ctx context.Context) *TopicIterator { - return &TopicIterator{ - s: c.s, - stringsIterator: stringsIterator{ - ctx: ctx, - fetch: func(ctx context.Context, tok string) (*stringsPage, error) { - return c.s.listProjectTopics(ctx, c.fullyQualifiedProjectName(), tok) - }, - }, - } -} - -// TopicIterator is an iterator that returns a series of topics. -type TopicIterator struct { - s service - stringsIterator -} - -// Next returns the next topic. If there are no more topics, Done will be returned. -func (tps *TopicIterator) Next() (*Topic, error) { - topicName, err := tps.stringsIterator.Next() - if err != nil { - return nil, err - } - return &Topic{s: tps.s, name: topicName}, nil -} - -// ID returns the unique identifier of the topic within its project. -func (t *Topic) ID() string { - slash := strings.LastIndex(t.name, "/") - if slash == -1 { - // name is not a fully-qualified name.
- panic("bad topic name") - } - return t.name[slash+1:] -} - -// String returns the printable globally unique name for the topic. -func (t *Topic) String() string { - return t.name -} - -// Delete deletes the topic. -func (t *Topic) Delete(ctx context.Context) error { - return t.s.deleteTopic(ctx, t.name) -} - -// Exists reports whether the topic exists on the server. -func (t *Topic) Exists(ctx context.Context) (bool, error) { - if t.name == "_deleted-topic_" { - return false, nil - } - - return t.s.topicExists(ctx, t.name) -} - -// Subscriptions returns an iterator which returns the subscriptions for this topic. -func (t *Topic) Subscriptions(ctx context.Context) *SubscriptionIterator { - // NOTE: zero or more Subscriptions that are ultimately returned by this - // Subscriptions iterator may belong to a different project to t. - return &SubscriptionIterator{ - s: t.s, - stringsIterator: stringsIterator{ - ctx: ctx, - fetch: func(ctx context.Context, tok string) (*stringsPage, error) { - return t.s.listTopicSubscriptions(ctx, t.name, tok) - }, - }, - } -} - -// Publish publishes the supplied Messages to the topic. -// If successful, the server-assigned message IDs are returned in the same order as the supplied Messages. -// At most MaxPublishBatchSize messages may be supplied. -func (t *Topic) Publish(ctx context.Context, msgs ...*Message) ([]string, error) { - if len(msgs) == 0 { - return nil, nil - } - if len(msgs) > MaxPublishBatchSize { - return nil, fmt.Errorf("pubsub: got %d messages, but maximum batch size is %d", len(msgs), MaxPublishBatchSize) - } - return t.s.publishMessages(ctx, t.name, msgs) -} diff --git a/vendor/github.com/NYTimes/gziphandler/BUILD b/vendor/github.com/NYTimes/gziphandler/BUILD index d89ca0e349ea..1f5ab60b24da 100644 --- a/vendor/github.com/NYTimes/gziphandler/BUILD +++ b/vendor/github.com/NYTimes/gziphandler/BUILD @@ -1,14 +1,18 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "go_default_library", srcs = ["gzip.go"], importpath = "github.com/NYTimes/gziphandler", + visibility = ["//visibility:public"], +) + +go_test( + name = "go_default_test", + srcs = ["gzip_test.go"], + importpath = "github.com/NYTimes/gziphandler", + library = ":go_default_library", + deps = ["//vendor/github.com/stretchr/testify/assert:go_default_library"], ) filegroup( @@ -22,4 +26,5 @@ filegroup( name = "all-srcs", srcs = [":package-srcs"], tags = ["automanaged"], + visibility = ["//visibility:public"], ) diff --git a/vendor/github.com/NYTimes/gziphandler/gzip_test.go b/vendor/github.com/NYTimes/gziphandler/gzip_test.go new file mode 100644 index 000000000000..9a62bcbaa2b9 --- /dev/null +++ b/vendor/github.com/NYTimes/gziphandler/gzip_test.go @@ -0,0 +1,134 @@ +package gziphandler + +import ( + "bytes" + "compress/gzip" + "io" + "io/ioutil" + "net/http" + "net/http/httptest" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestParseEncodings(t *testing.T) { + + examples := map[string]codings{ + + // Examples from RFC 2616 + "compress, gzip": codings{"compress": 1.0, "gzip": 1.0}, + "": codings{}, + "*": codings{"*": 1.0}, + "compress;q=0.5, gzip;q=1.0": codings{"compress": 0.5, "gzip": 1.0}, + "gzip;q=1.0, identity; q=0.5, *;q=0": codings{"gzip": 1.0, "identity": 0.5, "*": 0.0}, + + // More random stuff + "AAA;q=1": codings{"aaa": 1.0}, + "BBB ; q = 2": codings{"bbb": 1.0}, + } + + for eg, exp := range examples { + 
act, _ := parseEncodings(eg) + assert.Equal(t, exp, act) + } +} + +func TestGzipHandler(t *testing.T) { + testBody := "aaabbbccc" + + // This just exists to provide something for GzipHandler to wrap. + handler := newTestHandler(testBody) + + // requests without accept-encoding are passed along as-is + + req1, _ := http.NewRequest("GET", "/whatever", nil) + res1 := httptest.NewRecorder() + handler.ServeHTTP(res1, req1) + + assert.Equal(t, 200, res1.Code) + assert.Equal(t, "", res1.Header().Get("Content-Encoding")) + assert.Equal(t, "Accept-Encoding", res1.Header().Get("Vary")) + assert.Equal(t, testBody, res1.Body.String()) + + // but requests with accept-encoding:gzip are compressed if possible + + req2, _ := http.NewRequest("GET", "/whatever", nil) + req2.Header.Set("Accept-Encoding", "gzip") + res2 := httptest.NewRecorder() + handler.ServeHTTP(res2, req2) + + assert.Equal(t, 200, res2.Code) + assert.Equal(t, "gzip", res2.Header().Get("Content-Encoding")) + assert.Equal(t, "Accept-Encoding", res2.Header().Get("Vary")) + assert.Equal(t, gzipStr(testBody), res2.Body.Bytes()) + + // content-type header is correctly set based on uncompressed body + + req3, _ := http.NewRequest("GET", "/whatever", nil) + req3.Header.Set("Accept-Encoding", "gzip") + res3 := httptest.NewRecorder() + handler.ServeHTTP(res3, req3) + + assert.Equal(t, http.DetectContentType([]byte(testBody)), res3.Header().Get("Content-Type")) +} + +// -------------------------------------------------------------------- + +func BenchmarkGzipHandler_S2k(b *testing.B) { benchmark(b, false, 2048) } +func BenchmarkGzipHandler_S20k(b *testing.B) { benchmark(b, false, 20480) } +func BenchmarkGzipHandler_S100k(b *testing.B) { benchmark(b, false, 102400) } +func BenchmarkGzipHandler_P2k(b *testing.B) { benchmark(b, true, 2048) } +func BenchmarkGzipHandler_P20k(b *testing.B) { benchmark(b, true, 20480) } +func BenchmarkGzipHandler_P100k(b *testing.B) { benchmark(b, true, 102400) } + +// -------------------------------------------------------------------- + +func gzipStr(s string) []byte { + var b bytes.Buffer + w := gzip.NewWriter(&b) + io.WriteString(w, s) + w.Close() + return b.Bytes() +} + +func benchmark(b *testing.B, parallel bool, size int) { + bin, err := ioutil.ReadFile("testdata/benchmark.json") + if err != nil { + b.Fatal(err) + } + + req, _ := http.NewRequest("GET", "/whatever", nil) + req.Header.Set("Accept-Encoding", "gzip") + handler := newTestHandler(string(bin[:size])) + + if parallel { + b.ResetTimer() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + runBenchmark(b, req, handler) + } + }) + } else { + b.ResetTimer() + for i := 0; i < b.N; i++ { + runBenchmark(b, req, handler) + } + } +} + +func runBenchmark(b *testing.B, req *http.Request, handler http.Handler) { + res := httptest.NewRecorder() + handler.ServeHTTP(res, req) + if code := res.Code; code != 200 { + b.Fatalf("Expected 200 but got %d", code) + } else if blen := res.Body.Len(); blen < 500 { + b.Fatalf("Expected complete response body, but got %d bytes", blen) + } +} + +func newTestHandler(body string) http.Handler { + return GzipHandler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + io.WriteString(w, body) + })) +} diff --git a/vendor/github.com/arbovm/levenshtein/BUILD b/vendor/github.com/arbovm/levenshtein/BUILD deleted file mode 100644 index 1c14587f3934..000000000000 --- a/vendor/github.com/arbovm/levenshtein/BUILD +++ /dev/null @@ -1,25 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -load( - 
"@io_bazel_rules_go//go:def.bzl", - "go_library", -) - -go_library( - name = "go_default_library", - srcs = ["levenshtein.go"], - importpath = "github.com/arbovm/levenshtein", -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], -) diff --git a/vendor/github.com/arbovm/levenshtein/LICENSE b/vendor/github.com/arbovm/levenshtein/LICENSE deleted file mode 100644 index 7474a3bd807f..000000000000 --- a/vendor/github.com/arbovm/levenshtein/LICENSE +++ /dev/null @@ -1,28 +0,0 @@ -Copyright (c) 2015, Arbo von Monkiewitsch All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - -1. Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - -2. Redistributions in binary form must reproduce the above copyright -notice, this list of conditions and the following disclaimer in the -documentation and/or other materials provided with the distribution. - -3. Neither the name of the copyright holder nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/arbovm/levenshtein/README.md b/vendor/github.com/arbovm/levenshtein/README.md deleted file mode 100644 index 61ededc8be5a..000000000000 --- a/vendor/github.com/arbovm/levenshtein/README.md +++ /dev/null @@ -1,36 +0,0 @@ -Levenshtein Distance -==================== - -[Go](http://golang.org) package to calculate the [Levenshtein Distance](http://en.wikipedia.org/wiki/Levenshtein_distance) - -Install -------- - - go get github.com/arbovm/levenshtein - -Example -------- - -```go -package main - -import ( - "fmt" - "github.com/arbovm/levenshtein" -) - -func main() { - s1 := "kitten" - s2 := "sitting" - fmt.Printf("The distance between %v and %v is %v\n", - s1, s2, levenshtein.Distance(s1, s2)) - // -> The distance between kitten and sitting is 3 -} - -``` - -Documentation -------------- - -Located [here](http://godoc.org/github.com/arbovm/levenshtein) - diff --git a/vendor/github.com/arbovm/levenshtein/levenshtein.go b/vendor/github.com/arbovm/levenshtein/levenshtein.go deleted file mode 100644 index 7cccfe43201f..000000000000 --- a/vendor/github.com/arbovm/levenshtein/levenshtein.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright (c) 2015, Arbo von Monkiewitsch All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package levenshtein - -// The Levenshtein distance between two strings is defined as the minimum -// number of edits needed to transform one string into the other, with the -// allowable edit operations being insertion, deletion, or substitution of -// a single character -// http://en.wikipedia.org/wiki/Levenshtein_distance -// -// This implemention is optimized to use O(min(m,n)) space. -// It is based on the optimized C version found here: -// http://en.wikibooks.org/wiki/Algorithm_implementation/Strings/Levenshtein_distance#C -func Distance(str1, str2 string) int { - var cost, lastdiag, olddiag int - s1 := []rune(str1) - s2 := []rune(str2) - - len_s1 := len(s1) - len_s2 := len(s2) - - column := make([]int, len_s1+1) - - for y := 1; y <= len_s1; y++ { - column[y] = y - } - - for x := 1; x <= len_s2; x++ { - column[0] = x - lastdiag = x - 1 - for y := 1; y <= len_s1; y++ { - olddiag = column[y] - cost = 0 - if s1[y-1] != s2[x-1] { - cost = 1 - } - column[y] = min( - column[y]+1, - column[y-1]+1, - lastdiag+cost) - lastdiag = olddiag - } - } - return column[len_s1] -} - -func min(a, b, c int) int { - if a < b { - if a < c { - return a - } - } else { - if b < c { - return b - } - } - return c -} diff --git a/vendor/github.com/beorn7/perks/.gitignore b/vendor/github.com/beorn7/perks/.gitignore new file mode 100644 index 000000000000..1bd9209aa195 --- /dev/null +++ b/vendor/github.com/beorn7/perks/.gitignore @@ -0,0 +1,2 @@ +*.test +*.prof diff --git a/vendor/github.com/beorn7/perks/README.md b/vendor/github.com/beorn7/perks/README.md new file mode 100644 index 000000000000..fc05777701ab --- /dev/null +++ b/vendor/github.com/beorn7/perks/README.md @@ -0,0 +1,31 @@ +# Perks for Go (golang.org) + +Perks contains the Go package quantile that computes approximate quantiles over +an unbounded data stream within low memory and CPU bounds. + +For more information and examples, see: +http://godoc.org/github.com/bmizerany/perks + +A very special thank you and shout out to Graham Cormode (Rutgers University), +Flip Korn (AT&T Labs–Research), S. Muthukrishnan (Rutgers University), and +Divesh Srivastava (AT&T Labs–Research) for their research and publication of +[Effective Computation of Biased Quantiles over Data Streams](http://www.cs.rutgers.edu/~muthu/bquant.pdf) + +Thank you, also: +* Armon Dadgar (@armon) +* Andrew Gerrand (@nf) +* Brad Fitzpatrick (@bradfitz) +* Keith Rarick (@kr) + +FAQ: + +Q: Why not move the quantile package into the project root? +A: I want to add more packages to perks later. + +Copyright (C) 2013 Blake Mizerany + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/beorn7/perks/quantile/BUILD b/vendor/github.com/beorn7/perks/quantile/BUILD index 7c7587efed01..975dd8d0db53 100644 --- a/vendor/github.com/beorn7/perks/quantile/BUILD +++ b/vendor/github.com/beorn7/perks/quantile/BUILD @@ -1,14 +1,27 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "go_default_library", srcs = ["stream.go"], importpath = "github.com/beorn7/perks/quantile", + visibility = ["//visibility:public"], +) + +go_test( + name = "go_default_test", + srcs = [ + "bench_test.go", + "stream_test.go", + ], + importpath = "github.com/beorn7/perks/quantile", + library = ":go_default_library", +) + +go_test( + name = "go_default_xtest", + srcs = ["example_test.go"], + importpath = "github.com/beorn7/perks/quantile_test", + deps = [":go_default_library"], ) filegroup( @@ -22,4 +35,5 @@ filegroup( name = "all-srcs", srcs = [":package-srcs"], tags = ["automanaged"], + visibility = ["//visibility:public"], ) diff --git a/vendor/github.com/beorn7/perks/quantile/bench_test.go b/vendor/github.com/beorn7/perks/quantile/bench_test.go new file mode 100644 index 000000000000..0bd0e4e77521 --- /dev/null +++ b/vendor/github.com/beorn7/perks/quantile/bench_test.go @@ -0,0 +1,63 @@ +package quantile + +import ( + "testing" +) + +func BenchmarkInsertTargeted(b *testing.B) { + b.ReportAllocs() + + s := NewTargeted(Targets) + b.ResetTimer() + for i := float64(0); i < float64(b.N); i++ { + s.Insert(i) + } +} + +func BenchmarkInsertTargetedSmallEpsilon(b *testing.B) { + s := NewTargeted(TargetsSmallEpsilon) + b.ResetTimer() + for i := float64(0); i < float64(b.N); i++ { + s.Insert(i) + } +} + +func BenchmarkInsertBiased(b *testing.B) { + s := NewLowBiased(0.01) + b.ResetTimer() + for i := float64(0); i < float64(b.N); i++ { + s.Insert(i) + } +} + +func BenchmarkInsertBiasedSmallEpsilon(b *testing.B) { + s := NewLowBiased(0.0001) + b.ResetTimer() + for i := float64(0); i < float64(b.N); i++ { + s.Insert(i) + } +} + +func BenchmarkQuery(b *testing.B) { + s := NewTargeted(Targets) + for i := float64(0); i < 1e6; i++ { + s.Insert(i) + } + b.ResetTimer() + n := float64(b.N) + for i := float64(0); i < n; i++ { + s.Query(i / n) + } +} + +func BenchmarkQuerySmallEpsilon(b *testing.B) { + s := NewTargeted(TargetsSmallEpsilon) + for i := float64(0); i < 1e6; i++ { + s.Insert(i) + } + b.ResetTimer() + n := float64(b.N) + for i := float64(0); i < n; i++ { + s.Query(i / n) + } +} diff --git a/vendor/github.com/beorn7/perks/quantile/example_test.go b/vendor/github.com/beorn7/perks/quantile/example_test.go new file mode 100644 index 000000000000..ab3293aaf2a0 --- /dev/null +++ b/vendor/github.com/beorn7/perks/quantile/example_test.go @@ -0,0 +1,121 @@ +// +build go1.1 + +package quantile_test + +import ( + "bufio" + "fmt" + "log" + "os" + "strconv" + "time" + + "github.com/beorn7/perks/quantile" +) + +func Example_simple() { + ch := make(chan float64) + go sendFloats(ch) + + // Compute the 50th, 90th, and 99th percentile. 
+ q := quantile.NewTargeted(map[float64]float64{ + 0.50: 0.005, + 0.90: 0.001, + 0.99: 0.0001, + }) + for v := range ch { + q.Insert(v) + } + + fmt.Println("perc50:", q.Query(0.50)) + fmt.Println("perc90:", q.Query(0.90)) + fmt.Println("perc99:", q.Query(0.99)) + fmt.Println("count:", q.Count()) + // Output: + // perc50: 5 + // perc90: 16 + // perc99: 223 + // count: 2388 +} + +func Example_mergeMultipleStreams() { + // Scenario: + // We have multiple database shards. On each shard, there is a process + // collecting query response times from the database logs and inserting + // them into a Stream (created via NewTargeted(0.90)), much like the + // Simple example. These processes expose a network interface for us to + // ask them to serialize and send us the results of their + // Stream.Samples so we may Merge and Query them. + // + // NOTES: + // * These sample sets are small, allowing us to get them + // across the network much faster than sending the entire list of data + // points. + // + // * For this to work correctly, we must supply the same quantiles + // a priori the process collecting the samples supplied to NewTargeted, + // even if we do not plan to query them all here. + ch := make(chan quantile.Samples) + getDBQuerySamples(ch) + q := quantile.NewTargeted(map[float64]float64{0.90: 0.001}) + for samples := range ch { + q.Merge(samples) + } + fmt.Println("perc90:", q.Query(0.90)) +} + +func Example_window() { + // Scenario: We want the 90th, 95th, and 99th percentiles for each + // minute. + + ch := make(chan float64) + go sendStreamValues(ch) + + tick := time.NewTicker(1 * time.Minute) + q := quantile.NewTargeted(map[float64]float64{ + 0.90: 0.001, + 0.95: 0.0005, + 0.99: 0.0001, + }) + for { + select { + case t := <-tick.C: + flushToDB(t, q.Samples()) + q.Reset() + case v := <-ch: + q.Insert(v) + } + } +} + +func sendStreamValues(ch chan float64) { + // Use your imagination +} + +func flushToDB(t time.Time, samples quantile.Samples) { + // Use your imagination +} + +// This is a stub for the above example. In reality this would hit the remote +// servers via http or something like it. 
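A self-contained sketch of that merge flow may help here, assuming an encoding/gob hop between a shard collector and the aggregator; the transport and the in-memory buffer are illustrative, and only NewTargeted, Insert, Samples, Merge, and Query come from the vendored package. The stubbed getDBQuerySamples below stands in for exactly this kind of transport in the example file itself.

```go
package main

import (
	"bytes"
	"encoding/gob"
	"fmt"

	"github.com/beorn7/perks/quantile"
)

func main() {
	// Shard side: summarize raw observations, then ship the compact
	// sample set instead of every data point.
	s := quantile.NewTargeted(map[float64]float64{0.90: 0.001})
	for i := 1; i <= 1000; i++ {
		s.Insert(float64(i))
	}
	var wire bytes.Buffer
	if err := gob.NewEncoder(&wire).Encode(s.Samples()); err != nil {
		panic(err)
	}

	// Aggregator side: decode the samples and merge them into a stream
	// built with the same target quantiles the shards used.
	var samples quantile.Samples
	if err := gob.NewDecoder(&wire).Decode(&samples); err != nil {
		panic(err)
	}
	agg := quantile.NewTargeted(map[float64]float64{0.90: 0.001})
	agg.Merge(samples)
	fmt.Println("perc90:", agg.Query(0.90))
}
```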
+func getDBQuerySamples(ch chan quantile.Samples) {} + +func sendFloats(ch chan<- float64) { + f, err := os.Open("exampledata.txt") + if err != nil { + log.Fatal(err) + } + sc := bufio.NewScanner(f) + for sc.Scan() { + b := sc.Bytes() + v, err := strconv.ParseFloat(string(b), 64) + if err != nil { + log.Fatal(err) + } + ch <- v + } + if sc.Err() != nil { + log.Fatal(sc.Err()) + } + close(ch) +} diff --git a/vendor/github.com/beorn7/perks/quantile/stream_test.go b/vendor/github.com/beorn7/perks/quantile/stream_test.go new file mode 100644 index 000000000000..4dba05449cb4 --- /dev/null +++ b/vendor/github.com/beorn7/perks/quantile/stream_test.go @@ -0,0 +1,188 @@ +package quantile + +import ( + "math" + "math/rand" + "sort" + "testing" +) + +var ( + Targets = map[float64]float64{ + 0.01: 0.001, + 0.10: 0.01, + 0.50: 0.05, + 0.90: 0.01, + 0.99: 0.001, + } + TargetsSmallEpsilon = map[float64]float64{ + 0.01: 0.0001, + 0.10: 0.001, + 0.50: 0.005, + 0.90: 0.001, + 0.99: 0.0001, + } + LowQuantiles = []float64{0.01, 0.1, 0.5} + HighQuantiles = []float64{0.99, 0.9, 0.5} +) + +const RelativeEpsilon = 0.01 + +func verifyPercsWithAbsoluteEpsilon(t *testing.T, a []float64, s *Stream) { + sort.Float64s(a) + for quantile, epsilon := range Targets { + n := float64(len(a)) + k := int(quantile * n) + lower := int((quantile - epsilon) * n) + if lower < 1 { + lower = 1 + } + upper := int(math.Ceil((quantile + epsilon) * n)) + if upper > len(a) { + upper = len(a) + } + w, min, max := a[k-1], a[lower-1], a[upper-1] + if g := s.Query(quantile); g < min || g > max { + t.Errorf("q=%f: want %v [%f,%f], got %v", quantile, w, min, max, g) + } + } +} + +func verifyLowPercsWithRelativeEpsilon(t *testing.T, a []float64, s *Stream) { + sort.Float64s(a) + for _, qu := range LowQuantiles { + n := float64(len(a)) + k := int(qu * n) + + lowerRank := int((1 - RelativeEpsilon) * qu * n) + upperRank := int(math.Ceil((1 + RelativeEpsilon) * qu * n)) + w, min, max := a[k-1], a[lowerRank-1], a[upperRank-1] + if g := s.Query(qu); g < min || g > max { + t.Errorf("q=%f: want %v [%f,%f], got %v", qu, w, min, max, g) + } + } +} + +func verifyHighPercsWithRelativeEpsilon(t *testing.T, a []float64, s *Stream) { + sort.Float64s(a) + for _, qu := range HighQuantiles { + n := float64(len(a)) + k := int(qu * n) + + lowerRank := int((1 - (1+RelativeEpsilon)*(1-qu)) * n) + upperRank := int(math.Ceil((1 - (1-RelativeEpsilon)*(1-qu)) * n)) + w, min, max := a[k-1], a[lowerRank-1], a[upperRank-1] + if g := s.Query(qu); g < min || g > max { + t.Errorf("q=%f: want %v [%f,%f], got %v", qu, w, min, max, g) + } + } +} + +func populateStream(s *Stream) []float64 { + a := make([]float64, 0, 1e5+100) + for i := 0; i < cap(a); i++ { + v := rand.NormFloat64() + // Add 5% asymmetric outliers. + if i%20 == 0 { + v = v*v + 1 + } + s.Insert(v) + a = append(a, v) + } + return a +} + +func TestTargetedQuery(t *testing.T) { + rand.Seed(42) + s := NewTargeted(Targets) + a := populateStream(s) + verifyPercsWithAbsoluteEpsilon(t, a, s) +} + +func TestLowBiasedQuery(t *testing.T) { + rand.Seed(42) + s := NewLowBiased(RelativeEpsilon) + a := populateStream(s) + verifyLowPercsWithRelativeEpsilon(t, a, s) +} + +func TestHighBiasedQuery(t *testing.T) { + rand.Seed(42) + s := NewHighBiased(RelativeEpsilon) + a := populateStream(s) + verifyHighPercsWithRelativeEpsilon(t, a, s) +} + +// BrokenTestTargetedMerge is broken, see Merge doc comment. 
+func BrokenTestTargetedMerge(t *testing.T) { + rand.Seed(42) + s1 := NewTargeted(Targets) + s2 := NewTargeted(Targets) + a := populateStream(s1) + a = append(a, populateStream(s2)...) + s1.Merge(s2.Samples()) + verifyPercsWithAbsoluteEpsilon(t, a, s1) +} + +// BrokenTestLowBiasedMerge is broken, see Merge doc comment. +func BrokenTestLowBiasedMerge(t *testing.T) { + rand.Seed(42) + s1 := NewLowBiased(RelativeEpsilon) + s2 := NewLowBiased(RelativeEpsilon) + a := populateStream(s1) + a = append(a, populateStream(s2)...) + s1.Merge(s2.Samples()) + verifyLowPercsWithRelativeEpsilon(t, a, s2) +} + +// BrokenTestHighBiasedMerge is broken, see Merge doc comment. +func BrokenTestHighBiasedMerge(t *testing.T) { + rand.Seed(42) + s1 := NewHighBiased(RelativeEpsilon) + s2 := NewHighBiased(RelativeEpsilon) + a := populateStream(s1) + a = append(a, populateStream(s2)...) + s1.Merge(s2.Samples()) + verifyHighPercsWithRelativeEpsilon(t, a, s2) +} + +func TestUncompressed(t *testing.T) { + q := NewTargeted(Targets) + for i := 100; i > 0; i-- { + q.Insert(float64(i)) + } + if g := q.Count(); g != 100 { + t.Errorf("want count 100, got %d", g) + } + // Before compression, Query should have 100% accuracy. + for quantile := range Targets { + w := quantile * 100 + if g := q.Query(quantile); g != w { + t.Errorf("want %f, got %f", w, g) + } + } +} + +func TestUncompressedSamples(t *testing.T) { + q := NewTargeted(map[float64]float64{0.99: 0.001}) + for i := 1; i <= 100; i++ { + q.Insert(float64(i)) + } + if g := q.Samples().Len(); g != 100 { + t.Errorf("want count 100, got %d", g) + } +} + +func TestUncompressedOne(t *testing.T) { + q := NewTargeted(map[float64]float64{0.99: 0.01}) + q.Insert(3.14) + if g := q.Query(0.90); g != 3.14 { + t.Error("want PI, got", g) + } +} + +func TestDefaults(t *testing.T) { + if g := NewTargeted(map[float64]float64{0.99: 0.001}).Query(0.99); g != 0 { + t.Errorf("want 0, got %f", g) + } +} diff --git a/vendor/github.com/bwmarrin/snowflake/BUILD b/vendor/github.com/bwmarrin/snowflake/BUILD index 15a52c8354b3..6d63ff571708 100644 --- a/vendor/github.com/bwmarrin/snowflake/BUILD +++ b/vendor/github.com/bwmarrin/snowflake/BUILD @@ -1,14 +1,17 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "go_default_library", srcs = ["snowflake.go"], importpath = "github.com/bwmarrin/snowflake", + visibility = ["//visibility:public"], +) + +go_test( + name = "go_default_test", + srcs = ["snowflake_test.go"], + importpath = "github.com/bwmarrin/snowflake", + library = ":go_default_library", ) filegroup( @@ -22,4 +25,5 @@ filegroup( name = "all-srcs", srcs = [":package-srcs"], tags = ["automanaged"], + visibility = ["//visibility:public"], ) diff --git a/vendor/github.com/bwmarrin/snowflake/snowflake_test.go b/vendor/github.com/bwmarrin/snowflake/snowflake_test.go new file mode 100644 index 000000000000..f2ba51df8ca9 --- /dev/null +++ b/vendor/github.com/bwmarrin/snowflake/snowflake_test.go @@ -0,0 +1,124 @@ +package snowflake + +import ( + "bytes" + "testing" +) + +func TestMarshalJSON(t *testing.T) { + id := ID(13587) + expected := "\"13587\"" + + bytes, err := id.MarshalJSON() + if err != nil { + t.Error("Unexpected error during MarshalJSON") + } + + if string(bytes) != expected { + t.Errorf("Got %s, expected %s", string(bytes), expected) + } +} + +func TestMarshalsIntBytes(t *testing.T) { + id := ID(13587).IntBytes() + expected := []byte{0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x35, 0x13} + if !bytes.Equal(id[:], expected) { + t.Errorf("Expected ID to be encoded as %v, got %v", expected, id) + } +} + +func TestUnmarshalJSON(t *testing.T) { + strID := "\"13587\"" + expected := ID(13587) + + var id ID + err := id.UnmarshalJSON([]byte(strID)) + if err != nil { + t.Error("Unexpected error during UnmarshalJSON") + } + + if id != expected { + t.Errorf("Got %d, expected %d", id, expected) + } +} + +func TestBase58(t *testing.T) { + + node, _ := NewNode(1) + + for i := 0; i < 10; i++ { + + sf := node.Generate() + b58 := sf.Base58() + psf, err := ParseBase58([]byte(b58)) + if err != nil { + t.Fatal(err) + } + if sf != psf { + t.Fatal("Parsed does not match String.") + } + } +} +func BenchmarkParseBase58(b *testing.B) { + + node, _ := NewNode(1) + sf := node.Generate() + b58 := sf.Base58() + + b.ReportAllocs() + + b.ResetTimer() + for n := 0; n < b.N; n++ { + ParseBase58([]byte(b58)) + } +} +func BenchmarkBase58(b *testing.B) { + + node, _ := NewNode(1) + sf := node.Generate() + + b.ReportAllocs() + + b.ResetTimer() + for n := 0; n < b.N; n++ { + sf.Base58() + } +} +func BenchmarkGenerate(b *testing.B) { + + node, _ := NewNode(1) + + b.ReportAllocs() + + b.ResetTimer() + for n := 0; n < b.N; n++ { + _ = node.Generate() + } +} + +func BenchmarkUnmarshal(b *testing.B) { + // Generate the ID to unmarshal + node, _ := NewNode(1) + id := node.Generate() + bytes, _ := id.MarshalJSON() + + var id2 ID + + b.ReportAllocs() + b.ResetTimer() + for n := 0; n < b.N; n++ { + _ = id2.UnmarshalJSON(bytes) + } +} + +func BenchmarkMarshal(b *testing.B) { + // Generate the ID to marshal + node, _ := NewNode(1) + id := node.Generate() + + b.ReportAllocs() + b.ResetTimer() + for n := 0; n < b.N; n++ { + _, _ = id.MarshalJSON() + } +} diff --git a/vendor/github.com/ghodss/yaml/BUILD b/vendor/github.com/ghodss/yaml/BUILD index 927391670f40..b3f2ebaa8789 100644 --- a/vendor/github.com/ghodss/yaml/BUILD +++ b/vendor/github.com/ghodss/yaml/BUILD @@ -1,9 +1,4 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "go_default_library", @@ -12,9 +7,17 @@ go_library( "yaml.go", ], importpath = "github.com/ghodss/yaml", + visibility = ["//visibility:public"], deps = ["//vendor/gopkg.in/yaml.v2:go_default_library"], ) +go_test( + name = "go_default_test", + srcs = ["yaml_test.go"], + importpath = "github.com/ghodss/yaml", + library = ":go_default_library", +) + filegroup( name = "package-srcs", srcs = glob(["**"]), @@ -26,4 +29,5 @@ filegroup( name = "all-srcs", srcs = [":package-srcs"], tags = ["automanaged"], + visibility = ["//visibility:public"], ) diff --git a/vendor/github.com/ghodss/yaml/yaml_test.go b/vendor/github.com/ghodss/yaml/yaml_test.go new file mode 100644 index 000000000000..0ae0954e9013 --- /dev/null +++ b/vendor/github.com/ghodss/yaml/yaml_test.go @@ -0,0 +1,271 @@ +package yaml + +import ( + "fmt" + "math" + "reflect" + "strconv" + "testing" +) + +type MarshalTest struct { + A string + B int64 + // Would like to test float64, but it's not supported in go-yaml. + // (See https://github.com/go-yaml/yaml/issues/83.) 
+ C float32 +} + +func TestMarshal(t *testing.T) { + f32String := strconv.FormatFloat(math.MaxFloat32, 'g', -1, 32) + s := MarshalTest{"a", math.MaxInt64, math.MaxFloat32} + e := []byte(fmt.Sprintf("A: a\nB: %d\nC: %s\n", math.MaxInt64, f32String)) + + y, err := Marshal(s) + if err != nil { + t.Errorf("error marshaling YAML: %v", err) + } + + if !reflect.DeepEqual(y, e) { + t.Errorf("marshal YAML was unsuccessful, expected: %#v, got: %#v", + string(e), string(y)) + } +} + +type UnmarshalString struct { + A string + True string +} + +type UnmarshalStringMap struct { + A map[string]string +} + +type UnmarshalNestedString struct { + A NestedString +} + +type NestedString struct { + A string +} + +type UnmarshalSlice struct { + A []NestedSlice +} + +type NestedSlice struct { + B string + C *string +} + +func TestUnmarshal(t *testing.T) { + y := []byte("a: 1") + s1 := UnmarshalString{} + e1 := UnmarshalString{A: "1"} + unmarshal(t, y, &s1, &e1) + + y = []byte("a: true") + s1 = UnmarshalString{} + e1 = UnmarshalString{A: "true"} + unmarshal(t, y, &s1, &e1) + + y = []byte("true: 1") + s1 = UnmarshalString{} + e1 = UnmarshalString{True: "1"} + unmarshal(t, y, &s1, &e1) + + y = []byte("a:\n a: 1") + s2 := UnmarshalNestedString{} + e2 := UnmarshalNestedString{NestedString{"1"}} + unmarshal(t, y, &s2, &e2) + + y = []byte("a:\n - b: abc\n c: def\n - b: 123\n c: 456\n") + s3 := UnmarshalSlice{} + e3 := UnmarshalSlice{[]NestedSlice{NestedSlice{"abc", strPtr("def")}, NestedSlice{"123", strPtr("456")}}} + unmarshal(t, y, &s3, &e3) + + y = []byte("a:\n b: 1") + s4 := UnmarshalStringMap{} + e4 := UnmarshalStringMap{map[string]string{"b": "1"}} + unmarshal(t, y, &s4, &e4) +} + +func unmarshal(t *testing.T, y []byte, s, e interface{}) { + err := Unmarshal(y, s) + if err != nil { + t.Errorf("error unmarshaling YAML: %v", err) + } + + if !reflect.DeepEqual(s, e) { + t.Errorf("unmarshal YAML was unsuccessful, expected: %+#v, got: %+#v", + e, s) + } +} + +type Case struct { + input string + output string + // By default we test that reversing the output == input. But if there is a + // difference in the reversed output, you can optionally specify it here. + reverse *string +} + +type RunType int + +const ( + RunTypeJSONToYAML RunType = iota + RunTypeYAMLToJSON +) + +func TestJSONToYAML(t *testing.T) { + cases := []Case{ + { + `{"t":"a"}`, + "t: a\n", + nil, + }, { + `{"t":null}`, + "t: null\n", + nil, + }, + } + + runCases(t, RunTypeJSONToYAML, cases) +} + +func TestYAMLToJSON(t *testing.T) { + cases := []Case{ + { + "t: a\n", + `{"t":"a"}`, + nil, + }, { + "t: \n", + `{"t":null}`, + strPtr("t: null\n"), + }, { + "t: null\n", + `{"t":null}`, + nil, + }, { + "1: a\n", + `{"1":"a"}`, + strPtr("\"1\": a\n"), + }, { + "1000000000000000000000000000000000000: a\n", + `{"1e+36":"a"}`, + strPtr("\"1e+36\": a\n"), + }, { + "1e+36: a\n", + `{"1e+36":"a"}`, + strPtr("\"1e+36\": a\n"), + }, { + "\"1e+36\": a\n", + `{"1e+36":"a"}`, + nil, + }, { + "\"1.2\": a\n", + `{"1.2":"a"}`, + nil, + }, { + "- t: a\n", + `[{"t":"a"}]`, + nil, + }, { + "- t: a\n" + + "- t:\n" + + " b: 1\n" + + " c: 2\n", + `[{"t":"a"},{"t":{"b":1,"c":2}}]`, + nil, + }, { + `[{t: a}, {t: {b: 1, c: 2}}]`, + `[{"t":"a"},{"t":{"b":1,"c":2}}]`, + strPtr("- t: a\n" + + "- t:\n" + + " b: 1\n" + + " c: 2\n"), + }, { + "- t: \n", + `[{"t":null}]`, + strPtr("- t: null\n"), + }, { + "- t: null\n", + `[{"t":null}]`, + nil, + }, + } + + // Cases that should produce errors. 
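As an aside before those parked error cases: the two helpers these tests drive can also be exercised directly. A minimal round-trip sketch follows; the sample input is illustrative, and the printed key order follows encoding/json's sorted map keys.

```go
package main

import (
	"fmt"

	"github.com/ghodss/yaml"
)

func main() {
	// YAML -> JSON: the integer key 1 comes out as the JSON string "1",
	// which is what the "1" and "1e+36" cases above assert.
	j, err := yaml.YAMLToJSON([]byte("t: a\n1: b\n"))
	if err != nil {
		panic(err)
	}
	fmt.Println(string(j)) // {"1":"b","t":"a"}

	// JSON -> YAML completes the round trip; "1" stays quoted so it
	// does not re-parse as an integer.
	y, err := yaml.JSONToYAML(j)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(y))
}
```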
+ _ = []Case{ + { + "~: a", + `{"null":"a"}`, + nil, + }, { + "a: !!binary gIGC\n", + "{\"a\":\"\x80\x81\x82\"}", + nil, + }, + } + + runCases(t, RunTypeYAMLToJSON, cases) +} + +func runCases(t *testing.T, runType RunType, cases []Case) { + var f func([]byte) ([]byte, error) + var invF func([]byte) ([]byte, error) + var msg string + var invMsg string + if runType == RunTypeJSONToYAML { + f = JSONToYAML + invF = YAMLToJSON + msg = "JSON to YAML" + invMsg = "YAML back to JSON" + } else { + f = YAMLToJSON + invF = JSONToYAML + msg = "YAML to JSON" + invMsg = "JSON back to YAML" + } + + for _, c := range cases { + // Convert the string. + t.Logf("converting %s\n", c.input) + output, err := f([]byte(c.input)) + if err != nil { + t.Errorf("Failed to convert %s, input: `%s`, err: %v", msg, c.input, err) + } + + // Check it against the expected output. + if string(output) != c.output { + t.Errorf("Failed to convert %s, input: `%s`, expected `%s`, got `%s`", + msg, c.input, c.output, string(output)) + } + + // Set the string that we will compare the reversed output to. + reverse := c.input + // If a special reverse string was specified, use that instead. + if c.reverse != nil { + reverse = *c.reverse + } + + // Reverse the output. + input, err := invF(output) + if err != nil { + t.Errorf("Failed to convert %s, input: `%s`, err: %v", invMsg, string(output), err) + } + + // Check the reverse is equal to the input (or to *c.reverse). + if string(input) != reverse { + t.Errorf("Failed to convert %s, input: `%s`, expected `%s`, got `%s`", + invMsg, string(output), reverse, string(input)) + } + } + +} + +// To be able to easily fill in the *Case.reverse string above. +func strPtr(s string) *string { + return &s +} diff --git a/vendor/github.com/go-sql-driver/mysql/BUILD b/vendor/github.com/go-sql-driver/mysql/BUILD index 7081a62baa7e..e5999086ca6c 100644 --- a/vendor/github.com/go-sql-driver/mysql/BUILD +++ b/vendor/github.com/go-sql-driver/mysql/BUILD @@ -1,9 +1,4 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "go_default_library", @@ -24,6 +19,20 @@ go_library( "utils.go", ], importpath = "github.com/go-sql-driver/mysql", + visibility = ["//visibility:public"], +) + +go_test( + name = "go_default_test", + srcs = [ + "benchmark_test.go", + "driver_test.go", + "dsn_test.go", + "errors_test.go", + "utils_test.go", + ], + importpath = "github.com/go-sql-driver/mysql", + library = ":go_default_library", ) filegroup( @@ -37,4 +46,5 @@ filegroup( name = "all-srcs", srcs = [":package-srcs"], tags = ["automanaged"], + visibility = ["//visibility:public"], ) diff --git a/vendor/github.com/go-sql-driver/mysql/benchmark_test.go b/vendor/github.com/go-sql-driver/mysql/benchmark_test.go new file mode 100644 index 000000000000..8f721139b994 --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/benchmark_test.go @@ -0,0 +1,246 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. 
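One pattern in the benchmark file below is worth a standalone illustration: it converts *testing.B to a locally defined TB type, and because Fatal and friends are promoted from an embedded struct inside testing.B, they remain available on the converted value, so repeated error checks collapse into small helpers. A minimal sketch of the pattern under assumed conditions (the file name and DSN are placeholders, and actually running it requires a reachable test server):

```go
// bench_sketch_test.go (hypothetical file name)
package mysqlbench

import (
	"database/sql"
	"testing"

	_ "github.com/go-sql-driver/mysql" // registers the "mysql" driver
)

// TB shares testing.B's underlying struct, so (*TB)(b) is a free
// conversion that lets error-checking helpers hang off the handle.
type TB testing.B

func (tb *TB) check(err error) {
	if err != nil {
		tb.Fatal(err)
	}
}

func (tb *TB) checkDB(db *sql.DB, err error) *sql.DB {
	tb.check(err)
	return db
}

func BenchmarkPing(b *testing.B) {
	tb := (*TB)(b)
	// Placeholder DSN; point it at a disposable test database.
	db := tb.checkDB(sql.Open("mysql", "root:@tcp(127.0.0.1:3306)/gotest"))
	defer db.Close()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		tb.check(db.Ping())
	}
}
```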
+ +package mysql + +import ( + "bytes" + "database/sql" + "database/sql/driver" + "math" + "strings" + "sync" + "sync/atomic" + "testing" + "time" +) + +type TB testing.B + +func (tb *TB) check(err error) { + if err != nil { + tb.Fatal(err) + } +} + +func (tb *TB) checkDB(db *sql.DB, err error) *sql.DB { + tb.check(err) + return db +} + +func (tb *TB) checkRows(rows *sql.Rows, err error) *sql.Rows { + tb.check(err) + return rows +} + +func (tb *TB) checkStmt(stmt *sql.Stmt, err error) *sql.Stmt { + tb.check(err) + return stmt +} + +func initDB(b *testing.B, queries ...string) *sql.DB { + tb := (*TB)(b) + db := tb.checkDB(sql.Open("mysql", dsn)) + for _, query := range queries { + if _, err := db.Exec(query); err != nil { + if w, ok := err.(MySQLWarnings); ok { + b.Logf("warning on %q: %v", query, w) + } else { + b.Fatalf("error on %q: %v", query, err) + } + } + } + return db +} + +const concurrencyLevel = 10 + +func BenchmarkQuery(b *testing.B) { + tb := (*TB)(b) + b.StopTimer() + b.ReportAllocs() + db := initDB(b, + "DROP TABLE IF EXISTS foo", + "CREATE TABLE foo (id INT PRIMARY KEY, val CHAR(50))", + `INSERT INTO foo VALUES (1, "one")`, + `INSERT INTO foo VALUES (2, "two")`, + ) + db.SetMaxIdleConns(concurrencyLevel) + defer db.Close() + + stmt := tb.checkStmt(db.Prepare("SELECT val FROM foo WHERE id=?")) + defer stmt.Close() + + remain := int64(b.N) + var wg sync.WaitGroup + wg.Add(concurrencyLevel) + defer wg.Wait() + b.StartTimer() + + for i := 0; i < concurrencyLevel; i++ { + go func() { + for { + if atomic.AddInt64(&remain, -1) < 0 { + wg.Done() + return + } + + var got string + tb.check(stmt.QueryRow(1).Scan(&got)) + if got != "one" { + b.Errorf("query = %q; want one", got) + wg.Done() + return + } + } + }() + } +} + +func BenchmarkExec(b *testing.B) { + tb := (*TB)(b) + b.StopTimer() + b.ReportAllocs() + db := tb.checkDB(sql.Open("mysql", dsn)) + db.SetMaxIdleConns(concurrencyLevel) + defer db.Close() + + stmt := tb.checkStmt(db.Prepare("DO 1")) + defer stmt.Close() + + remain := int64(b.N) + var wg sync.WaitGroup + wg.Add(concurrencyLevel) + defer wg.Wait() + b.StartTimer() + + for i := 0; i < concurrencyLevel; i++ { + go func() { + for { + if atomic.AddInt64(&remain, -1) < 0 { + wg.Done() + return + } + + if _, err := stmt.Exec(); err != nil { + b.Fatal(err.Error()) + } + } + }() + } +} + +// data, but no db writes +var roundtripSample []byte + +func initRoundtripBenchmarks() ([]byte, int, int) { + if roundtripSample == nil { + roundtripSample = []byte(strings.Repeat("0123456789abcdef", 1024*1024)) + } + return roundtripSample, 16, len(roundtripSample) +} + +func BenchmarkRoundtripTxt(b *testing.B) { + b.StopTimer() + sample, min, max := initRoundtripBenchmarks() + sampleString := string(sample) + b.ReportAllocs() + tb := (*TB)(b) + db := tb.checkDB(sql.Open("mysql", dsn)) + defer db.Close() + b.StartTimer() + var result string + for i := 0; i < b.N; i++ { + length := min + i + if length > max { + length = max + } + test := sampleString[0:length] + rows := tb.checkRows(db.Query(`SELECT "` + test + `"`)) + if !rows.Next() { + rows.Close() + b.Fatalf("crashed") + } + err := rows.Scan(&result) + if err != nil { + rows.Close() + b.Fatalf("crashed") + } + if result != test { + rows.Close() + b.Errorf("mismatch") + } + rows.Close() + } +} + +func BenchmarkRoundtripBin(b *testing.B) { + b.StopTimer() + sample, min, max := initRoundtripBenchmarks() + b.ReportAllocs() + tb := (*TB)(b) + db := tb.checkDB(sql.Open("mysql", dsn)) + defer db.Close() + stmt := tb.checkStmt(db.Prepare("SELECT 
?")) + defer stmt.Close() + b.StartTimer() + var result sql.RawBytes + for i := 0; i < b.N; i++ { + length := min + i + if length > max { + length = max + } + test := sample[0:length] + rows := tb.checkRows(stmt.Query(test)) + if !rows.Next() { + rows.Close() + b.Fatalf("crashed") + } + err := rows.Scan(&result) + if err != nil { + rows.Close() + b.Fatalf("crashed") + } + if !bytes.Equal(result, test) { + rows.Close() + b.Errorf("mismatch") + } + rows.Close() + } +} + +func BenchmarkInterpolation(b *testing.B) { + mc := &mysqlConn{ + cfg: &Config{ + InterpolateParams: true, + Loc: time.UTC, + }, + maxPacketAllowed: maxPacketSize, + maxWriteSize: maxPacketSize - 1, + buf: newBuffer(nil), + } + + args := []driver.Value{ + int64(42424242), + float64(math.Pi), + false, + time.Unix(1423411542, 807015000), + []byte("bytes containing special chars ' \" \a \x00"), + "string containing special chars ' \" \a \x00", + } + q := "SELECT ?, ?, ?, ?, ?, ?" + + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := mc.interpolateParams(q, args) + if err != nil { + b.Fatal(err) + } + } +} diff --git a/vendor/github.com/go-sql-driver/mysql/driver_test.go b/vendor/github.com/go-sql-driver/mysql/driver_test.go new file mode 100644 index 000000000000..efbc4925c618 --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/driver_test.go @@ -0,0 +1,1857 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +import ( + "bytes" + "crypto/tls" + "database/sql" + "database/sql/driver" + "fmt" + "io" + "io/ioutil" + "log" + "net" + "net/url" + "os" + "strings" + "sync" + "sync/atomic" + "testing" + "time" +) + +var ( + user string + pass string + prot string + addr string + dbname string + dsn string + netAddr string + available bool +) + +var ( + tDate = time.Date(2012, 6, 14, 0, 0, 0, 0, time.UTC) + sDate = "2012-06-14" + tDateTime = time.Date(2011, 11, 20, 21, 27, 37, 0, time.UTC) + sDateTime = "2011-11-20 21:27:37" + tDate0 = time.Time{} + sDate0 = "0000-00-00" + sDateTime0 = "0000-00-00 00:00:00" +) + +// See https://github.com/go-sql-driver/mysql/wiki/Testing +func init() { + // get environment variables + env := func(key, defaultValue string) string { + if value := os.Getenv(key); value != "" { + return value + } + return defaultValue + } + user = env("MYSQL_TEST_USER", "root") + pass = env("MYSQL_TEST_PASS", "") + prot = env("MYSQL_TEST_PROT", "tcp") + addr = env("MYSQL_TEST_ADDR", "localhost:3306") + dbname = env("MYSQL_TEST_DBNAME", "gotest") + netAddr = fmt.Sprintf("%s(%s)", prot, addr) + dsn = fmt.Sprintf("%s:%s@%s/%s?timeout=30s&strict=true", user, pass, netAddr, dbname) + c, err := net.Dial(prot, addr) + if err == nil { + available = true + c.Close() + } +} + +type DBTest struct { + *testing.T + db *sql.DB +} + +func runTestsWithMultiStatement(t *testing.T, dsn string, tests ...func(dbt *DBTest)) { + if !available { + t.Skipf("MySQL server not running on %s", netAddr) + } + + dsn += "&multiStatements=true" + var db *sql.DB + if _, err := ParseDSN(dsn); err != errInvalidDSNUnsafeCollation { + db, err = sql.Open("mysql", dsn) + if err != nil { + t.Fatalf("error connecting: %s", err.Error()) + } + defer db.Close() + } + + dbt := &DBTest{t, db} + for _, test := range tests { + 
test(dbt) + dbt.db.Exec("DROP TABLE IF EXISTS test") + } +} + +func runTests(t *testing.T, dsn string, tests ...func(dbt *DBTest)) { + if !available { + t.Skipf("MySQL server not running on %s", netAddr) + } + + db, err := sql.Open("mysql", dsn) + if err != nil { + t.Fatalf("error connecting: %s", err.Error()) + } + defer db.Close() + + db.Exec("DROP TABLE IF EXISTS test") + + dsn2 := dsn + "&interpolateParams=true" + var db2 *sql.DB + if _, err := ParseDSN(dsn2); err != errInvalidDSNUnsafeCollation { + db2, err = sql.Open("mysql", dsn2) + if err != nil { + t.Fatalf("error connecting: %s", err.Error()) + } + defer db2.Close() + } + + dsn3 := dsn + "&multiStatements=true" + var db3 *sql.DB + if _, err := ParseDSN(dsn3); err != errInvalidDSNUnsafeCollation { + db3, err = sql.Open("mysql", dsn3) + if err != nil { + t.Fatalf("error connecting: %s", err.Error()) + } + defer db3.Close() + } + + dbt := &DBTest{t, db} + dbt2 := &DBTest{t, db2} + dbt3 := &DBTest{t, db3} + for _, test := range tests { + test(dbt) + dbt.db.Exec("DROP TABLE IF EXISTS test") + if db2 != nil { + test(dbt2) + dbt2.db.Exec("DROP TABLE IF EXISTS test") + } + if db3 != nil { + test(dbt3) + dbt3.db.Exec("DROP TABLE IF EXISTS test") + } + } +} + +func (dbt *DBTest) fail(method, query string, err error) { + if len(query) > 300 { + query = "[query too large to print]" + } + dbt.Fatalf("error on %s %s: %s", method, query, err.Error()) +} + +func (dbt *DBTest) mustExec(query string, args ...interface{}) (res sql.Result) { + res, err := dbt.db.Exec(query, args...) + if err != nil { + dbt.fail("exec", query, err) + } + return res +} + +func (dbt *DBTest) mustQuery(query string, args ...interface{}) (rows *sql.Rows) { + rows, err := dbt.db.Query(query, args...) + if err != nil { + dbt.fail("query", query, err) + } + return rows +} + +func TestEmptyQuery(t *testing.T) { + runTests(t, dsn, func(dbt *DBTest) { + // just a comment, no query + rows := dbt.mustQuery("--") + // will hang before #255 + if rows.Next() { + dbt.Errorf("next on rows must be false") + } + }) +} + +func TestCRUD(t *testing.T) { + runTests(t, dsn, func(dbt *DBTest) { + // Create Table + dbt.mustExec("CREATE TABLE test (value BOOL)") + + // Test for unexpected data + var out bool + rows := dbt.mustQuery("SELECT * FROM test") + if rows.Next() { + dbt.Error("unexpected data in empty table") + } + + // Create Data + res := dbt.mustExec("INSERT INTO test VALUES (1)") + count, err := res.RowsAffected() + if err != nil { + dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error()) + } + if count != 1 { + dbt.Fatalf("expected 1 affected row, got %d", count) + } + + id, err := res.LastInsertId() + if err != nil { + dbt.Fatalf("res.LastInsertId() returned error: %s", err.Error()) + } + if id != 0 { + dbt.Fatalf("expected InsertId 0, got %d", id) + } + + // Read + rows = dbt.mustQuery("SELECT value FROM test") + if rows.Next() { + rows.Scan(&out) + if true != out { + dbt.Errorf("true != %t", out) + } + + if rows.Next() { + dbt.Error("unexpected data") + } + } else { + dbt.Error("no data") + } + + // Update + res = dbt.mustExec("UPDATE test SET value = ? 
WHERE value = ?", false, true) + count, err = res.RowsAffected() + if err != nil { + dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error()) + } + if count != 1 { + dbt.Fatalf("expected 1 affected row, got %d", count) + } + + // Check Update + rows = dbt.mustQuery("SELECT value FROM test") + if rows.Next() { + rows.Scan(&out) + if false != out { + dbt.Errorf("false != %t", out) + } + + if rows.Next() { + dbt.Error("unexpected data") + } + } else { + dbt.Error("no data") + } + + // Delete + res = dbt.mustExec("DELETE FROM test WHERE value = ?", false) + count, err = res.RowsAffected() + if err != nil { + dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error()) + } + if count != 1 { + dbt.Fatalf("expected 1 affected row, got %d", count) + } + + // Check for unexpected rows + res = dbt.mustExec("DELETE FROM test") + count, err = res.RowsAffected() + if err != nil { + dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error()) + } + if count != 0 { + dbt.Fatalf("expected 0 affected row, got %d", count) + } + }) +} + +func TestMultiQuery(t *testing.T) { + runTestsWithMultiStatement(t, dsn, func(dbt *DBTest) { + // Create Table + dbt.mustExec("CREATE TABLE `test` (`id` int(11) NOT NULL, `value` int(11) NOT NULL) ") + + // Create Data + res := dbt.mustExec("INSERT INTO test VALUES (1, 1)") + count, err := res.RowsAffected() + if err != nil { + dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error()) + } + if count != 1 { + dbt.Fatalf("expected 1 affected row, got %d", count) + } + + // Update + res = dbt.mustExec("UPDATE test SET value = 3 WHERE id = 1; UPDATE test SET value = 4 WHERE id = 1; UPDATE test SET value = 5 WHERE id = 1;") + count, err = res.RowsAffected() + if err != nil { + dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error()) + } + if count != 1 { + dbt.Fatalf("expected 1 affected row, got %d", count) + } + + // Read + var out int + rows := dbt.mustQuery("SELECT value FROM test WHERE id=1;") + if rows.Next() { + rows.Scan(&out) + if 5 != out { + dbt.Errorf("5 != %d", out) + } + + if rows.Next() { + dbt.Error("unexpected data") + } + } else { + dbt.Error("no data") + } + + }) +} + +func TestInt(t *testing.T) { + runTests(t, dsn, func(dbt *DBTest) { + types := [5]string{"TINYINT", "SMALLINT", "MEDIUMINT", "INT", "BIGINT"} + in := int64(42) + var out int64 + var rows *sql.Rows + + // SIGNED + for _, v := range types { + dbt.mustExec("CREATE TABLE test (value " + v + ")") + + dbt.mustExec("INSERT INTO test VALUES (?)", in) + + rows = dbt.mustQuery("SELECT value FROM test") + if rows.Next() { + rows.Scan(&out) + if in != out { + dbt.Errorf("%s: %d != %d", v, in, out) + } + } else { + dbt.Errorf("%s: no data", v) + } + + dbt.mustExec("DROP TABLE IF EXISTS test") + } + + // UNSIGNED ZEROFILL + for _, v := range types { + dbt.mustExec("CREATE TABLE test (value " + v + " ZEROFILL)") + + dbt.mustExec("INSERT INTO test VALUES (?)", in) + + rows = dbt.mustQuery("SELECT value FROM test") + if rows.Next() { + rows.Scan(&out) + if in != out { + dbt.Errorf("%s ZEROFILL: %d != %d", v, in, out) + } + } else { + dbt.Errorf("%s ZEROFILL: no data", v) + } + + dbt.mustExec("DROP TABLE IF EXISTS test") + } + }) +} + +func TestFloat32(t *testing.T) { + runTests(t, dsn, func(dbt *DBTest) { + types := [2]string{"FLOAT", "DOUBLE"} + in := float32(42.23) + var out float32 + var rows *sql.Rows + for _, v := range types { + dbt.mustExec("CREATE TABLE test (value " + v + ")") + dbt.mustExec("INSERT INTO test VALUES (?)", in) + rows = dbt.mustQuery("SELECT value FROM test") 
+ if rows.Next() { + rows.Scan(&out) + if in != out { + dbt.Errorf("%s: %g != %g", v, in, out) + } + } else { + dbt.Errorf("%s: no data", v) + } + dbt.mustExec("DROP TABLE IF EXISTS test") + } + }) +} + +func TestFloat64(t *testing.T) { + runTests(t, dsn, func(dbt *DBTest) { + types := [2]string{"FLOAT", "DOUBLE"} + var expected float64 = 42.23 + var out float64 + var rows *sql.Rows + for _, v := range types { + dbt.mustExec("CREATE TABLE test (value " + v + ")") + dbt.mustExec("INSERT INTO test VALUES (42.23)") + rows = dbt.mustQuery("SELECT value FROM test") + if rows.Next() { + rows.Scan(&out) + if expected != out { + dbt.Errorf("%s: %g != %g", v, expected, out) + } + } else { + dbt.Errorf("%s: no data", v) + } + dbt.mustExec("DROP TABLE IF EXISTS test") + } + }) +} + +func TestFloat64Placeholder(t *testing.T) { + runTests(t, dsn, func(dbt *DBTest) { + types := [2]string{"FLOAT", "DOUBLE"} + var expected float64 = 42.23 + var out float64 + var rows *sql.Rows + for _, v := range types { + dbt.mustExec("CREATE TABLE test (id int, value " + v + ")") + dbt.mustExec("INSERT INTO test VALUES (1, 42.23)") + rows = dbt.mustQuery("SELECT value FROM test WHERE id = ?", 1) + if rows.Next() { + rows.Scan(&out) + if expected != out { + dbt.Errorf("%s: %g != %g", v, expected, out) + } + } else { + dbt.Errorf("%s: no data", v) + } + dbt.mustExec("DROP TABLE IF EXISTS test") + } + }) +} + +func TestString(t *testing.T) { + runTests(t, dsn, func(dbt *DBTest) { + types := [6]string{"CHAR(255)", "VARCHAR(255)", "TINYTEXT", "TEXT", "MEDIUMTEXT", "LONGTEXT"} + in := "κόσμε üöäßñóùéàâÿœ'îë Árvíztűrő いろはにほへとちりぬるを イロハニホヘト דג סקרן чащах น่าฟังเอย" + var out string + var rows *sql.Rows + + for _, v := range types { + dbt.mustExec("CREATE TABLE test (value " + v + ") CHARACTER SET utf8") + + dbt.mustExec("INSERT INTO test VALUES (?)", in) + + rows = dbt.mustQuery("SELECT value FROM test") + if rows.Next() { + rows.Scan(&out) + if in != out { + dbt.Errorf("%s: %s != %s", v, in, out) + } + } else { + dbt.Errorf("%s: no data", v) + } + + dbt.mustExec("DROP TABLE IF EXISTS test") + } + + // BLOB + dbt.mustExec("CREATE TABLE test (id int, value BLOB) CHARACTER SET utf8") + + id := 2 + in = "Lorem ipsum dolor sit amet, consetetur sadipscing elitr, " + + "sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, " + + "sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. " + + "Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet. " + + "Lorem ipsum dolor sit amet, consetetur sadipscing elitr, " + + "sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, " + + "sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. " + + "Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet." 
+ dbt.mustExec("INSERT INTO test VALUES (?, ?)", id, in) + + err := dbt.db.QueryRow("SELECT value FROM test WHERE id = ?", id).Scan(&out) + if err != nil { + dbt.Fatalf("Error on BLOB-Query: %s", err.Error()) + } else if out != in { + dbt.Errorf("BLOB: %s != %s", in, out) + } + }) +} + +type timeTests struct { + dbtype string + tlayout string + tests []timeTest +} + +type timeTest struct { + s string // leading "!": do not use t as value in queries + t time.Time +} + +type timeMode byte + +func (t timeMode) String() string { + switch t { + case binaryString: + return "binary:string" + case binaryTime: + return "binary:time.Time" + case textString: + return "text:string" + } + panic("unsupported timeMode") +} + +func (t timeMode) Binary() bool { + switch t { + case binaryString, binaryTime: + return true + } + return false +} + +const ( + binaryString timeMode = iota + binaryTime + textString +) + +func (t timeTest) genQuery(dbtype string, mode timeMode) string { + var inner string + if mode.Binary() { + inner = "?" + } else { + inner = `"%s"` + } + return `SELECT cast(` + inner + ` as ` + dbtype + `)` +} + +func (t timeTest) run(dbt *DBTest, dbtype, tlayout string, mode timeMode) { + var rows *sql.Rows + query := t.genQuery(dbtype, mode) + switch mode { + case binaryString: + rows = dbt.mustQuery(query, t.s) + case binaryTime: + rows = dbt.mustQuery(query, t.t) + case textString: + query = fmt.Sprintf(query, t.s) + rows = dbt.mustQuery(query) + default: + panic("unsupported mode") + } + defer rows.Close() + var err error + if !rows.Next() { + err = rows.Err() + if err == nil { + err = fmt.Errorf("no data") + } + dbt.Errorf("%s [%s]: %s", dbtype, mode, err) + return + } + var dst interface{} + err = rows.Scan(&dst) + if err != nil { + dbt.Errorf("%s [%s]: %s", dbtype, mode, err) + return + } + switch val := dst.(type) { + case []uint8: + str := string(val) + if str == t.s { + return + } + if mode.Binary() && dbtype == "DATETIME" && len(str) == 26 && str[:19] == t.s { + // a fix mainly for TravisCI: + // accept full microsecond resolution in result for DATETIME columns + // where the binary protocol was used + return + } + dbt.Errorf("%s [%s] to string: expected %q, got %q", + dbtype, mode, + t.s, str, + ) + case time.Time: + if val == t.t { + return + } + dbt.Errorf("%s [%s] to string: expected %q, got %q", + dbtype, mode, + t.s, val.Format(tlayout), + ) + default: + fmt.Printf("%#v\n", []interface{}{dbtype, tlayout, mode, t.s, t.t}) + dbt.Errorf("%s [%s]: unhandled type %T (is '%v')", + dbtype, mode, + val, val, + ) + } +} + +func TestDateTime(t *testing.T) { + afterTime := func(t time.Time, d string) time.Time { + dur, err := time.ParseDuration(d) + if err != nil { + panic(err) + } + return t.Add(dur) + } + // NOTE: MySQL rounds DATETIME(x) up - but that's not included in the tests + format := "2006-01-02 15:04:05.999999" + t0 := time.Time{} + tstr0 := "0000-00-00 00:00:00.000000" + testcases := []timeTests{ + {"DATE", format[:10], []timeTest{ + {t: time.Date(2011, 11, 20, 0, 0, 0, 0, time.UTC)}, + {t: t0, s: tstr0[:10]}, + }}, + {"DATETIME", format[:19], []timeTest{ + {t: time.Date(2011, 11, 20, 21, 27, 37, 0, time.UTC)}, + {t: t0, s: tstr0[:19]}, + }}, + {"DATETIME(0)", format[:21], []timeTest{ + {t: time.Date(2011, 11, 20, 21, 27, 37, 0, time.UTC)}, + {t: t0, s: tstr0[:19]}, + }}, + {"DATETIME(1)", format[:21], []timeTest{ + {t: time.Date(2011, 11, 20, 21, 27, 37, 100000000, time.UTC)}, + {t: t0, s: tstr0[:21]}, + }}, + {"DATETIME(6)", format, []timeTest{ + {t: time.Date(2011, 11, 20, 
21, 27, 37, 123456000, time.UTC)}, + {t: t0, s: tstr0}, + }}, + {"TIME", format[11:19], []timeTest{ + {t: afterTime(t0, "12345s")}, + {s: "!-12:34:56"}, + {s: "!-838:59:59"}, + {s: "!838:59:59"}, + {t: t0, s: tstr0[11:19]}, + }}, + {"TIME(0)", format[11:19], []timeTest{ + {t: afterTime(t0, "12345s")}, + {s: "!-12:34:56"}, + {s: "!-838:59:59"}, + {s: "!838:59:59"}, + {t: t0, s: tstr0[11:19]}, + }}, + {"TIME(1)", format[11:21], []timeTest{ + {t: afterTime(t0, "12345600ms")}, + {s: "!-12:34:56.7"}, + {s: "!-838:59:58.9"}, + {s: "!838:59:58.9"}, + {t: t0, s: tstr0[11:21]}, + }}, + {"TIME(6)", format[11:], []timeTest{ + {t: afterTime(t0, "1234567890123000ns")}, + {s: "!-12:34:56.789012"}, + {s: "!-838:59:58.999999"}, + {s: "!838:59:58.999999"}, + {t: t0, s: tstr0[11:]}, + }}, + } + dsns := []string{ + dsn + "&parseTime=true", + dsn + "&parseTime=false", + } + for _, testdsn := range dsns { + runTests(t, testdsn, func(dbt *DBTest) { + microsecsSupported := false + zeroDateSupported := false + var rows *sql.Rows + var err error + rows, err = dbt.db.Query(`SELECT cast("00:00:00.1" as TIME(1)) = "00:00:00.1"`) + if err == nil { + rows.Scan(&microsecsSupported) + rows.Close() + } + rows, err = dbt.db.Query(`SELECT cast("0000-00-00" as DATE) = "0000-00-00"`) + if err == nil { + rows.Scan(&zeroDateSupported) + rows.Close() + } + for _, setups := range testcases { + if t := setups.dbtype; !microsecsSupported && t[len(t)-1:] == ")" { + // skip fractional second tests if unsupported by server + continue + } + for _, setup := range setups.tests { + allowBinTime := true + if setup.s == "" { + // fill in the time string wherever Go can reliably produce it + setup.s = setup.t.Format(setups.tlayout) + } else if setup.s[0] == '!' { + // skip tests using setup.t as source in queries + allowBinTime = false + // fix setup.s - remove the "!" + setup.s = setup.s[1:] + } + if !zeroDateSupported && setup.s == tstr0[:len(setup.s)] { + // skip disallowed 0000-00-00 date + continue + } + setup.run(dbt, setups.dbtype, setups.tlayout, textString) + setup.run(dbt, setups.dbtype, setups.tlayout, binaryString) + if allowBinTime { + setup.run(dbt, setups.dbtype, setups.tlayout, binaryTime) + } + } + } + }) + } +} + +func TestTimestampMicros(t *testing.T) { + format := "2006-01-02 15:04:05.999999" + f0 := format[:19] + f1 := format[:21] + f6 := format[:26] + runTests(t, dsn, func(dbt *DBTest) { + // check if microseconds are supported. + // Do not use timestamp(x) for that check - before 5.5.6, x would mean display width + // and not precision.
+ // See the last paragraph at http://dev.mysql.com/doc/refman/5.6/en/fractional-seconds.html + microsecsSupported := false + if rows, err := dbt.db.Query(`SELECT cast("00:00:00.1" as TIME(1)) = "00:00:00.1"`); err == nil { + rows.Scan(&microsecsSupported) + rows.Close() + } + if !microsecsSupported { + // skip test + return + } + _, err := dbt.db.Exec(` + CREATE TABLE test ( + value0 TIMESTAMP NOT NULL DEFAULT '` + f0 + `', + value1 TIMESTAMP(1) NOT NULL DEFAULT '` + f1 + `', + value6 TIMESTAMP(6) NOT NULL DEFAULT '` + f6 + `' + )`, + ) + if err != nil { + dbt.Error(err) + } + defer dbt.mustExec("DROP TABLE IF EXISTS test") + dbt.mustExec("INSERT INTO test SET value0=?, value1=?, value6=?", f0, f1, f6) + var res0, res1, res6 string + rows := dbt.mustQuery("SELECT * FROM test") + if !rows.Next() { + dbt.Errorf("test contained no selectable values") + } + err = rows.Scan(&res0, &res1, &res6) + if err != nil { + dbt.Error(err) + } + if res0 != f0 { + dbt.Errorf("expected %q, got %q", f0, res0) + } + if res1 != f1 { + dbt.Errorf("expected %q, got %q", f1, res1) + } + if res6 != f6 { + dbt.Errorf("expected %q, got %q", f6, res6) + } + }) +} + +func TestNULL(t *testing.T) { + runTests(t, dsn, func(dbt *DBTest) { + nullStmt, err := dbt.db.Prepare("SELECT NULL") + if err != nil { + dbt.Fatal(err) + } + defer nullStmt.Close() + + nonNullStmt, err := dbt.db.Prepare("SELECT 1") + if err != nil { + dbt.Fatal(err) + } + defer nonNullStmt.Close() + + // NullBool + var nb sql.NullBool + // Invalid + if err = nullStmt.QueryRow().Scan(&nb); err != nil { + dbt.Fatal(err) + } + if nb.Valid { + dbt.Error("valid NullBool which should be invalid") + } + // Valid + if err = nonNullStmt.QueryRow().Scan(&nb); err != nil { + dbt.Fatal(err) + } + if !nb.Valid { + dbt.Error("invalid NullBool which should be valid") + } else if nb.Bool != true { + dbt.Errorf("Unexpected NullBool value: %t (should be true)", nb.Bool) + } + + // NullFloat64 + var nf sql.NullFloat64 + // Invalid + if err = nullStmt.QueryRow().Scan(&nf); err != nil { + dbt.Fatal(err) + } + if nf.Valid { + dbt.Error("valid NullFloat64 which should be invalid") + } + // Valid + if err = nonNullStmt.QueryRow().Scan(&nf); err != nil { + dbt.Fatal(err) + } + if !nf.Valid { + dbt.Error("invalid NullFloat64 which should be valid") + } else if nf.Float64 != float64(1) { + dbt.Errorf("unexpected NullFloat64 value: %f (should be 1.0)", nf.Float64) + } + + // NullInt64 + var ni sql.NullInt64 + // Invalid + if err = nullStmt.QueryRow().Scan(&ni); err != nil { + dbt.Fatal(err) + } + if ni.Valid { + dbt.Error("valid NullInt64 which should be invalid") + } + // Valid + if err = nonNullStmt.QueryRow().Scan(&ni); err != nil { + dbt.Fatal(err) + } + if !ni.Valid { + dbt.Error("invalid NullInt64 which should be valid") + } else if ni.Int64 != int64(1) { + dbt.Errorf("unexpected NullInt64 value: %d (should be 1)", ni.Int64) + } + + // NullString + var ns sql.NullString + // Invalid + if err = nullStmt.QueryRow().Scan(&ns); err != nil { + dbt.Fatal(err) + } + if ns.Valid { + dbt.Error("valid NullString which should be invalid") + } + // Valid + if err = nonNullStmt.QueryRow().Scan(&ns); err != nil { + dbt.Fatal(err) + } + if !ns.Valid { + dbt.Error("invalid NullString which should be valid") + } else if ns.String != `1` { + dbt.Error("unexpected NullString value:" + ns.String + " (should be `1`)") + } + + // nil-bytes + var b []byte + // Read nil + if err = nullStmt.QueryRow().Scan(&b); err != nil { + dbt.Fatal(err) + } + if b != nil { + dbt.Error("non-nil []byte which should be nil")
+ } + // Read non-nil + if err = nonNullStmt.QueryRow().Scan(&b); err != nil { + dbt.Fatal(err) + } + if b == nil { + dbt.Error("nil []byte which should be non-nil") + } + // Insert nil + b = nil + success := false + if err = dbt.db.QueryRow("SELECT ? IS NULL", b).Scan(&success); err != nil { + dbt.Fatal(err) + } + if !success { + dbt.Error("inserting []byte(nil) as NULL failed") + } + // Check input==output with input==nil + b = nil + if err = dbt.db.QueryRow("SELECT ?", b).Scan(&b); err != nil { + dbt.Fatal(err) + } + if b != nil { + dbt.Error("non-nil echo from nil input") + } + // Check input==output with input!=nil + b = []byte("") + if err = dbt.db.QueryRow("SELECT ?", b).Scan(&b); err != nil { + dbt.Fatal(err) + } + if b == nil { + dbt.Error("nil echo from non-nil input") + } + + // Insert NULL + dbt.mustExec("CREATE TABLE test (dummmy1 int, value int, dummy2 int)") + + dbt.mustExec("INSERT INTO test VALUES (?, ?, ?)", 1, nil, 2) + + var out interface{} + rows := dbt.mustQuery("SELECT * FROM test") + if rows.Next() { + rows.Scan(&out) + if out != nil { + dbt.Errorf("%v != nil", out) + } + } else { + dbt.Error("no data") + } + }) +} + +func TestUint64(t *testing.T) { + const ( + u0 = uint64(0) + uall = ^u0 + uhigh = uall >> 1 + utop = ^uhigh + s0 = int64(0) + sall = ^s0 + shigh = int64(uhigh) + stop = ^shigh + ) + runTests(t, dsn, func(dbt *DBTest) { + stmt, err := dbt.db.Prepare(`SELECT ?, ?, ? ,?, ?, ?, ?, ?`) + if err != nil { + dbt.Fatal(err) + } + defer stmt.Close() + row := stmt.QueryRow( + u0, uhigh, utop, uall, + s0, shigh, stop, sall, + ) + + var ua, ub, uc, ud uint64 + var sa, sb, sc, sd int64 + + err = row.Scan(&ua, &ub, &uc, &ud, &sa, &sb, &sc, &sd) + if err != nil { + dbt.Fatal(err) + } + switch { + case ua != u0, + ub != uhigh, + uc != utop, + ud != uall, + sa != s0, + sb != shigh, + sc != stop, + sd != sall: + dbt.Fatal("unexpected result value") + } + }) +} + +func TestLongData(t *testing.T) { + runTests(t, dsn, func(dbt *DBTest) { + var maxAllowedPacketSize int + err := dbt.db.QueryRow("select @@max_allowed_packet").Scan(&maxAllowedPacketSize) + if err != nil { + dbt.Fatal(err) + } + maxAllowedPacketSize-- + + // don't get too ambitious + if maxAllowedPacketSize > 1<<25 { + maxAllowedPacketSize = 1 << 25 + } + + dbt.mustExec("CREATE TABLE test (value LONGBLOB)") + + in := strings.Repeat(`a`, maxAllowedPacketSize+1) + var out string + var rows *sql.Rows + + // Long text data + const nonDataQueryLen = 28 // length query w/o value + inS := in[:maxAllowedPacketSize-nonDataQueryLen] + dbt.mustExec("INSERT INTO test VALUES('" + inS + "')") + rows = dbt.mustQuery("SELECT value FROM test") + if rows.Next() { + rows.Scan(&out) + if inS != out { + dbt.Fatalf("LONGBLOB: length in: %d, length out: %d", len(inS), len(out)) + } + if rows.Next() { + dbt.Error("LONGBLOB: unexpected row") + } + } else { + dbt.Fatalf("LONGBLOB: no data") + } + + // Empty table + dbt.mustExec("TRUNCATE TABLE test") + + // Long binary data + dbt.mustExec("INSERT INTO test VALUES(?)", in) + rows = dbt.mustQuery("SELECT value FROM test WHERE 1=?", 1) + if rows.Next() { + rows.Scan(&out) + if in != out { + dbt.Fatalf("LONGBLOB: length in: %d, length out: %d", len(in), len(out)) + } + if rows.Next() { + dbt.Error("LONGBLOB: unexpected row") + } + } else { + if err = rows.Err(); err != nil { + dbt.Fatalf("LONGBLOB: no data (err: %s)", err.Error()) + } else { + dbt.Fatal("LONGBLOB: no data (err: )") + } + } + }) +} + +func TestLoadData(t *testing.T) { + runTests(t, dsn, func(dbt *DBTest) {
verifyLoadDataResult := func() {
+			rows, err := dbt.db.Query("SELECT * FROM test")
+			if err != nil {
+				dbt.Fatal(err.Error())
+			}
+
+			i := 0
+			values := [4]string{
+				"a string",
+				"a string containing a \t",
+				"a string containing a \n",
+				"a string containing both \t\n",
+			}
+
+			var id int
+			var value string
+
+			for rows.Next() {
+				i++
+				err = rows.Scan(&id, &value)
+				if err != nil {
+					dbt.Fatal(err.Error())
+				}
+				if i != id {
+					dbt.Fatalf("%d != %d", i, id)
+				}
+				if values[i-1] != value {
+					dbt.Fatalf("%q != %q", values[i-1], value)
+				}
+			}
+			err = rows.Err()
+			if err != nil {
+				dbt.Fatal(err.Error())
+			}
+
+			if i != 4 {
+				dbt.Fatalf("rows count mismatch. Got %d, want 4", i)
+			}
+		}
+		file, err := ioutil.TempFile("", "gotest")
+		if err != nil {
+			dbt.Fatal(err)
+		}
+		defer os.Remove(file.Name())
+		file.WriteString("1\ta string\n2\ta string containing a \\t\n3\ta string containing a \\n\n4\ta string containing both \\t\\n\n")
+		file.Close()
+
+		dbt.db.Exec("DROP TABLE IF EXISTS test")
+		dbt.mustExec("CREATE TABLE test (id INT NOT NULL PRIMARY KEY, value TEXT NOT NULL) CHARACTER SET utf8")
+
+		// Local File
+		RegisterLocalFile(file.Name())
+		dbt.mustExec(fmt.Sprintf("LOAD DATA LOCAL INFILE %q INTO TABLE test", file.Name()))
+		verifyLoadDataResult()
+		// negative test
+		_, err = dbt.db.Exec("LOAD DATA LOCAL INFILE 'doesnotexist' INTO TABLE test")
+		if err == nil {
+			dbt.Fatal("load non-existent file didn't fail")
+		} else if err.Error() != "local file 'doesnotexist' is not registered" {
+			dbt.Fatal(err.Error())
+		}
+
+		// Empty table
+		dbt.mustExec("TRUNCATE TABLE test")
+
+		// Reader
+		RegisterReaderHandler("test", func() io.Reader {
+			file, err = os.Open(file.Name())
+			if err != nil {
+				dbt.Fatal(err)
+			}
+			return file
+		})
+		dbt.mustExec("LOAD DATA LOCAL INFILE 'Reader::test' INTO TABLE test")
+		verifyLoadDataResult()
+		// negative test
+		_, err = dbt.db.Exec("LOAD DATA LOCAL INFILE 'Reader::doesnotexist' INTO TABLE test")
+		if err == nil {
+			dbt.Fatal("load non-existent Reader didn't fail")
+		} else if err.Error() != "Reader 'doesnotexist' is not registered" {
+			dbt.Fatal(err.Error())
+		}
+	})
+}
+
+func TestFoundRows(t *testing.T) {
+	runTests(t, dsn, func(dbt *DBTest) {
+		dbt.mustExec("CREATE TABLE test (id INT NOT NULL, data INT NOT NULL)")
+		dbt.mustExec("INSERT INTO test (id, data) VALUES (0, 0),(0, 0),(1, 0),(1, 0),(1, 1)")
+
+		res := dbt.mustExec("UPDATE test SET data = 1 WHERE id = 0")
+		count, err := res.RowsAffected()
+		if err != nil {
+			dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error())
+		}
+		if count != 2 {
+			dbt.Fatalf("Expected 2 affected rows, got %d", count)
+		}
+		res = dbt.mustExec("UPDATE test SET data = 1 WHERE id = 1")
+		count, err = res.RowsAffected()
+		if err != nil {
+			dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error())
+		}
+		if count != 2 {
+			dbt.Fatalf("Expected 2 affected rows, got %d", count)
+		}
+	})
+	runTests(t, dsn+"&clientFoundRows=true", func(dbt *DBTest) {
+		dbt.mustExec("CREATE TABLE test (id INT NOT NULL, data INT NOT NULL)")
+		dbt.mustExec("INSERT INTO test (id, data) VALUES (0, 0),(0, 0),(1, 0),(1, 0),(1, 1)")
+
+		res := dbt.mustExec("UPDATE test SET data = 1 WHERE id = 0")
+		count, err := res.RowsAffected()
+		if err != nil {
+			dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error())
+		}
+		if count != 2 {
+			dbt.Fatalf("Expected 2 matched rows, got %d", count)
+		}
+		res = dbt.mustExec("UPDATE test SET data = 1 WHERE id = 1")
+		count, err = res.RowsAffected()
+		if err != nil {
+			dbt.Fatalf("res.RowsAffected() 
returned error: %s", err.Error()) + } + if count != 3 { + dbt.Fatalf("Expected 3 matched rows, got %d", count) + } + }) +} + +func TestStrict(t *testing.T) { + // ALLOW_INVALID_DATES to get rid of stricter modes - we want to test for warnings, not errors + relaxedDsn := dsn + "&sql_mode='ALLOW_INVALID_DATES,NO_AUTO_CREATE_USER'" + // make sure the MySQL version is recent enough with a separate connection + // before running the test + conn, err := MySQLDriver{}.Open(relaxedDsn) + if conn != nil { + conn.Close() + } + if me, ok := err.(*MySQLError); ok && me.Number == 1231 { + // Error 1231: Variable 'sql_mode' can't be set to the value of 'ALLOW_INVALID_DATES' + // => skip test, MySQL server version is too old + return + } + runTests(t, relaxedDsn, func(dbt *DBTest) { + dbt.mustExec("CREATE TABLE test (a TINYINT NOT NULL, b CHAR(4))") + + var queries = [...]struct { + in string + codes []string + }{ + {"DROP TABLE IF EXISTS no_such_table", []string{"1051"}}, + {"INSERT INTO test VALUES(10,'mysql'),(NULL,'test'),(300,'Open Source')", []string{"1265", "1048", "1264", "1265"}}, + } + var err error + + var checkWarnings = func(err error, mode string, idx int) { + if err == nil { + dbt.Errorf("expected STRICT error on query [%s] %s", mode, queries[idx].in) + } + + if warnings, ok := err.(MySQLWarnings); ok { + var codes = make([]string, len(warnings)) + for i := range warnings { + codes[i] = warnings[i].Code + } + if len(codes) != len(queries[idx].codes) { + dbt.Errorf("unexpected STRICT error count on query [%s] %s: Wanted %v, Got %v", mode, queries[idx].in, queries[idx].codes, codes) + } + + for i := range warnings { + if codes[i] != queries[idx].codes[i] { + dbt.Errorf("unexpected STRICT error codes on query [%s] %s: Wanted %v, Got %v", mode, queries[idx].in, queries[idx].codes, codes) + return + } + } + + } else { + dbt.Errorf("unexpected error on query [%s] %s: %s", mode, queries[idx].in, err.Error()) + } + } + + // text protocol + for i := range queries { + _, err = dbt.db.Exec(queries[i].in) + checkWarnings(err, "text", i) + } + + var stmt *sql.Stmt + + // binary protocol + for i := range queries { + stmt, err = dbt.db.Prepare(queries[i].in) + if err != nil { + dbt.Errorf("error on preparing query %s: %s", queries[i].in, err.Error()) + } + + _, err = stmt.Exec() + checkWarnings(err, "binary", i) + + err = stmt.Close() + if err != nil { + dbt.Errorf("error on closing stmt for query %s: %s", queries[i].in, err.Error()) + } + } + }) +} + +func TestTLS(t *testing.T) { + tlsTest := func(dbt *DBTest) { + if err := dbt.db.Ping(); err != nil { + if err == ErrNoTLS { + dbt.Skip("server does not support TLS") + } else { + dbt.Fatalf("error on Ping: %s", err.Error()) + } + } + + rows := dbt.mustQuery("SHOW STATUS LIKE 'Ssl_cipher'") + + var variable, value *sql.RawBytes + for rows.Next() { + if err := rows.Scan(&variable, &value); err != nil { + dbt.Fatal(err.Error()) + } + + if value == nil { + dbt.Fatal("no Cipher") + } + } + } + + runTests(t, dsn+"&tls=skip-verify", tlsTest) + + // Verify that registering / using a custom cfg works + RegisterTLSConfig("custom-skip-verify", &tls.Config{ + InsecureSkipVerify: true, + }) + runTests(t, dsn+"&tls=custom-skip-verify", tlsTest) +} + +func TestReuseClosedConnection(t *testing.T) { + // this test does not use sql.database, it uses the driver directly + if !available { + t.Skipf("MySQL server not running on %s", netAddr) + } + + md := &MySQLDriver{} + conn, err := md.Open(dsn) + if err != nil { + t.Fatalf("error connecting: %s", err.Error()) + } + stmt, 
err := conn.Prepare("DO 1") + if err != nil { + t.Fatalf("error preparing statement: %s", err.Error()) + } + _, err = stmt.Exec(nil) + if err != nil { + t.Fatalf("error executing statement: %s", err.Error()) + } + err = conn.Close() + if err != nil { + t.Fatalf("error closing connection: %s", err.Error()) + } + + defer func() { + if err := recover(); err != nil { + t.Errorf("panic after reusing a closed connection: %v", err) + } + }() + _, err = stmt.Exec(nil) + if err != nil && err != driver.ErrBadConn { + t.Errorf("unexpected error '%s', expected '%s'", + err.Error(), driver.ErrBadConn.Error()) + } +} + +func TestCharset(t *testing.T) { + if !available { + t.Skipf("MySQL server not running on %s", netAddr) + } + + mustSetCharset := func(charsetParam, expected string) { + runTests(t, dsn+"&"+charsetParam, func(dbt *DBTest) { + rows := dbt.mustQuery("SELECT @@character_set_connection") + defer rows.Close() + + if !rows.Next() { + dbt.Fatalf("error getting connection charset: %s", rows.Err()) + } + + var got string + rows.Scan(&got) + + if got != expected { + dbt.Fatalf("expected connection charset %s but got %s", expected, got) + } + }) + } + + // non utf8 test + mustSetCharset("charset=ascii", "ascii") + + // when the first charset is invalid, use the second + mustSetCharset("charset=none,utf8", "utf8") + + // when the first charset is valid, use it + mustSetCharset("charset=ascii,utf8", "ascii") + mustSetCharset("charset=utf8,ascii", "utf8") +} + +func TestFailingCharset(t *testing.T) { + runTests(t, dsn+"&charset=none", func(dbt *DBTest) { + // run query to really establish connection... + _, err := dbt.db.Exec("SELECT 1") + if err == nil { + dbt.db.Close() + t.Fatalf("connection must not succeed without a valid charset") + } + }) +} + +func TestCollation(t *testing.T) { + if !available { + t.Skipf("MySQL server not running on %s", netAddr) + } + + defaultCollation := "utf8_general_ci" + testCollations := []string{ + "", // do not set + defaultCollation, // driver default + "latin1_general_ci", + "binary", + "utf8_unicode_ci", + "cp1257_bin", + } + + for _, collation := range testCollations { + var expected, tdsn string + if collation != "" { + tdsn = dsn + "&collation=" + collation + expected = collation + } else { + tdsn = dsn + expected = defaultCollation + } + + runTests(t, tdsn, func(dbt *DBTest) { + var got string + if err := dbt.db.QueryRow("SELECT @@collation_connection").Scan(&got); err != nil { + dbt.Fatal(err) + } + + if got != expected { + dbt.Fatalf("expected connection collation %s but got %s", expected, got) + } + }) + } +} + +func TestColumnsWithAlias(t *testing.T) { + runTests(t, dsn+"&columnsWithAlias=true", func(dbt *DBTest) { + rows := dbt.mustQuery("SELECT 1 AS A") + defer rows.Close() + cols, _ := rows.Columns() + if len(cols) != 1 { + t.Fatalf("expected 1 column, got %d", len(cols)) + } + if cols[0] != "A" { + t.Fatalf("expected column name \"A\", got \"%s\"", cols[0]) + } + rows.Close() + + rows = dbt.mustQuery("SELECT * FROM (SELECT 1 AS one) AS A") + cols, _ = rows.Columns() + if len(cols) != 1 { + t.Fatalf("expected 1 column, got %d", len(cols)) + } + if cols[0] != "A.one" { + t.Fatalf("expected column name \"A.one\", got \"%s\"", cols[0]) + } + }) +} + +func TestRawBytesResultExceedsBuffer(t *testing.T) { + runTests(t, dsn, func(dbt *DBTest) { + // defaultBufSize from buffer.go + expected := strings.Repeat("abc", defaultBufSize) + + rows := dbt.mustQuery("SELECT '" + expected + "'") + defer rows.Close() + if !rows.Next() { + dbt.Error("expected result, got 
none") + } + var result sql.RawBytes + rows.Scan(&result) + if expected != string(result) { + dbt.Error("result did not match expected value") + } + }) +} + +func TestTimezoneConversion(t *testing.T) { + zones := []string{"UTC", "US/Central", "US/Pacific", "Local"} + + // Regression test for timezone handling + tzTest := func(dbt *DBTest) { + + // Create table + dbt.mustExec("CREATE TABLE test (ts TIMESTAMP)") + + // Insert local time into database (should be converted) + usCentral, _ := time.LoadLocation("US/Central") + reftime := time.Date(2014, 05, 30, 18, 03, 17, 0, time.UTC).In(usCentral) + dbt.mustExec("INSERT INTO test VALUE (?)", reftime) + + // Retrieve time from DB + rows := dbt.mustQuery("SELECT ts FROM test") + if !rows.Next() { + dbt.Fatal("did not get any rows out") + } + + var dbTime time.Time + err := rows.Scan(&dbTime) + if err != nil { + dbt.Fatal("Err", err) + } + + // Check that dates match + if reftime.Unix() != dbTime.Unix() { + dbt.Errorf("times do not match.\n") + dbt.Errorf(" Now(%v)=%v\n", usCentral, reftime) + dbt.Errorf(" Now(UTC)=%v\n", dbTime) + } + } + + for _, tz := range zones { + runTests(t, dsn+"&parseTime=true&loc="+url.QueryEscape(tz), tzTest) + } +} + +// Special cases + +func TestRowsClose(t *testing.T) { + runTests(t, dsn, func(dbt *DBTest) { + rows, err := dbt.db.Query("SELECT 1") + if err != nil { + dbt.Fatal(err) + } + + err = rows.Close() + if err != nil { + dbt.Fatal(err) + } + + if rows.Next() { + dbt.Fatal("unexpected row after rows.Close()") + } + + err = rows.Err() + if err != nil { + dbt.Fatal(err) + } + }) +} + +// dangling statements +// http://code.google.com/p/go/issues/detail?id=3865 +func TestCloseStmtBeforeRows(t *testing.T) { + runTests(t, dsn, func(dbt *DBTest) { + stmt, err := dbt.db.Prepare("SELECT 1") + if err != nil { + dbt.Fatal(err) + } + + rows, err := stmt.Query() + if err != nil { + stmt.Close() + dbt.Fatal(err) + } + defer rows.Close() + + err = stmt.Close() + if err != nil { + dbt.Fatal(err) + } + + if !rows.Next() { + dbt.Fatal("getting row failed") + } else { + err = rows.Err() + if err != nil { + dbt.Fatal(err) + } + + var out bool + err = rows.Scan(&out) + if err != nil { + dbt.Fatalf("error on rows.Scan(): %s", err.Error()) + } + if out != true { + dbt.Errorf("true != %t", out) + } + } + }) +} + +// It is valid to have multiple Rows for the same Stmt +// http://code.google.com/p/go/issues/detail?id=3734 +func TestStmtMultiRows(t *testing.T) { + runTests(t, dsn, func(dbt *DBTest) { + stmt, err := dbt.db.Prepare("SELECT 1 UNION SELECT 0") + if err != nil { + dbt.Fatal(err) + } + + rows1, err := stmt.Query() + if err != nil { + stmt.Close() + dbt.Fatal(err) + } + defer rows1.Close() + + rows2, err := stmt.Query() + if err != nil { + stmt.Close() + dbt.Fatal(err) + } + defer rows2.Close() + + var out bool + + // 1 + if !rows1.Next() { + dbt.Fatal("first rows1.Next failed") + } else { + err = rows1.Err() + if err != nil { + dbt.Fatal(err) + } + + err = rows1.Scan(&out) + if err != nil { + dbt.Fatalf("error on rows.Scan(): %s", err.Error()) + } + if out != true { + dbt.Errorf("true != %t", out) + } + } + + if !rows2.Next() { + dbt.Fatal("first rows2.Next failed") + } else { + err = rows2.Err() + if err != nil { + dbt.Fatal(err) + } + + err = rows2.Scan(&out) + if err != nil { + dbt.Fatalf("error on rows.Scan(): %s", err.Error()) + } + if out != true { + dbt.Errorf("true != %t", out) + } + } + + // 2 + if !rows1.Next() { + dbt.Fatal("second rows1.Next failed") + } else { + err = rows1.Err() + if err != nil { + 
dbt.Fatal(err) + } + + err = rows1.Scan(&out) + if err != nil { + dbt.Fatalf("error on rows.Scan(): %s", err.Error()) + } + if out != false { + dbt.Errorf("false != %t", out) + } + + if rows1.Next() { + dbt.Fatal("unexpected row on rows1") + } + err = rows1.Close() + if err != nil { + dbt.Fatal(err) + } + } + + if !rows2.Next() { + dbt.Fatal("second rows2.Next failed") + } else { + err = rows2.Err() + if err != nil { + dbt.Fatal(err) + } + + err = rows2.Scan(&out) + if err != nil { + dbt.Fatalf("error on rows.Scan(): %s", err.Error()) + } + if out != false { + dbt.Errorf("false != %t", out) + } + + if rows2.Next() { + dbt.Fatal("unexpected row on rows2") + } + err = rows2.Close() + if err != nil { + dbt.Fatal(err) + } + } + }) +} + +// Regression test for +// * more than 32 NULL parameters (issue 209) +// * more parameters than fit into the buffer (issue 201) +func TestPreparedManyCols(t *testing.T) { + const numParams = defaultBufSize + runTests(t, dsn, func(dbt *DBTest) { + query := "SELECT ?" + strings.Repeat(",?", numParams-1) + stmt, err := dbt.db.Prepare(query) + if err != nil { + dbt.Fatal(err) + } + defer stmt.Close() + // create more parameters than fit into the buffer + // which will take nil-values + params := make([]interface{}, numParams) + rows, err := stmt.Query(params...) + if err != nil { + stmt.Close() + dbt.Fatal(err) + } + defer rows.Close() + }) +} + +func TestConcurrent(t *testing.T) { + if enabled, _ := readBool(os.Getenv("MYSQL_TEST_CONCURRENT")); !enabled { + t.Skip("MYSQL_TEST_CONCURRENT env var not set") + } + + runTests(t, dsn, func(dbt *DBTest) { + var max int + err := dbt.db.QueryRow("SELECT @@max_connections").Scan(&max) + if err != nil { + dbt.Fatalf("%s", err.Error()) + } + dbt.Logf("testing up to %d concurrent connections \r\n", max) + + var remaining, succeeded int32 = int32(max), 0 + + var wg sync.WaitGroup + wg.Add(max) + + var fatalError string + var once sync.Once + fatalf := func(s string, vals ...interface{}) { + once.Do(func() { + fatalError = fmt.Sprintf(s, vals...) 
+			})
+		}
+
+		for i := 0; i < max; i++ {
+			go func(id int) {
+				defer wg.Done()
+
+				tx, err := dbt.db.Begin()
+				atomic.AddInt32(&remaining, -1)
+
+				if err != nil {
+					if err.Error() != "Error 1040: Too many connections" {
+						fatalf("error on conn %d: %s", id, err.Error())
+					}
+					return
+				}
+
+				// keep the connection busy until all connections are open
+				for remaining > 0 {
+					if _, err = tx.Exec("DO 1"); err != nil {
+						fatalf("error on conn %d: %s", id, err.Error())
+						return
+					}
+				}
+
+				if err = tx.Commit(); err != nil {
+					fatalf("error on conn %d: %s", id, err.Error())
+					return
+				}
+
+				// everything went fine with this connection
+				atomic.AddInt32(&succeeded, 1)
+			}(i)
+		}
+
+		// wait until all connections are open
+		wg.Wait()
+
+		if fatalError != "" {
+			dbt.Fatal(fatalError)
+		}
+
+		dbt.Logf("reached %d concurrent connections\r\n", succeeded)
+	})
+}
+
+// Tests custom dial functions
+func TestCustomDial(t *testing.T) {
+	if !available {
+		t.Skipf("MySQL server not running on %s", netAddr)
+	}
+
+	// our custom dial function, which just wraps net.Dial here
+	RegisterDial("mydial", func(addr string) (net.Conn, error) {
+		return net.Dial(prot, addr)
+	})
+
+	db, err := sql.Open("mysql", fmt.Sprintf("%s:%s@mydial(%s)/%s?timeout=30s&strict=true", user, pass, addr, dbname))
+	if err != nil {
+		t.Fatalf("error connecting: %s", err.Error())
+	}
+	defer db.Close()
+
+	if _, err = db.Exec("DO 1"); err != nil {
+		t.Fatalf("connection failed: %s", err.Error())
+	}
+}
+
+func TestSQLInjection(t *testing.T) {
+	createTest := func(arg string) func(dbt *DBTest) {
+		return func(dbt *DBTest) {
+			dbt.mustExec("CREATE TABLE test (v INTEGER)")
+			dbt.mustExec("INSERT INTO test VALUES (?)", 1)
+
+			var v int
+			// NULL can't be equal to anything; the idea here is to inject a query so that it returns a row anyway.
+			// This test verifies that escapeQuotes and escapeBackslash are working properly.
+			err := dbt.db.QueryRow("SELECT v FROM test WHERE NULL = ?", arg).Scan(&v)
+			if err == sql.ErrNoRows {
+				return // success, sql injection failed
+			} else if err == nil {
+				dbt.Errorf("sql injection successful with arg: %s", arg)
+			} else {
+				dbt.Errorf("error running query with arg: %s; err: %s", arg, err.Error())
+			}
+		}
+	}
+
+	dsns := []string{
+		dsn,
+		dsn + "&sql_mode='NO_BACKSLASH_ESCAPES,NO_AUTO_CREATE_USER'",
+	}
+	for _, testdsn := range dsns {
+		runTests(t, testdsn, createTest("1 OR 1=1"))
+		runTests(t, testdsn, createTest("' OR '1'='1"))
+	}
+}
+
+// Test if inserted data is correctly retrieved after being escaped
+func TestInsertRetrieveEscapedData(t *testing.T) {
+	testData := func(dbt *DBTest) {
+		dbt.mustExec("CREATE TABLE test (v VARCHAR(255))")
+
+		// All sequences that are escaped by escapeQuotes and escapeBackslash
+		v := "foo \x00\n\r\x1a\"'\\"
+		dbt.mustExec("INSERT INTO test VALUES (?)", v)
+
+		var out string
+		err := dbt.db.QueryRow("SELECT v FROM test").Scan(&out)
+		if err != nil {
+			dbt.Fatalf("%s", err.Error())
+		}
+
+		if out != v {
+			dbt.Errorf("%q != %q", out, v)
+		}
+	}
+
+	dsns := []string{
+		dsn,
+		dsn + "&sql_mode='NO_BACKSLASH_ESCAPES,NO_AUTO_CREATE_USER'",
+	}
+	for _, testdsn := range dsns {
+		runTests(t, testdsn, testData)
+	}
+}
+
+func TestUnixSocketAuthFail(t *testing.T) {
+	runTests(t, dsn, func(dbt *DBTest) {
+		// Save the current logger so we can restore it.
+		oldLogger := errLog
+
+		// Set a new logger so we can capture its output.
+		buffer := bytes.NewBuffer(make([]byte, 0, 64))
+		newLogger := log.New(buffer, "prefix: ", 0)
+		SetLogger(newLogger)
+
+		// Restore the logger.
+ defer SetLogger(oldLogger) + + // Make a new DSN that uses the MySQL socket file and a bad password, which + // we can make by simply appending any character to the real password. + badPass := pass + "x" + socket := "" + if prot == "unix" { + socket = addr + } else { + // Get socket file from MySQL. + err := dbt.db.QueryRow("SELECT @@socket").Scan(&socket) + if err != nil { + t.Fatalf("error on SELECT @@socket: %s", err.Error()) + } + } + t.Logf("socket: %s", socket) + badDSN := fmt.Sprintf("%s:%s@unix(%s)/%s?timeout=30s&strict=true", user, badPass, socket, dbname) + db, err := sql.Open("mysql", badDSN) + if err != nil { + t.Fatalf("error connecting: %s", err.Error()) + } + defer db.Close() + + // Connect to MySQL for real. This will cause an auth failure. + err = db.Ping() + if err == nil { + t.Error("expected Ping() to return an error") + } + + // The driver should not log anything. + if actual := buffer.String(); actual != "" { + t.Errorf("expected no output, got %q", actual) + } + }) +} diff --git a/vendor/github.com/go-sql-driver/mysql/dsn_test.go b/vendor/github.com/go-sql-driver/mysql/dsn_test.go new file mode 100644 index 000000000000..80949e18ab1c --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/dsn_test.go @@ -0,0 +1,207 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2016 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +import ( + "crypto/tls" + "fmt" + "net/url" + "testing" +) + +var testDSNs = []struct { + in string + out string +}{ + {"username:password@protocol(address)/dbname?param=value", "&{User:username Passwd:password Net:protocol Addr:address DBName:dbname Params:map[param:value] Collation:utf8_general_ci Loc:UTC TLSConfig: tls: Timeout:0 ReadTimeout:0 WriteTimeout:0 AllowAllFiles:false AllowCleartextPasswords:false AllowOldPasswords:false ClientFoundRows:false ColumnsWithAlias:false InterpolateParams:false MultiStatements:false ParseTime:false Strict:false}"}, + {"username:password@protocol(address)/dbname?param=value&columnsWithAlias=true", "&{User:username Passwd:password Net:protocol Addr:address DBName:dbname Params:map[param:value] Collation:utf8_general_ci Loc:UTC TLSConfig: tls: Timeout:0 ReadTimeout:0 WriteTimeout:0 AllowAllFiles:false AllowCleartextPasswords:false AllowOldPasswords:false ClientFoundRows:false ColumnsWithAlias:true InterpolateParams:false MultiStatements:false ParseTime:false Strict:false}"}, + {"username:password@protocol(address)/dbname?param=value&columnsWithAlias=true&multiStatements=true", "&{User:username Passwd:password Net:protocol Addr:address DBName:dbname Params:map[param:value] Collation:utf8_general_ci Loc:UTC TLSConfig: tls: Timeout:0 ReadTimeout:0 WriteTimeout:0 AllowAllFiles:false AllowCleartextPasswords:false AllowOldPasswords:false ClientFoundRows:false ColumnsWithAlias:true InterpolateParams:false MultiStatements:true ParseTime:false Strict:false}"}, + {"user@unix(/path/to/socket)/dbname?charset=utf8", "&{User:user Passwd: Net:unix Addr:/path/to/socket DBName:dbname Params:map[charset:utf8] Collation:utf8_general_ci Loc:UTC TLSConfig: tls: Timeout:0 ReadTimeout:0 WriteTimeout:0 AllowAllFiles:false AllowCleartextPasswords:false AllowOldPasswords:false ClientFoundRows:false ColumnsWithAlias:false InterpolateParams:false MultiStatements:false 
ParseTime:false Strict:false}"}, + {"user:password@tcp(localhost:5555)/dbname?charset=utf8&tls=true", "&{User:user Passwd:password Net:tcp Addr:localhost:5555 DBName:dbname Params:map[charset:utf8] Collation:utf8_general_ci Loc:UTC TLSConfig:true tls: Timeout:0 ReadTimeout:0 WriteTimeout:0 AllowAllFiles:false AllowCleartextPasswords:false AllowOldPasswords:false ClientFoundRows:false ColumnsWithAlias:false InterpolateParams:false MultiStatements:false ParseTime:false Strict:false}"}, + {"user:password@tcp(localhost:5555)/dbname?charset=utf8mb4,utf8&tls=skip-verify", "&{User:user Passwd:password Net:tcp Addr:localhost:5555 DBName:dbname Params:map[charset:utf8mb4,utf8] Collation:utf8_general_ci Loc:UTC TLSConfig:skip-verify tls: Timeout:0 ReadTimeout:0 WriteTimeout:0 AllowAllFiles:false AllowCleartextPasswords:false AllowOldPasswords:false ClientFoundRows:false ColumnsWithAlias:false InterpolateParams:false MultiStatements:false ParseTime:false Strict:false}"}, + {"user:password@/dbname?loc=UTC&timeout=30s&readTimeout=1s&writeTimeout=1s&allowAllFiles=1&clientFoundRows=true&allowOldPasswords=TRUE&collation=utf8mb4_unicode_ci", "&{User:user Passwd:password Net:tcp Addr:127.0.0.1:3306 DBName:dbname Params:map[] Collation:utf8mb4_unicode_ci Loc:UTC TLSConfig: tls: Timeout:30s ReadTimeout:1s WriteTimeout:1s AllowAllFiles:true AllowCleartextPasswords:false AllowOldPasswords:true ClientFoundRows:true ColumnsWithAlias:false InterpolateParams:false MultiStatements:false ParseTime:false Strict:false}"}, + {"user:p@ss(word)@tcp([de:ad:be:ef::ca:fe]:80)/dbname?loc=Local", "&{User:user Passwd:p@ss(word) Net:tcp Addr:[de:ad:be:ef::ca:fe]:80 DBName:dbname Params:map[] Collation:utf8_general_ci Loc:Local TLSConfig: tls: Timeout:0 ReadTimeout:0 WriteTimeout:0 AllowAllFiles:false AllowCleartextPasswords:false AllowOldPasswords:false ClientFoundRows:false ColumnsWithAlias:false InterpolateParams:false MultiStatements:false ParseTime:false Strict:false}"}, + {"/dbname", "&{User: Passwd: Net:tcp Addr:127.0.0.1:3306 DBName:dbname Params:map[] Collation:utf8_general_ci Loc:UTC TLSConfig: tls: Timeout:0 ReadTimeout:0 WriteTimeout:0 AllowAllFiles:false AllowCleartextPasswords:false AllowOldPasswords:false ClientFoundRows:false ColumnsWithAlias:false InterpolateParams:false MultiStatements:false ParseTime:false Strict:false}"}, + {"@/", "&{User: Passwd: Net:tcp Addr:127.0.0.1:3306 DBName: Params:map[] Collation:utf8_general_ci Loc:UTC TLSConfig: tls: Timeout:0 ReadTimeout:0 WriteTimeout:0 AllowAllFiles:false AllowCleartextPasswords:false AllowOldPasswords:false ClientFoundRows:false ColumnsWithAlias:false InterpolateParams:false MultiStatements:false ParseTime:false Strict:false}"}, + {"/", "&{User: Passwd: Net:tcp Addr:127.0.0.1:3306 DBName: Params:map[] Collation:utf8_general_ci Loc:UTC TLSConfig: tls: Timeout:0 ReadTimeout:0 WriteTimeout:0 AllowAllFiles:false AllowCleartextPasswords:false AllowOldPasswords:false ClientFoundRows:false ColumnsWithAlias:false InterpolateParams:false MultiStatements:false ParseTime:false Strict:false}"}, + {"", "&{User: Passwd: Net:tcp Addr:127.0.0.1:3306 DBName: Params:map[] Collation:utf8_general_ci Loc:UTC TLSConfig: tls: Timeout:0 ReadTimeout:0 WriteTimeout:0 AllowAllFiles:false AllowCleartextPasswords:false AllowOldPasswords:false ClientFoundRows:false ColumnsWithAlias:false InterpolateParams:false MultiStatements:false ParseTime:false Strict:false}"}, + {"user:p@/ssword@/", "&{User:user Passwd:p@/ssword Net:tcp Addr:127.0.0.1:3306 DBName: Params:map[] Collation:utf8_general_ci 
Loc:UTC TLSConfig: tls: Timeout:0 ReadTimeout:0 WriteTimeout:0 AllowAllFiles:false AllowCleartextPasswords:false AllowOldPasswords:false ClientFoundRows:false ColumnsWithAlias:false InterpolateParams:false MultiStatements:false ParseTime:false Strict:false}"}, + {"unix/?arg=%2Fsome%2Fpath.ext", "&{User: Passwd: Net:unix Addr:/tmp/mysql.sock DBName: Params:map[arg:/some/path.ext] Collation:utf8_general_ci Loc:UTC TLSConfig: tls: Timeout:0 ReadTimeout:0 WriteTimeout:0 AllowAllFiles:false AllowCleartextPasswords:false AllowOldPasswords:false ClientFoundRows:false ColumnsWithAlias:false InterpolateParams:false MultiStatements:false ParseTime:false Strict:false}"}, +} + +func TestDSNParser(t *testing.T) { + var cfg *Config + var err error + var res string + + for i, tst := range testDSNs { + cfg, err = ParseDSN(tst.in) + if err != nil { + t.Error(err.Error()) + } + + // pointer not static + cfg.tls = nil + + res = fmt.Sprintf("%+v", cfg) + if res != tst.out { + t.Errorf("%d. ParseDSN(%q) => %q, want %q", i, tst.in, res, tst.out) + } + } +} + +func TestDSNParserInvalid(t *testing.T) { + var invalidDSNs = []string{ + "@net(addr/", // no closing brace + "@tcp(/", // no closing brace + "tcp(/", // no closing brace + "(/", // no closing brace + "net(addr)//", // unescaped + "User:pass@tcp(1.2.3.4:3306)", // no trailing slash + //"/dbname?arg=/some/unescaped/path", + } + + for i, tst := range invalidDSNs { + if _, err := ParseDSN(tst); err == nil { + t.Errorf("invalid DSN #%d. (%s) didn't error!", i, tst) + } + } +} + +func TestDSNReformat(t *testing.T) { + for i, tst := range testDSNs { + dsn1 := tst.in + cfg1, err := ParseDSN(dsn1) + if err != nil { + t.Error(err.Error()) + continue + } + cfg1.tls = nil // pointer not static + res1 := fmt.Sprintf("%+v", cfg1) + + dsn2 := cfg1.FormatDSN() + cfg2, err := ParseDSN(dsn2) + if err != nil { + t.Error(err.Error()) + continue + } + cfg2.tls = nil // pointer not static + res2 := fmt.Sprintf("%+v", cfg2) + + if res1 != res2 { + t.Errorf("%d. %q does not match %q", i, res2, res1) + } + } +} + +func TestDSNWithCustomTLS(t *testing.T) { + baseDSN := "User:password@tcp(localhost:5555)/dbname?tls=" + tlsCfg := tls.Config{} + + RegisterTLSConfig("utils_test", &tlsCfg) + + // Custom TLS is missing + tst := baseDSN + "invalid_tls" + cfg, err := ParseDSN(tst) + if err == nil { + t.Errorf("invalid custom TLS in DSN (%s) but did not error. 
Got config: %#v", tst, cfg) + } + + tst = baseDSN + "utils_test" + + // Custom TLS with a server name + name := "foohost" + tlsCfg.ServerName = name + cfg, err = ParseDSN(tst) + + if err != nil { + t.Error(err.Error()) + } else if cfg.tls.ServerName != name { + t.Errorf("did not get the correct TLS ServerName (%s) parsing DSN (%s).", name, tst) + } + + // Custom TLS without a server name + name = "localhost" + tlsCfg.ServerName = "" + cfg, err = ParseDSN(tst) + + if err != nil { + t.Error(err.Error()) + } else if cfg.tls.ServerName != name { + t.Errorf("did not get the correct ServerName (%s) parsing DSN (%s).", name, tst) + } + + DeregisterTLSConfig("utils_test") +} + +func TestDSNWithCustomTLSQueryEscape(t *testing.T) { + const configKey = "&%!:" + dsn := "User:password@tcp(localhost:5555)/dbname?tls=" + url.QueryEscape(configKey) + name := "foohost" + tlsCfg := tls.Config{ServerName: name} + + RegisterTLSConfig(configKey, &tlsCfg) + + cfg, err := ParseDSN(dsn) + + if err != nil { + t.Error(err.Error()) + } else if cfg.tls.ServerName != name { + t.Errorf("did not get the correct TLS ServerName (%s) parsing DSN (%s).", name, dsn) + } +} + +func TestDSNUnsafeCollation(t *testing.T) { + _, err := ParseDSN("/dbname?collation=gbk_chinese_ci&interpolateParams=true") + if err != errInvalidDSNUnsafeCollation { + t.Errorf("expected %v, got %v", errInvalidDSNUnsafeCollation, err) + } + + _, err = ParseDSN("/dbname?collation=gbk_chinese_ci&interpolateParams=false") + if err != nil { + t.Errorf("expected %v, got %v", nil, err) + } + + _, err = ParseDSN("/dbname?collation=gbk_chinese_ci") + if err != nil { + t.Errorf("expected %v, got %v", nil, err) + } + + _, err = ParseDSN("/dbname?collation=ascii_bin&interpolateParams=true") + if err != nil { + t.Errorf("expected %v, got %v", nil, err) + } + + _, err = ParseDSN("/dbname?collation=latin1_german1_ci&interpolateParams=true") + if err != nil { + t.Errorf("expected %v, got %v", nil, err) + } + + _, err = ParseDSN("/dbname?collation=utf8_general_ci&interpolateParams=true") + if err != nil { + t.Errorf("expected %v, got %v", nil, err) + } + + _, err = ParseDSN("/dbname?collation=utf8mb4_general_ci&interpolateParams=true") + if err != nil { + t.Errorf("expected %v, got %v", nil, err) + } +} + +func BenchmarkParseDSN(b *testing.B) { + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + for _, tst := range testDSNs { + if _, err := ParseDSN(tst.in); err != nil { + b.Error(err.Error()) + } + } + } +} diff --git a/vendor/github.com/go-sql-driver/mysql/errors_test.go b/vendor/github.com/go-sql-driver/mysql/errors_test.go new file mode 100644 index 000000000000..96f9126d679a --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/errors_test.go @@ -0,0 +1,42 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +package mysql + +import ( + "bytes" + "log" + "testing" +) + +func TestErrorsSetLogger(t *testing.T) { + previous := errLog + defer func() { + errLog = previous + }() + + // set up logger + const expected = "prefix: test\n" + buffer := bytes.NewBuffer(make([]byte, 0, 64)) + logger := log.New(buffer, "prefix: ", 0) + + // print + SetLogger(logger) + errLog.Print("test") + + // check result + if actual := buffer.String(); actual != expected { + t.Errorf("expected %q, got %q", expected, actual) + } +} + +func TestErrorsStrictIgnoreNotes(t *testing.T) { + runTests(t, dsn+"&sql_notes=false", func(dbt *DBTest) { + dbt.mustExec("DROP TABLE IF EXISTS does_not_exist") + }) +} diff --git a/vendor/github.com/go-sql-driver/mysql/utils_test.go b/vendor/github.com/go-sql-driver/mysql/utils_test.go new file mode 100644 index 000000000000..0d6c6684f280 --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/utils_test.go @@ -0,0 +1,197 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +import ( + "bytes" + "encoding/binary" + "fmt" + "testing" + "time" +) + +func TestScanNullTime(t *testing.T) { + var scanTests = []struct { + in interface{} + error bool + valid bool + time time.Time + }{ + {tDate, false, true, tDate}, + {sDate, false, true, tDate}, + {[]byte(sDate), false, true, tDate}, + {tDateTime, false, true, tDateTime}, + {sDateTime, false, true, tDateTime}, + {[]byte(sDateTime), false, true, tDateTime}, + {tDate0, false, true, tDate0}, + {sDate0, false, true, tDate0}, + {[]byte(sDate0), false, true, tDate0}, + {sDateTime0, false, true, tDate0}, + {[]byte(sDateTime0), false, true, tDate0}, + {"", true, false, tDate0}, + {"1234", true, false, tDate0}, + {0, true, false, tDate0}, + } + + var nt = NullTime{} + var err error + + for _, tst := range scanTests { + err = nt.Scan(tst.in) + if (err != nil) != tst.error { + t.Errorf("%v: expected error status %t, got %t", tst.in, tst.error, (err != nil)) + } + if nt.Valid != tst.valid { + t.Errorf("%v: expected valid status %t, got %t", tst.in, tst.valid, nt.Valid) + } + if nt.Time != tst.time { + t.Errorf("%v: expected time %v, got %v", tst.in, tst.time, nt.Time) + } + } +} + +func TestLengthEncodedInteger(t *testing.T) { + var integerTests = []struct { + num uint64 + encoded []byte + }{ + {0x0000000000000000, []byte{0x00}}, + {0x0000000000000012, []byte{0x12}}, + {0x00000000000000fa, []byte{0xfa}}, + {0x0000000000000100, []byte{0xfc, 0x00, 0x01}}, + {0x0000000000001234, []byte{0xfc, 0x34, 0x12}}, + {0x000000000000ffff, []byte{0xfc, 0xff, 0xff}}, + {0x0000000000010000, []byte{0xfd, 0x00, 0x00, 0x01}}, + {0x0000000000123456, []byte{0xfd, 0x56, 0x34, 0x12}}, + {0x0000000000ffffff, []byte{0xfd, 0xff, 0xff, 0xff}}, + {0x0000000001000000, []byte{0xfe, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00}}, + {0x123456789abcdef0, []byte{0xfe, 0xf0, 0xde, 0xbc, 0x9a, 0x78, 0x56, 0x34, 0x12}}, + {0xffffffffffffffff, []byte{0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}}, + } + + for _, tst := range integerTests { + num, isNull, numLen := readLengthEncodedInteger(tst.encoded) + if isNull { + t.Errorf("%x: expected %d, got NULL", tst.encoded, tst.num) + } + if num != tst.num { + t.Errorf("%x: expected %d, got %d", tst.encoded, tst.num, num) + } + 
if numLen != len(tst.encoded) { + t.Errorf("%x: expected size %d, got %d", tst.encoded, len(tst.encoded), numLen) + } + encoded := appendLengthEncodedInteger(nil, num) + if !bytes.Equal(encoded, tst.encoded) { + t.Errorf("%v: expected %x, got %x", num, tst.encoded, encoded) + } + } +} + +func TestOldPass(t *testing.T) { + scramble := []byte{9, 8, 7, 6, 5, 4, 3, 2} + vectors := []struct { + pass string + out string + }{ + {" pass", "47575c5a435b4251"}, + {"pass ", "47575c5a435b4251"}, + {"123\t456", "575c47505b5b5559"}, + {"C0mpl!ca ted#PASS123", "5d5d554849584a45"}, + } + for _, tuple := range vectors { + ours := scrambleOldPassword(scramble, []byte(tuple.pass)) + if tuple.out != fmt.Sprintf("%x", ours) { + t.Errorf("Failed old password %q", tuple.pass) + } + } +} + +func TestFormatBinaryDateTime(t *testing.T) { + rawDate := [11]byte{} + binary.LittleEndian.PutUint16(rawDate[:2], 1978) // years + rawDate[2] = 12 // months + rawDate[3] = 30 // days + rawDate[4] = 15 // hours + rawDate[5] = 46 // minutes + rawDate[6] = 23 // seconds + binary.LittleEndian.PutUint32(rawDate[7:], 987654) // microseconds + expect := func(expected string, inlen, outlen uint8) { + actual, _ := formatBinaryDateTime(rawDate[:inlen], outlen, false) + bytes, ok := actual.([]byte) + if !ok { + t.Errorf("formatBinaryDateTime must return []byte, was %T", actual) + } + if string(bytes) != expected { + t.Errorf( + "expected %q, got %q for length in %d, out %d", + bytes, actual, inlen, outlen, + ) + } + } + expect("0000-00-00", 0, 10) + expect("0000-00-00 00:00:00", 0, 19) + expect("1978-12-30", 4, 10) + expect("1978-12-30 15:46:23", 7, 19) + expect("1978-12-30 15:46:23.987654", 11, 26) +} + +func TestEscapeBackslash(t *testing.T) { + expect := func(expected, value string) { + actual := string(escapeBytesBackslash([]byte{}, []byte(value))) + if actual != expected { + t.Errorf( + "expected %s, got %s", + expected, actual, + ) + } + + actual = string(escapeStringBackslash([]byte{}, value)) + if actual != expected { + t.Errorf( + "expected %s, got %s", + expected, actual, + ) + } + } + + expect("foo\\0bar", "foo\x00bar") + expect("foo\\nbar", "foo\nbar") + expect("foo\\rbar", "foo\rbar") + expect("foo\\Zbar", "foo\x1abar") + expect("foo\\\"bar", "foo\"bar") + expect("foo\\\\bar", "foo\\bar") + expect("foo\\'bar", "foo'bar") +} + +func TestEscapeQuotes(t *testing.T) { + expect := func(expected, value string) { + actual := string(escapeBytesQuotes([]byte{}, []byte(value))) + if actual != expected { + t.Errorf( + "expected %s, got %s", + expected, actual, + ) + } + + actual = string(escapeStringQuotes([]byte{}, value)) + if actual != expected { + t.Errorf( + "expected %s, got %s", + expected, actual, + ) + } + } + + expect("foo\x00bar", "foo\x00bar") // not affected + expect("foo\nbar", "foo\nbar") // not affected + expect("foo\rbar", "foo\rbar") // not affected + expect("foo\x1abar", "foo\x1abar") // not affected + expect("foo''bar", "foo'bar") // affected + expect("foo\"bar", "foo\"bar") // not affected +} diff --git a/vendor/github.com/golang/glog/BUILD b/vendor/github.com/golang/glog/BUILD index 33ff088a2fe2..cc600c860289 100644 --- a/vendor/github.com/golang/glog/BUILD +++ b/vendor/github.com/golang/glog/BUILD @@ -1,9 +1,4 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "go_default_library", @@ -12,6 +7,14 @@ go_library( "glog_file.go", ], importpath = 
"github.com/golang/glog", + visibility = ["//visibility:public"], +) + +go_test( + name = "go_default_test", + srcs = ["glog_test.go"], + importpath = "github.com/golang/glog", + library = ":go_default_library", ) filegroup( @@ -25,4 +28,5 @@ filegroup( name = "all-srcs", srcs = [":package-srcs"], tags = ["automanaged"], + visibility = ["//visibility:public"], ) diff --git a/vendor/github.com/golang/glog/glog_test.go b/vendor/github.com/golang/glog/glog_test.go new file mode 100644 index 000000000000..0fb376e1fde8 --- /dev/null +++ b/vendor/github.com/golang/glog/glog_test.go @@ -0,0 +1,415 @@ +// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/ +// +// Copyright 2013 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package glog + +import ( + "bytes" + "fmt" + stdLog "log" + "path/filepath" + "runtime" + "strconv" + "strings" + "testing" + "time" +) + +// Test that shortHostname works as advertised. +func TestShortHostname(t *testing.T) { + for hostname, expect := range map[string]string{ + "": "", + "host": "host", + "host.google.com": "host", + } { + if got := shortHostname(hostname); expect != got { + t.Errorf("shortHostname(%q): expected %q, got %q", hostname, expect, got) + } + } +} + +// flushBuffer wraps a bytes.Buffer to satisfy flushSyncWriter. +type flushBuffer struct { + bytes.Buffer +} + +func (f *flushBuffer) Flush() error { + return nil +} + +func (f *flushBuffer) Sync() error { + return nil +} + +// swap sets the log writers and returns the old array. +func (l *loggingT) swap(writers [numSeverity]flushSyncWriter) (old [numSeverity]flushSyncWriter) { + l.mu.Lock() + defer l.mu.Unlock() + old = l.file + for i, w := range writers { + logging.file[i] = w + } + return +} + +// newBuffers sets the log writers to all new byte buffers and returns the old array. +func (l *loggingT) newBuffers() [numSeverity]flushSyncWriter { + return l.swap([numSeverity]flushSyncWriter{new(flushBuffer), new(flushBuffer), new(flushBuffer), new(flushBuffer)}) +} + +// contents returns the specified log value as a string. +func contents(s severity) string { + return logging.file[s].(*flushBuffer).String() +} + +// contains reports whether the string is contained in the log. +func contains(s severity, str string, t *testing.T) bool { + return strings.Contains(contents(s), str) +} + +// setFlags configures the logging flags how the test expects them. +func setFlags() { + logging.toStderr = false +} + +// Test that Info works as advertised. 
+func TestInfo(t *testing.T) { + setFlags() + defer logging.swap(logging.newBuffers()) + Info("test") + if !contains(infoLog, "I", t) { + t.Errorf("Info has wrong character: %q", contents(infoLog)) + } + if !contains(infoLog, "test", t) { + t.Error("Info failed") + } +} + +func TestInfoDepth(t *testing.T) { + setFlags() + defer logging.swap(logging.newBuffers()) + + f := func() { InfoDepth(1, "depth-test1") } + + // The next three lines must stay together + _, _, wantLine, _ := runtime.Caller(0) + InfoDepth(0, "depth-test0") + f() + + msgs := strings.Split(strings.TrimSuffix(contents(infoLog), "\n"), "\n") + if len(msgs) != 2 { + t.Fatalf("Got %d lines, expected 2", len(msgs)) + } + + for i, m := range msgs { + if !strings.HasPrefix(m, "I") { + t.Errorf("InfoDepth[%d] has wrong character: %q", i, m) + } + w := fmt.Sprintf("depth-test%d", i) + if !strings.Contains(m, w) { + t.Errorf("InfoDepth[%d] missing %q: %q", i, w, m) + } + + // pull out the line number (between : and ]) + msg := m[strings.LastIndex(m, ":")+1:] + x := strings.Index(msg, "]") + if x < 0 { + t.Errorf("InfoDepth[%d]: missing ']': %q", i, m) + continue + } + line, err := strconv.Atoi(msg[:x]) + if err != nil { + t.Errorf("InfoDepth[%d]: bad line number: %q", i, m) + continue + } + wantLine++ + if wantLine != line { + t.Errorf("InfoDepth[%d]: got line %d, want %d", i, line, wantLine) + } + } +} + +func init() { + CopyStandardLogTo("INFO") +} + +// Test that CopyStandardLogTo panics on bad input. +func TestCopyStandardLogToPanic(t *testing.T) { + defer func() { + if s, ok := recover().(string); !ok || !strings.Contains(s, "LOG") { + t.Errorf(`CopyStandardLogTo("LOG") should have panicked: %v`, s) + } + }() + CopyStandardLogTo("LOG") +} + +// Test that using the standard log package logs to INFO. +func TestStandardLog(t *testing.T) { + setFlags() + defer logging.swap(logging.newBuffers()) + stdLog.Print("test") + if !contains(infoLog, "I", t) { + t.Errorf("Info has wrong character: %q", contents(infoLog)) + } + if !contains(infoLog, "test", t) { + t.Error("Info failed") + } +} + +// Test that the header has the correct format. +func TestHeader(t *testing.T) { + setFlags() + defer logging.swap(logging.newBuffers()) + defer func(previous func() time.Time) { timeNow = previous }(timeNow) + timeNow = func() time.Time { + return time.Date(2006, 1, 2, 15, 4, 5, .067890e9, time.Local) + } + pid = 1234 + Info("test") + var line int + format := "I0102 15:04:05.067890 1234 glog_test.go:%d] test\n" + n, err := fmt.Sscanf(contents(infoLog), format, &line) + if n != 1 || err != nil { + t.Errorf("log format error: %d elements, error %s:\n%s", n, err, contents(infoLog)) + } + // Scanf treats multiple spaces as equivalent to a single space, + // so check for correct space-padding also. + want := fmt.Sprintf(format, line) + if contents(infoLog) != want { + t.Errorf("log format error: got:\n\t%q\nwant:\t%q", contents(infoLog), want) + } +} + +// Test that an Error log goes to Warning and Info. +// Even in the Info log, the source character will be E, so the data should +// all be identical. 
+func TestError(t *testing.T) { + setFlags() + defer logging.swap(logging.newBuffers()) + Error("test") + if !contains(errorLog, "E", t) { + t.Errorf("Error has wrong character: %q", contents(errorLog)) + } + if !contains(errorLog, "test", t) { + t.Error("Error failed") + } + str := contents(errorLog) + if !contains(warningLog, str, t) { + t.Error("Warning failed") + } + if !contains(infoLog, str, t) { + t.Error("Info failed") + } +} + +// Test that a Warning log goes to Info. +// Even in the Info log, the source character will be W, so the data should +// all be identical. +func TestWarning(t *testing.T) { + setFlags() + defer logging.swap(logging.newBuffers()) + Warning("test") + if !contains(warningLog, "W", t) { + t.Errorf("Warning has wrong character: %q", contents(warningLog)) + } + if !contains(warningLog, "test", t) { + t.Error("Warning failed") + } + str := contents(warningLog) + if !contains(infoLog, str, t) { + t.Error("Info failed") + } +} + +// Test that a V log goes to Info. +func TestV(t *testing.T) { + setFlags() + defer logging.swap(logging.newBuffers()) + logging.verbosity.Set("2") + defer logging.verbosity.Set("0") + V(2).Info("test") + if !contains(infoLog, "I", t) { + t.Errorf("Info has wrong character: %q", contents(infoLog)) + } + if !contains(infoLog, "test", t) { + t.Error("Info failed") + } +} + +// Test that a vmodule enables a log in this file. +func TestVmoduleOn(t *testing.T) { + setFlags() + defer logging.swap(logging.newBuffers()) + logging.vmodule.Set("glog_test=2") + defer logging.vmodule.Set("") + if !V(1) { + t.Error("V not enabled for 1") + } + if !V(2) { + t.Error("V not enabled for 2") + } + if V(3) { + t.Error("V enabled for 3") + } + V(2).Info("test") + if !contains(infoLog, "I", t) { + t.Errorf("Info has wrong character: %q", contents(infoLog)) + } + if !contains(infoLog, "test", t) { + t.Error("Info failed") + } +} + +// Test that a vmodule of another file does not enable a log in this file. +func TestVmoduleOff(t *testing.T) { + setFlags() + defer logging.swap(logging.newBuffers()) + logging.vmodule.Set("notthisfile=2") + defer logging.vmodule.Set("") + for i := 1; i <= 3; i++ { + if V(Level(i)) { + t.Errorf("V enabled for %d", i) + } + } + V(2).Info("test") + if contents(infoLog) != "" { + t.Error("V logged incorrectly") + } +} + +// vGlobs are patterns that match/don't match this file at V=2. +var vGlobs = map[string]bool{ + // Easy to test the numeric match here. + "glog_test=1": false, // If -vmodule sets V to 1, V(2) will fail. + "glog_test=2": true, + "glog_test=3": true, // If -vmodule sets V to 1, V(3) will succeed. + // These all use 2 and check the patterns. All are true. + "*=2": true, + "?l*=2": true, + "????_*=2": true, + "??[mno]?_*t=2": true, + // These all use 2 and check the patterns. All are false. + "*x=2": false, + "m*=2": false, + "??_*=2": false, + "?[abc]?_*t=2": false, +} + +// Test that vmodule globbing works as advertised. +func testVmoduleGlob(pat string, match bool, t *testing.T) { + setFlags() + defer logging.swap(logging.newBuffers()) + defer logging.vmodule.Set("") + logging.vmodule.Set(pat) + if V(2) != Verbose(match) { + t.Errorf("incorrect match for %q: got %t expected %t", pat, V(2), match) + } +} + +// Test that a vmodule globbing works as advertised. 
+func TestVmoduleGlob(t *testing.T) { + for glob, match := range vGlobs { + testVmoduleGlob(glob, match, t) + } +} + +func TestRollover(t *testing.T) { + setFlags() + var err error + defer func(previous func(error)) { logExitFunc = previous }(logExitFunc) + logExitFunc = func(e error) { + err = e + } + defer func(previous uint64) { MaxSize = previous }(MaxSize) + MaxSize = 512 + + Info("x") // Be sure we have a file. + info, ok := logging.file[infoLog].(*syncBuffer) + if !ok { + t.Fatal("info wasn't created") + } + if err != nil { + t.Fatalf("info has initial error: %v", err) + } + fname0 := info.file.Name() + Info(strings.Repeat("x", int(MaxSize))) // force a rollover + if err != nil { + t.Fatalf("info has error after big write: %v", err) + } + + // Make sure the next log file gets a file name with a different + // time stamp. + // + // TODO: determine whether we need to support subsecond log + // rotation. C++ does not appear to handle this case (nor does it + // handle Daylight Savings Time properly). + time.Sleep(1 * time.Second) + + Info("x") // create a new file + if err != nil { + t.Fatalf("error after rotation: %v", err) + } + fname1 := info.file.Name() + if fname0 == fname1 { + t.Errorf("info.f.Name did not change: %v", fname0) + } + if info.nbytes >= MaxSize { + t.Errorf("file size was not reset: %d", info.nbytes) + } +} + +func TestLogBacktraceAt(t *testing.T) { + setFlags() + defer logging.swap(logging.newBuffers()) + // The peculiar style of this code simplifies line counting and maintenance of the + // tracing block below. + var infoLine string + setTraceLocation := func(file string, line int, ok bool, delta int) { + if !ok { + t.Fatal("could not get file:line") + } + _, file = filepath.Split(file) + infoLine = fmt.Sprintf("%s:%d", file, line+delta) + err := logging.traceLocation.Set(infoLine) + if err != nil { + t.Fatal("error setting log_backtrace_at: ", err) + } + } + { + // Start of tracing block. These lines know about each other's relative position. + _, file, line, ok := runtime.Caller(0) + setTraceLocation(file, line, ok, +2) // Two lines between Caller and Info calls. + Info("we want a stack trace here") + } + numAppearances := strings.Count(contents(infoLog), infoLine) + if numAppearances < 2 { + // Need 2 appearances, one in the log header and one in the trace: + // log_test.go:281: I0511 16:36:06.952398 02238 log_test.go:280] we want a stack trace here + // ... + // github.com/glog/glog_test.go:280 (0x41ba91) + // ... + // We could be more precise but that would require knowing the details + // of the traceback format, which may not be dependable. 
+ t.Fatal("got no trace back; log is ", contents(infoLog)) + } +} + +func BenchmarkHeader(b *testing.B) { + for i := 0; i < b.N; i++ { + buf, _, _ := logging.header(infoLog, 0) + logging.putBuffer(buf) + } +} diff --git a/vendor/github.com/golang/lint/BUILD b/vendor/github.com/golang/lint/BUILD index 2145fdd1e6b1..608fa33a497c 100644 --- a/vendor/github.com/golang/lint/BUILD +++ b/vendor/github.com/golang/lint/BUILD @@ -1,17 +1,20 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "go_default_library", srcs = ["lint.go"], importpath = "github.com/golang/lint", + visibility = ["//visibility:public"], deps = ["//vendor/golang.org/x/tools/go/gcexportdata:go_default_library"], ) +go_test( + name = "go_default_test", + srcs = ["lint_test.go"], + importpath = "github.com/golang/lint", + library = ":go_default_library", +) + filegroup( name = "package-srcs", srcs = glob(["**"]), @@ -23,4 +26,5 @@ filegroup( name = "all-srcs", srcs = [":package-srcs"], tags = ["automanaged"], + visibility = ["//visibility:public"], ) diff --git a/vendor/github.com/golang/lint/lint_test.go b/vendor/github.com/golang/lint/lint_test.go new file mode 100644 index 000000000000..92db5968e4d6 --- /dev/null +++ b/vendor/github.com/golang/lint/lint_test.go @@ -0,0 +1,317 @@ +// Copyright (c) 2013 The Go Authors. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file or at +// https://developers.google.com/open-source/licenses/bsd. + +package lint + +import ( + "bytes" + "flag" + "fmt" + "go/ast" + "go/parser" + "go/printer" + "go/token" + "go/types" + "io/ioutil" + "path" + "regexp" + "strconv" + "strings" + "testing" +) + +var lintMatch = flag.String("lint.match", "", "restrict testdata matches to this pattern") + +func TestAll(t *testing.T) { + l := new(Linter) + rx, err := regexp.Compile(*lintMatch) + if err != nil { + t.Fatalf("Bad -lint.match value %q: %v", *lintMatch, err) + } + + baseDir := "testdata" + fis, err := ioutil.ReadDir(baseDir) + if err != nil { + t.Fatalf("ioutil.ReadDir: %v", err) + } + if len(fis) == 0 { + t.Fatalf("no files in %v", baseDir) + } + for _, fi := range fis { + if !rx.MatchString(fi.Name()) { + continue + } + //t.Logf("Testing %s", fi.Name()) + src, err := ioutil.ReadFile(path.Join(baseDir, fi.Name())) + if err != nil { + t.Fatalf("Failed reading %s: %v", fi.Name(), err) + } + + ins := parseInstructions(t, fi.Name(), src) + if ins == nil { + t.Errorf("Test file %v does not have instructions", fi.Name()) + continue + } + + ps, err := l.Lint(fi.Name(), src) + if err != nil { + t.Errorf("Linting %s: %v", fi.Name(), err) + continue + } + + for _, in := range ins { + ok := false + for i, p := range ps { + if p.Position.Line != in.Line { + continue + } + if in.Match.MatchString(p.Text) { + // check replacement if we are expecting one + if in.Replacement != "" { + // ignore any inline comments, since that would be recursive + r := p.ReplacementLine + if i := strings.Index(r, " //"); i >= 0 { + r = r[:i] + } + if r != in.Replacement { + t.Errorf("Lint failed at %s:%d; got replacement %q, want %q", fi.Name(), in.Line, r, in.Replacement) + } + } + + // remove this problem from ps + copy(ps[i:], ps[i+1:]) + ps = ps[:len(ps)-1] + + //t.Logf("/%v/ matched at %s:%d", in.Match, fi.Name(), in.Line) + ok = true + break + } + } + if !ok { + t.Errorf("Lint failed at %s:%d; /%v/ did 
not match", fi.Name(), in.Line, in.Match) + } + } + for _, p := range ps { + t.Errorf("Unexpected problem at %s:%d: %v", fi.Name(), p.Position.Line, p.Text) + } + } +} + +type instruction struct { + Line int // the line number this applies to + Match *regexp.Regexp // what pattern to match + Replacement string // what the suggested replacement line should be +} + +// parseInstructions parses instructions from the comments in a Go source file. +// It returns nil if none were parsed. +func parseInstructions(t *testing.T, filename string, src []byte) []instruction { + fset := token.NewFileSet() + f, err := parser.ParseFile(fset, filename, src, parser.ParseComments) + if err != nil { + t.Fatalf("Test file %v does not parse: %v", filename, err) + } + var ins []instruction + for _, cg := range f.Comments { + ln := fset.Position(cg.Pos()).Line + raw := cg.Text() + for _, line := range strings.Split(raw, "\n") { + if line == "" || strings.HasPrefix(line, "#") { + continue + } + if line == "OK" && ins == nil { + // so our return value will be non-nil + ins = make([]instruction, 0) + continue + } + if strings.Contains(line, "MATCH") { + rx, err := extractPattern(line) + if err != nil { + t.Fatalf("At %v:%d: %v", filename, ln, err) + } + matchLine := ln + if i := strings.Index(line, "MATCH:"); i >= 0 { + // This is a match for a different line. + lns := strings.TrimPrefix(line[i:], "MATCH:") + lns = lns[:strings.Index(lns, " ")] + matchLine, err = strconv.Atoi(lns) + if err != nil { + t.Fatalf("Bad match line number %q at %v:%d: %v", lns, filename, ln, err) + } + } + var repl string + if r, ok := extractReplacement(line); ok { + repl = r + } + ins = append(ins, instruction{ + Line: matchLine, + Match: rx, + Replacement: repl, + }) + } + } + } + return ins +} + +func extractPattern(line string) (*regexp.Regexp, error) { + a, b := strings.Index(line, "/"), strings.LastIndex(line, "/") + if a == -1 || a == b { + return nil, fmt.Errorf("malformed match instruction %q", line) + } + pat := line[a+1 : b] + rx, err := regexp.Compile(pat) + if err != nil { + return nil, fmt.Errorf("bad match pattern %q: %v", pat, err) + } + return rx, nil +} + +func extractReplacement(line string) (string, bool) { + // Look for this: / -> ` + // (the end of a match and start of a backtick string), + // and then the closing backtick. 
+ const start = "/ -> `" + a, b := strings.Index(line, start), strings.LastIndex(line, "`") + if a < 0 || a > b { + return "", false + } + return line[a+len(start) : b], true +} + +func render(fset *token.FileSet, x interface{}) string { + var buf bytes.Buffer + if err := printer.Fprint(&buf, fset, x); err != nil { + panic(err) + } + return buf.String() +} + +func TestLine(t *testing.T) { + tests := []struct { + src string + offset int + want string + }{ + {"single line file", 5, "single line file"}, + {"single line file with newline\n", 5, "single line file with newline\n"}, + {"first\nsecond\nthird\n", 2, "first\n"}, + {"first\nsecond\nthird\n", 9, "second\n"}, + {"first\nsecond\nthird\n", 14, "third\n"}, + {"first\nsecond\nthird with no newline", 16, "third with no newline"}, + {"first byte\n", 0, "first byte\n"}, + } + for _, test := range tests { + got := srcLine([]byte(test.src), token.Position{Offset: test.offset}) + if got != test.want { + t.Errorf("srcLine(%q, offset=%d) = %q, want %q", test.src, test.offset, got, test.want) + } + } +} + +func TestLintName(t *testing.T) { + tests := []struct { + name, want string + }{ + {"foo_bar", "fooBar"}, + {"foo_bar_baz", "fooBarBaz"}, + {"Foo_bar", "FooBar"}, + {"foo_WiFi", "fooWiFi"}, + {"id", "id"}, + {"Id", "ID"}, + {"foo_id", "fooID"}, + {"fooId", "fooID"}, + {"fooUid", "fooUID"}, + {"idFoo", "idFoo"}, + {"uidFoo", "uidFoo"}, + {"midIdDle", "midIDDle"}, + {"APIProxy", "APIProxy"}, + {"ApiProxy", "APIProxy"}, + {"apiProxy", "apiProxy"}, + {"_Leading", "_Leading"}, + {"___Leading", "_Leading"}, + {"trailing_", "trailing"}, + {"trailing___", "trailing"}, + {"a_b", "aB"}, + {"a__b", "aB"}, + {"a___b", "aB"}, + {"Rpc1150", "RPC1150"}, + {"case3_1", "case3_1"}, + {"case3__1", "case3_1"}, + {"IEEE802_16bit", "IEEE802_16bit"}, + {"IEEE802_16Bit", "IEEE802_16Bit"}, + } + for _, test := range tests { + got := lintName(test.name) + if got != test.want { + t.Errorf("lintName(%q) = %q, want %q", test.name, got, test.want) + } + } +} + +func TestExportedType(t *testing.T) { + tests := []struct { + typString string + exp bool + }{ + {"int", true}, + {"string", false}, // references the shadowed builtin "string" + {"T", true}, + {"t", false}, + {"*T", true}, + {"*t", false}, + {"map[int]complex128", true}, + } + for _, test := range tests { + src := `package foo; type T int; type t int; type string struct{}` + fset := token.NewFileSet() + file, err := parser.ParseFile(fset, "foo.go", src, 0) + if err != nil { + t.Fatalf("Parsing %q: %v", src, err) + } + // use the package name as package path + config := &types.Config{} + pkg, err := config.Check(file.Name.Name, fset, []*ast.File{file}, nil) + if err != nil { + t.Fatalf("Type checking %q: %v", src, err) + } + tv, err := types.Eval(fset, pkg, token.NoPos, test.typString) + if err != nil { + t.Errorf("types.Eval(%q): %v", test.typString, err) + continue + } + if got := exportedType(tv.Type); got != test.exp { + t.Errorf("exportedType(%v) = %t, want %t", tv.Type, got, test.exp) + } + } +} + +func TestIsGenerated(t *testing.T) { + tests := []struct { + source string + generated bool + }{ + {"// Code Generated by some tool. DO NOT EDIT.", false}, + {"// Code generated by some tool. DO NOT EDIT.", true}, + {"// Code generated by some tool. DO NOT EDIT", false}, + {"// Code generated DO NOT EDIT.", true}, + {"// Code generated DO NOT EDIT.", false}, + {"\t\t// Code generated by some tool. DO NOT EDIT.\npackage foo\n", false}, + {"// Code generated by some tool. 
DO NOT EDIT.\npackage foo\n", true}, + {"package foo\n// Code generated by some tool. DO NOT EDIT.\ntype foo int\n", true}, + {"package foo\n // Code generated by some tool. DO NOT EDIT.\ntype foo int\n", false}, + {"package foo\n// Code generated by some tool. DO NOT EDIT. \ntype foo int\n", false}, + {"package foo\ntype foo int\n// Code generated by some tool. DO NOT EDIT.\n", true}, + {"package foo\ntype foo int\n// Code generated by some tool. DO NOT EDIT.", true}, + } + + for i, test := range tests { + got := isGenerated([]byte(test.source)) + if got != test.generated { + t.Errorf("test %d, isGenerated() = %v, want %v", i, got, test.generated) + } + } +} diff --git a/vendor/github.com/golang/mock/.gitignore b/vendor/github.com/golang/mock/.gitignore new file mode 100644 index 000000000000..4eb2f795091e --- /dev/null +++ b/vendor/github.com/golang/mock/.gitignore @@ -0,0 +1,17 @@ +# Object files and binaries from go. +*.[568] + +# Library files. +*.a + +# Any file prefixed by an underscore. +*/_* + +# Vim temporary files. +.*.swp + +# The mockgen binary. +mockgen/mockgen + +# A binary produced by gotest. +#gomock/[568]\.out diff --git a/vendor/github.com/golang/mock/AUTHORS b/vendor/github.com/golang/mock/AUTHORS new file mode 100644 index 000000000000..660b8ccc8ae0 --- /dev/null +++ b/vendor/github.com/golang/mock/AUTHORS @@ -0,0 +1,12 @@ +# This is the official list of GoMock authors for copyright purposes. +# This file is distinct from the CONTRIBUTORS files. +# See the latter for an explanation. + +# Names should be added to this file as +# Name or Organization +# The email address is not required for organizations. + +# Please keep the list sorted. + +Alex Reece +Google Inc. diff --git a/vendor/github.com/golang/mock/CONTRIBUTORS b/vendor/github.com/golang/mock/CONTRIBUTORS new file mode 100644 index 000000000000..def849cab1bd --- /dev/null +++ b/vendor/github.com/golang/mock/CONTRIBUTORS @@ -0,0 +1,37 @@ +# This is the official list of people who can contribute (and typically +# have contributed) code to the gomock repository. +# The AUTHORS file lists the copyright holders; this file +# lists people. For example, Google employees are listed here +# but not in AUTHORS, because Google holds the copyright. +# +# The submission process automatically checks to make sure +# that people submitting code are listed in this file (by email address). +# +# Names should be added to this file only after verifying that +# the individual or the individual's organization has agreed to +# the appropriate Contributor License Agreement, found here: +# +# http://code.google.com/legal/individual-cla-v1.0.html +# http://code.google.com/legal/corporate-cla-v1.0.html +# +# The agreement for individuals can be filled out on the web. +# +# When adding J Random Contributor's name to this file, +# either J's name or J's organization's name should be +# added to the AUTHORS file, depending on whether the +# individual or corporate CLA was used. + +# Names should be added to this file like so: +# Name +# +# An entry with two email addresses specifies that the +# first address should be used in the submit logs and +# that the second address should be recognized as the +# same person when interacting with Rietveld. + +# Please keep the list sorted. 
+ +Aaron Jacobs +Alex Reece +David Symonds +Ryan Barrett diff --git a/vendor/github.com/golang/mock/README.md b/vendor/github.com/golang/mock/README.md new file mode 100644 index 000000000000..f74920748f48 --- /dev/null +++ b/vendor/github.com/golang/mock/README.md @@ -0,0 +1,81 @@ +GoMock is a mocking framework for the [Go programming language][golang]. It +integrates well with Go's built-in `testing` package, but can be used in other +contexts too. + + +Installation +------------ + +Once you have [installed Go][golang-install], run these commands +to install the `gomock` package and the `mockgen` tool: + + go get github.com/golang/mock/gomock + go get github.com/golang/mock/mockgen + + +Documentation +------------- + +After installing, you can use `go doc` to get documentation: + + go doc github.com/golang/mock/gomock + +Alternatively, there is an online reference for the package hosted on GoPkgDoc +[here][gomock-ref]. + + +Running mockgen +--------------- + +`mockgen` has two modes of operation: source and reflect. +Source mode generates mock interfaces from a source file. +It is enabled by using the -source flag. Other flags that +may be useful in this mode are -imports and -aux_files. + +Example: + + mockgen -source=foo.go [other options] + +Reflect mode generates mock interfaces by building a program +that uses reflection to understand interfaces. It is enabled +by passing two non-flag arguments: an import path, and a +comma-separated list of symbols. + +Example: + + mockgen database/sql/driver Conn,Driver + +The `mockgen` command is used to generate source code for a mock +class given a Go source file containing interfaces to be mocked. +It supports the following flags: + + * `-source`: A file containing interfaces to be mocked. + + * `-destination`: A file to which to write the resulting source code. If you + don't set this, the code is printed to standard output. + + * `-package`: The package to use for the resulting mock class + source code. If you don't set this, the package name is `mock_` concatenated + with the package of the input file. + + * `-imports`: A list of explicit imports that should be used in the resulting + source code, specified as a comma-separated list of elements of the form + `foo=bar/baz`, where `bar/baz` is the package being imported and `foo` is + the identifier to use for the package in the generated source code. + + * `-aux_files`: A list of additional files that should be consulted to + resolve e.g. embedded interfaces defined in a different file. This is + specified as a comma-separated list of elements of the form + `foo=bar/baz.go`, where `bar/baz.go` is the source file and `foo` is the + package name of that file used by the -source file. + +For an example of the use of `mockgen`, see the `sample/` directory. In simple +cases, you will need only the `-source` flag. + + +TODO: Brief overview of how to create mock objects and set up expectations, and +an example. 
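(A minimal sketch of what the TODO above asks for, written against the raw `Controller` API that the vendored tests in this change exercise; `Subject` and `Greet` are hypothetical stand-ins for a mockgen-generated mock, whose methods delegate to `Controller.Call` internally:)

```go
package example_test

import (
	"testing"

	"github.com/golang/mock/gomock"
)

// Subject stands in for a mockgen-generated mock; the Controller
// checks recorded and actual calls against its methods via reflection.
type Subject struct{}

func (s *Subject) Greet(name string) string { return "" }

func TestGreet(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish() // fails the test if an expected call never happened

	s := new(Subject)

	// Expect exactly one Greet("world") call and stub its return value.
	ctrl.RecordCall(s, "Greet", "world").Return("hello, world")

	// A generated mock's Greet method would issue this Call internally.
	rets := ctrl.Call(s, "Greet", "world")
	if got := rets[0].(string); got != "hello, world" {
		t.Errorf("Greet() = %q, want %q", got, "hello, world")
	}
}
```

In everyday use you would call `mock.Greet("world")` on the generated type rather than invoking `ctrl.Call` directly.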
+ +[golang]: http://golang.org/ +[golang-install]: http://golang.org/doc/install.html#releases +[gomock-ref]: http://godoc.org/github.com/golang/mock/gomock diff --git a/vendor/github.com/golang/mock/gomock/BUILD b/vendor/github.com/golang/mock/gomock/BUILD index edf3aca56d6c..cd0d4db0f8c5 100644 --- a/vendor/github.com/golang/mock/gomock/BUILD +++ b/vendor/github.com/golang/mock/gomock/BUILD @@ -1,9 +1,4 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "go_default_library", @@ -14,6 +9,20 @@ go_library( "matchers.go", ], importpath = "github.com/golang/mock/gomock", + visibility = ["//visibility:public"], +) + +go_test( + name = "go_default_xtest", + srcs = [ + "controller_test.go", + "matchers_test.go", + ], + importpath = "github.com/golang/mock/gomock_test", + deps = [ + ":go_default_library", + "//vendor/github.com/golang/mock/gomock/mock_matcher:go_default_library", + ], ) filegroup( @@ -27,4 +36,5 @@ filegroup( name = "all-srcs", srcs = [":package-srcs"], tags = ["automanaged"], + visibility = ["//visibility:public"], ) diff --git a/vendor/github.com/golang/mock/gomock/controller_test.go b/vendor/github.com/golang/mock/gomock/controller_test.go new file mode 100644 index 000000000000..ac5204f807d2 --- /dev/null +++ b/vendor/github.com/golang/mock/gomock/controller_test.go @@ -0,0 +1,475 @@ +// Copyright 2011 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package gomock_test + +import ( + "fmt" + "reflect" + "testing" + + "github.com/golang/mock/gomock" +) + +type ErrorReporter struct { + t *testing.T + log []string + failed bool + fatalToken struct{} +} + +func NewErrorReporter(t *testing.T) *ErrorReporter { + return &ErrorReporter{t: t} +} + +func (e *ErrorReporter) reportLog() { + for _, entry := range e.log { + e.t.Log(entry) + } +} + +func (e *ErrorReporter) assertPass(msg string) { + if e.failed { + e.t.Errorf("Expected pass, but got failure(s): %s", msg) + e.reportLog() + } +} + +func (e *ErrorReporter) assertFail(msg string) { + if !e.failed { + e.t.Error("Expected failure, but got pass: %s", msg) + } +} + +// Use to check that code triggers a fatal test failure. +func (e *ErrorReporter) assertFatal(fn func()) { + defer func() { + err := recover() + if err == nil { + var actual string + if e.failed { + actual = "non-fatal failure" + } else { + actual = "pass" + } + e.t.Error("Expected fatal failure, but got a", actual) + } else if token, ok := err.(*struct{}); ok && token == &e.fatalToken { + // This is okay - the panic is from Fatalf(). + return + } else { + // Some other panic. + panic(err) + } + }() + + fn() +} + +// recoverUnexpectedFatal can be used as a deferred call in test cases to +// recover from and display a call to ErrorReporter.Fatalf(). +func (e *ErrorReporter) recoverUnexpectedFatal() { + err := recover() + if err == nil { + // No panic. 
+ } else if token, ok := err.(*struct{}); ok && token == &e.fatalToken { + // Unexpected fatal error happened. + e.t.Error("Got unexpected fatal error(s). All errors up to this point:") + e.reportLog() + return + } else { + // Some other panic. + panic(err) + } +} + +func (e *ErrorReporter) Logf(format string, args ...interface{}) { + e.log = append(e.log, fmt.Sprintf(format, args...)) +} + +func (e *ErrorReporter) Errorf(format string, args ...interface{}) { + e.Logf(format, args...) + e.failed = true +} + +func (e *ErrorReporter) Fatalf(format string, args ...interface{}) { + e.Logf(format, args...) + e.failed = true + panic(&e.fatalToken) +} + +// A type purely for use as a receiver in testing the Controller. +type Subject struct{} + +func (s *Subject) FooMethod(arg string) int { + return 0 +} + +func (s *Subject) BarMethod(arg string) int { + return 0 +} + +func assertEqual(t *testing.T, expected interface{}, actual interface{}) { + if !reflect.DeepEqual(expected, actual) { + t.Error("Expected %+v, but got %+v", expected, actual) + } +} + +func createFixtures(t *testing.T) (reporter *ErrorReporter, ctrl *gomock.Controller) { + // reporter acts as a testing.T-like object that we pass to the + // Controller. We use it to test that the mock considered tests + // successful or failed. + reporter = NewErrorReporter(t) + ctrl = gomock.NewController(reporter) + return +} + +func TestNoCalls(t *testing.T) { + reporter, ctrl := createFixtures(t) + ctrl.Finish() + reporter.assertPass("No calls expected or made.") +} + +func TestExpectedMethodCall(t *testing.T) { + reporter, ctrl := createFixtures(t) + subject := new(Subject) + + ctrl.RecordCall(subject, "FooMethod", "argument") + ctrl.Call(subject, "FooMethod", "argument") + ctrl.Finish() + + reporter.assertPass("Expected method call made.") +} + +func TestUnexpectedMethodCall(t *testing.T) { + reporter, ctrl := createFixtures(t) + subject := new(Subject) + + reporter.assertFatal(func() { + ctrl.Call(subject, "FooMethod", "argument") + }) + + ctrl.Finish() +} + +func TestRepeatedCall(t *testing.T) { + reporter, ctrl := createFixtures(t) + subject := new(Subject) + + ctrl.RecordCall(subject, "FooMethod", "argument").Times(3) + ctrl.Call(subject, "FooMethod", "argument") + ctrl.Call(subject, "FooMethod", "argument") + ctrl.Call(subject, "FooMethod", "argument") + reporter.assertPass("After expected repeated method calls.") + reporter.assertFatal(func() { + ctrl.Call(subject, "FooMethod", "argument") + }) + ctrl.Finish() + reporter.assertFail("After calling one too many times.") +} + +func TestUnexpectedArgCount(t *testing.T) { + reporter, ctrl := createFixtures(t) + defer reporter.recoverUnexpectedFatal() + subject := new(Subject) + + ctrl.RecordCall(subject, "FooMethod", "argument") + reporter.assertFatal(func() { + // This call is made with the wrong number of arguments... + ctrl.Call(subject, "FooMethod", "argument", "extra_argument") + }) + reporter.assertFatal(func() { + // ... so is this. + ctrl.Call(subject, "FooMethod") + }) + reporter.assertFatal(func() { + // The expected call wasn't made. 
+ ctrl.Finish() + }) +} + +func TestAnyTimes(t *testing.T) { + reporter, ctrl := createFixtures(t) + subject := new(Subject) + + ctrl.RecordCall(subject, "FooMethod", "argument").AnyTimes() + for i := 0; i < 100; i++ { + ctrl.Call(subject, "FooMethod", "argument") + } + reporter.assertPass("After 100 method calls.") + ctrl.Finish() +} + +func TestMinTimes1(t *testing.T) { + // It fails if there are no calls + reporter, ctrl := createFixtures(t) + subject := new(Subject) + ctrl.RecordCall(subject, "FooMethod", "argument").MinTimes(1) + reporter.assertFatal(func() { + ctrl.Finish() + }) + + // It succeeds if there is one call + reporter, ctrl = createFixtures(t) + subject = new(Subject) + ctrl.RecordCall(subject, "FooMethod", "argument").MinTimes(1) + ctrl.Call(subject, "FooMethod", "argument") + ctrl.Finish() + + // It succeeds if there are many calls + reporter, ctrl = createFixtures(t) + subject = new(Subject) + ctrl.RecordCall(subject, "FooMethod", "argument").MinTimes(1) + for i := 0; i < 100; i++ { + ctrl.Call(subject, "FooMethod", "argument") + } + ctrl.Finish() +} + +func TestMaxTimes1(t *testing.T) { + // It succeeds if there are no calls + _, ctrl := createFixtures(t) + subject := new(Subject) + ctrl.RecordCall(subject, "FooMethod", "argument").MaxTimes(1) + ctrl.Finish() + + // It succeeds if there is one call + _, ctrl = createFixtures(t) + subject = new(Subject) + ctrl.RecordCall(subject, "FooMethod", "argument").MaxTimes(1) + ctrl.Call(subject, "FooMethod", "argument") + ctrl.Finish() + + //It fails if there are more + reporter, ctrl := createFixtures(t) + subject = new(Subject) + ctrl.RecordCall(subject, "FooMethod", "argument").MaxTimes(1) + ctrl.Call(subject, "FooMethod", "argument") + reporter.assertFatal(func() { + ctrl.Call(subject, "FooMethod", "argument") + }) + ctrl.Finish() +} + +func TestMinMaxTimes(t *testing.T) { + // It fails if there are less calls than specified + reporter, ctrl := createFixtures(t) + subject := new(Subject) + ctrl.RecordCall(subject, "FooMethod", "argument").MinTimes(2).MaxTimes(2) + ctrl.Call(subject, "FooMethod", "argument") + reporter.assertFatal(func() { + ctrl.Finish() + }) + + // It fails if there are more calls than specified + reporter, ctrl = createFixtures(t) + subject = new(Subject) + ctrl.RecordCall(subject, "FooMethod", "argument").MinTimes(2).MaxTimes(2) + ctrl.Call(subject, "FooMethod", "argument") + ctrl.Call(subject, "FooMethod", "argument") + reporter.assertFatal(func() { + ctrl.Call(subject, "FooMethod", "argument") + }) + + // It succeeds if there is just the right number of calls + reporter, ctrl = createFixtures(t) + subject = new(Subject) + ctrl.RecordCall(subject, "FooMethod", "argument").MaxTimes(2).MinTimes(2) + ctrl.Call(subject, "FooMethod", "argument") + ctrl.Call(subject, "FooMethod", "argument") + ctrl.Finish() +} + +func TestDo(t *testing.T) { + _, ctrl := createFixtures(t) + subject := new(Subject) + + doCalled := false + var argument string + ctrl.RecordCall(subject, "FooMethod", "argument").Do( + func(arg string) { + doCalled = true + argument = arg + }) + if doCalled { + t.Error("Do() callback called too early.") + } + + ctrl.Call(subject, "FooMethod", "argument") + + if !doCalled { + t.Error("Do() callback not called.") + } + if "argument" != argument { + t.Error("Do callback received wrong argument.") + } + + ctrl.Finish() +} + +func TestReturn(t *testing.T) { + _, ctrl := createFixtures(t) + subject := new(Subject) + + // Unspecified return should produce "zero" result. 
+ ctrl.RecordCall(subject, "FooMethod", "zero") + ctrl.RecordCall(subject, "FooMethod", "five").Return(5) + + assertEqual( + t, + []interface{}{0}, + ctrl.Call(subject, "FooMethod", "zero")) + + assertEqual( + t, + []interface{}{5}, + ctrl.Call(subject, "FooMethod", "five")) + ctrl.Finish() +} + +func TestUnorderedCalls(t *testing.T) { + reporter, ctrl := createFixtures(t) + defer reporter.recoverUnexpectedFatal() + subjectTwo := new(Subject) + subjectOne := new(Subject) + + ctrl.RecordCall(subjectOne, "FooMethod", "1") + ctrl.RecordCall(subjectOne, "BarMethod", "2") + ctrl.RecordCall(subjectTwo, "FooMethod", "3") + ctrl.RecordCall(subjectTwo, "BarMethod", "4") + + // Make the calls in a different order, which should be fine. + ctrl.Call(subjectOne, "BarMethod", "2") + ctrl.Call(subjectTwo, "FooMethod", "3") + ctrl.Call(subjectTwo, "BarMethod", "4") + ctrl.Call(subjectOne, "FooMethod", "1") + + reporter.assertPass("After making all calls in different order") + + ctrl.Finish() + + reporter.assertPass("After finish") +} + +func commonTestOrderedCalls(t *testing.T) (reporter *ErrorReporter, ctrl *gomock.Controller, subjectOne, subjectTwo *Subject) { + reporter, ctrl = createFixtures(t) + + subjectOne = new(Subject) + subjectTwo = new(Subject) + + gomock.InOrder( + ctrl.RecordCall(subjectOne, "FooMethod", "1").AnyTimes(), + ctrl.RecordCall(subjectTwo, "FooMethod", "2"), + ctrl.RecordCall(subjectTwo, "BarMethod", "3"), + ) + + return +} + +func TestOrderedCallsCorrect(t *testing.T) { + reporter, ctrl, subjectOne, subjectTwo := commonTestOrderedCalls(t) + + ctrl.Call(subjectOne, "FooMethod", "1") + ctrl.Call(subjectTwo, "FooMethod", "2") + ctrl.Call(subjectTwo, "BarMethod", "3") + + ctrl.Finish() + + reporter.assertPass("After finish") +} + +func TestOrderedCallsInCorrect(t *testing.T) { + reporter, ctrl, subjectOne, subjectTwo := commonTestOrderedCalls(t) + + ctrl.Call(subjectOne, "FooMethod", "1") + reporter.assertFatal(func() { + ctrl.Call(subjectTwo, "BarMethod", "3") + }) +} + +// Test that calls that are prerequites to other calls but have maxCalls > +// minCalls are removed from the expected call set. +func TestOrderedCallsWithPreReqMaxUnbounded(t *testing.T) { + reporter, ctrl, subjectOne, subjectTwo := commonTestOrderedCalls(t) + + // Initially we should be able to call FooMethod("1") as many times as we + // want. + ctrl.Call(subjectOne, "FooMethod", "1") + ctrl.Call(subjectOne, "FooMethod", "1") + + // But calling something that has it as a prerequite should remove it from + // the expected call set. This allows tests to ensure that FooMethod("1") is + // *not* called after FooMethod("2"). + ctrl.Call(subjectTwo, "FooMethod", "2") + + // Therefore this call should fail: + reporter.assertFatal(func() { + ctrl.Call(subjectOne, "FooMethod", "1") + }) +} + +func TestCallAfterLoopPanic(t *testing.T) { + _, ctrl := createFixtures(t) + + subject := new(Subject) + + firstCall := ctrl.RecordCall(subject, "Foo", "1") + secondCall := ctrl.RecordCall(subject, "Foo", "2") + thirdCall := ctrl.RecordCall(subject, "Foo", "3") + + gomock.InOrder(firstCall, secondCall, thirdCall) + + defer func() { + err := recover() + if err == nil { + t.Error("Call.After creation of dependency loop did not panic.") + } + }() + + // This should panic due to dependency loop. 
+ firstCall.After(thirdCall) +} + +func TestPanicOverridesExpectationChecks(t *testing.T) { + ctrl := gomock.NewController(t) + reporter := NewErrorReporter(t) + + reporter.assertFatal(func() { + ctrl.RecordCall(new(Subject), "FooMethod", "1") + defer ctrl.Finish() + reporter.Fatalf("Intentional panic") + }) +} + +func TestSetArgWithBadType(t *testing.T) { + rep, ctrl := createFixtures(t) + defer ctrl.Finish() + + s := new(Subject) + // This should catch a type error: + rep.assertFatal(func() { + ctrl.RecordCall(s, "FooMethod", "1").SetArg(0, "blah") + }) + ctrl.Call(s, "FooMethod", "1") +} + +func TestTimes0(t *testing.T) { + rep, ctrl := createFixtures(t) + defer ctrl.Finish() + + s := new(Subject) + ctrl.RecordCall(s, "FooMethod", "arg").Times(0) + rep.assertFatal(func() { + ctrl.Call(s, "FooMethod", "arg") + }) +} diff --git a/vendor/github.com/golang/mock/gomock/matchers_test.go b/vendor/github.com/golang/mock/gomock/matchers_test.go new file mode 100644 index 000000000000..29b97fb9c8e3 --- /dev/null +++ b/vendor/github.com/golang/mock/gomock/matchers_test.go @@ -0,0 +1,70 @@ +// Copyright 2010 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package gomock_test + +import ( + "errors" + "testing" + + "github.com/golang/mock/gomock" + mock_matcher "github.com/golang/mock/gomock/mock_matcher" +) + +func TestMatchers(t *testing.T) { + type e interface{} + type testCase struct { + matcher gomock.Matcher + yes, no []e + } + tests := []testCase{ + testCase{gomock.Any(), []e{3, nil, "foo"}, nil}, + testCase{gomock.Eq(4), []e{4}, []e{3, "blah", nil, int64(4)}}, + testCase{gomock.Nil(), + []e{nil, (error)(nil), (chan bool)(nil), (*int)(nil)}, + []e{"", 0, make(chan bool), errors.New("err"), new(int)}}, + testCase{gomock.Not(gomock.Eq(4)), []e{3, "blah", nil, int64(4)}, []e{4}}, + } + for i, test := range tests { + for _, x := range test.yes { + if !test.matcher.Matches(x) { + t.Errorf(`test %d: "%v %s" should be true.`, i, x, test.matcher) + } + } + for _, x := range test.no { + if test.matcher.Matches(x) { + t.Errorf(`test %d: "%v %s" should be false.`, i, x, test.matcher) + } + } + } +} + +// A more thorough test of notMatcher +func TestNotMatcher(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockMatcher := mock_matcher.NewMockMatcher(ctrl) + notMatcher := gomock.Not(mockMatcher) + + mockMatcher.EXPECT().Matches(4).Return(true) + if match := notMatcher.Matches(4); match { + t.Errorf("notMatcher should not match 4") + } + + mockMatcher.EXPECT().Matches(5).Return(false) + if match := notMatcher.Matches(5); !match { + t.Errorf("notMatcher should match 5") + } +} diff --git a/vendor/github.com/golang/mock/update_mocks.sh b/vendor/github.com/golang/mock/update_mocks.sh new file mode 100755 index 000000000000..3fe3932c2fb4 --- /dev/null +++ b/vendor/github.com/golang/mock/update_mocks.sh @@ -0,0 +1,9 @@ +#! 
/bin/bash -e + +mockgen github.com/golang/mock/gomock Matcher \ + > gomock/mock_matcher/mock_matcher.go +mockgen github.com/golang/mock/sample Index,Embed,Embedded \ + > sample/mock_user/mock_user.go +gofmt -w gomock/mock_matcher/mock_matcher.go sample/mock_user/mock_user.go + +echo >&2 "OK" diff --git a/vendor/github.com/golang/protobuf/.gitignore b/vendor/github.com/golang/protobuf/.gitignore new file mode 100644 index 000000000000..8f5b596b1479 --- /dev/null +++ b/vendor/github.com/golang/protobuf/.gitignore @@ -0,0 +1,16 @@ +.DS_Store +*.[568ao] +*.ao +*.so +*.pyc +._* +.nfs.* +[568a].out +*~ +*.orig +core +_obj +_test +_testmain.go +protoc-gen-go/testdata/multi/*.pb.go +_conformance/_conformance diff --git a/vendor/github.com/golang/protobuf/AUTHORS b/vendor/github.com/golang/protobuf/AUTHORS new file mode 100644 index 000000000000..15167cd746c5 --- /dev/null +++ b/vendor/github.com/golang/protobuf/AUTHORS @@ -0,0 +1,3 @@ +# This source code refers to The Go Authors for copyright purposes. +# The master list of authors is in the main Go distribution, +# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/github.com/golang/protobuf/CONTRIBUTORS b/vendor/github.com/golang/protobuf/CONTRIBUTORS new file mode 100644 index 000000000000..1c4577e96806 --- /dev/null +++ b/vendor/github.com/golang/protobuf/CONTRIBUTORS @@ -0,0 +1,3 @@ +# This source code was written by the Go contributors. +# The master list of contributors is in the main Go distribution, +# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/github.com/golang/protobuf/Make.protobuf b/vendor/github.com/golang/protobuf/Make.protobuf new file mode 100644 index 000000000000..15071de10112 --- /dev/null +++ b/vendor/github.com/golang/protobuf/Make.protobuf @@ -0,0 +1,40 @@ +# Go support for Protocol Buffers - Google's data interchange format +# +# Copyright 2010 The Go Authors. All rights reserved. +# https://github.com/golang/protobuf +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +# Includable Makefile to add a rule for generating .pb.go files from .proto files +# (Google protocol buffer descriptions). +# Typical use if myproto.proto is a file in package mypackage in this directory: +# +# include $(GOROOT)/src/pkg/github.com/golang/protobuf/Make.protobuf + +%.pb.go: %.proto + protoc --go_out=. $< + diff --git a/vendor/github.com/golang/protobuf/Makefile b/vendor/github.com/golang/protobuf/Makefile new file mode 100644 index 000000000000..a1421d8b7310 --- /dev/null +++ b/vendor/github.com/golang/protobuf/Makefile @@ -0,0 +1,55 @@ +# Go support for Protocol Buffers - Google's data interchange format +# +# Copyright 2010 The Go Authors. All rights reserved. +# https://github.com/golang/protobuf +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +all: install + +install: + go install ./proto ./jsonpb ./ptypes + go install ./protoc-gen-go + +test: + go test ./proto ./jsonpb ./ptypes + make -C protoc-gen-go/testdata test + +clean: + go clean ./... + +nuke: + go clean -i ./... + +regenerate: + make -C protoc-gen-go/descriptor regenerate + make -C protoc-gen-go/plugin regenerate + make -C protoc-gen-go/testdata regenerate + make -C proto/testdata regenerate + make -C jsonpb/jsonpb_test_proto regenerate + make -C _conformance regenerate diff --git a/vendor/github.com/golang/protobuf/README.md b/vendor/github.com/golang/protobuf/README.md new file mode 100644 index 000000000000..037fc7c8e2d3 --- /dev/null +++ b/vendor/github.com/golang/protobuf/README.md @@ -0,0 +1,241 @@ +# Go support for Protocol Buffers + +Google's data interchange format. +Copyright 2010 The Go Authors. +https://github.com/golang/protobuf + +This package and the code it generates requires at least Go 1.4. + +This software implements Go bindings for protocol buffers. 
For +information about protocol buffers themselves, see + https://developers.google.com/protocol-buffers/ + +## Installation ## + +To use this software, you must: +- Install the standard C++ implementation of protocol buffers from + https://developers.google.com/protocol-buffers/ +- Of course, install the Go compiler and tools from + https://golang.org/ + See + https://golang.org/doc/install + for details or, if you are using gccgo, follow the instructions at + https://golang.org/doc/install/gccgo +- Grab the code from the repository and install the proto package. + The simplest way is to run `go get -u github.com/golang/protobuf/{proto,protoc-gen-go}`. + The compiler plugin, protoc-gen-go, will be installed in $GOBIN, + defaulting to $GOPATH/bin. It must be in your $PATH for the protocol + compiler, protoc, to find it. + +This software has two parts: a 'protocol compiler plugin' that +generates Go source files that, once compiled, can access and manage +protocol buffers; and a library that implements run-time support for +encoding (marshaling), decoding (unmarshaling), and accessing protocol +buffers. + +There is support for gRPC in Go using protocol buffers. +See the note at the bottom of this file for details. + +There are no insertion points in the plugin. + + +## Using protocol buffers with Go ## + +Once the software is installed, there are two steps to using it. +First you must compile the protocol buffer definitions and then import +them, with the support library, into your program. + +To compile the protocol buffer definition, run protoc with the --go_out +parameter set to the directory you want to output the Go code to. + + protoc --go_out=. *.proto + +The generated files will be suffixed .pb.go. See the Test code below +for an example using such a file. + + +The package comment for the proto library contains text describing +the interface provided in Go for protocol buffers. Here is an edited +version. + +========== + +The proto package converts data structures to and from the +wire format of protocol buffers. It works in concert with the +Go source code generated for .proto files by the protocol compiler. + +A summary of the properties of the protocol buffer interface +for a protocol buffer variable v: + + - Names are turned from camel_case to CamelCase for export. + - There are no methods on v to set fields; just treat + them as structure fields. + - There are getters that return a field's value if set, + and return the field's default value if unset. + The getters work even if the receiver is a nil message. + - The zero value for a struct is its correct initialization state. + All desired fields must be set before marshaling. + - A Reset() method will restore a protobuf struct to its zero state. + - Non-repeated fields are pointers to the values; nil means unset. + That is, optional or required field int32 f becomes F *int32. + - Repeated fields are slices. + - Helper functions are available to aid the setting of fields. + Helpers for getting values are superseded by the + GetFoo methods and their use is deprecated. + msg.Foo = proto.String("hello") // set field + - Constants are defined to hold the default values of all fields that + have them. They have the form Default_StructName_FieldName. + Because the getter methods handle defaulted values, + direct use of these constants should be rare. + - Enums are given type names and maps from names to values. + Enum values are prefixed with the enum's type name. 
Enum types have + a String method, and a Enum method to assist in message construction. + - Nested groups and enums have type names prefixed with the name of + the surrounding message type. + - Extensions are given descriptor names that start with E_, + followed by an underscore-delimited list of the nested messages + that contain it (if any) followed by the CamelCased name of the + extension field itself. HasExtension, ClearExtension, GetExtension + and SetExtension are functions for manipulating extensions. + - Oneof field sets are given a single field in their message, + with distinguished wrapper types for each possible field value. + - Marshal and Unmarshal are functions to encode and decode the wire format. + +When the .proto file specifies `syntax="proto3"`, there are some differences: + + - Non-repeated fields of non-message type are values instead of pointers. + - Getters are only generated for message and oneof fields. + - Enum types do not get an Enum method. + +Consider file test.proto, containing + +```proto + package example; + + enum FOO { X = 17; }; + + message Test { + required string label = 1; + optional int32 type = 2 [default=77]; + repeated int64 reps = 3; + optional group OptionalGroup = 4 { + required string RequiredField = 5; + } + } +``` + +To create and play with a Test object from the example package, + +```go + package main + + import ( + "log" + + "github.com/golang/protobuf/proto" + "path/to/example" + ) + + func main() { + test := &example.Test { + Label: proto.String("hello"), + Type: proto.Int32(17), + Reps: []int64{1, 2, 3}, + Optionalgroup: &example.Test_OptionalGroup { + RequiredField: proto.String("good bye"), + }, + } + data, err := proto.Marshal(test) + if err != nil { + log.Fatal("marshaling error: ", err) + } + newTest := &example.Test{} + err = proto.Unmarshal(data, newTest) + if err != nil { + log.Fatal("unmarshaling error: ", err) + } + // Now test and newTest contain the same data. + if test.GetLabel() != newTest.GetLabel() { + log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel()) + } + // etc. + } +``` + +## Parameters ## + +To pass extra parameters to the plugin, use a comma-separated +parameter list separated from the output directory by a colon: + + + protoc --go_out=plugins=grpc,import_path=mypackage:. *.proto + + +- `import_prefix=xxx` - a prefix that is added onto the beginning of + all imports. Useful for things like generating protos in a + subdirectory, or regenerating vendored protobufs in-place. +- `import_path=foo/bar` - used as the package if no input files + declare `go_package`. If it contains slashes, everything up to the + rightmost slash is ignored. +- `plugins=plugin1+plugin2` - specifies the list of sub-plugins to + load. The only plugin in this repo is `grpc`. +- `Mfoo/bar.proto=quux/shme` - declares that foo/bar.proto is + associated with Go package quux/shme. This is subject to the + import_prefix parameter. + +## gRPC Support ## + +If a proto file specifies RPC services, protoc-gen-go can be instructed to +generate code compatible with gRPC (http://www.grpc.io/). To do this, pass +the `plugins` parameter to protoc-gen-go; the usual way is to insert it into +the --go_out argument to protoc: + + protoc --go_out=plugins=grpc:. *.proto + +## Compatibility ## + +The library and the generated code are expected to be stable over time. +However, we reserve the right to make breaking changes without notice for the +following reasons: + +- Security. 
A security issue in the specification or implementation may come to + light whose resolution requires breaking compatibility. We reserve the right + to address such security issues. +- Unspecified behavior. There are some aspects of the Protocol Buffers + specification that are undefined. Programs that depend on such unspecified + behavior may break in future releases. +- Specification errors or changes. If it becomes necessary to address an + inconsistency, incompleteness, or change in the Protocol Buffers + specification, resolving the issue could affect the meaning or legality of + existing programs. We reserve the right to address such issues, including + updating the implementations. +- Bugs. If the library has a bug that violates the specification, a program + that depends on the buggy behavior may break if the bug is fixed. We reserve + the right to fix such bugs. +- Adding methods or fields to generated structs. These may conflict with field + names that already exist in a schema, causing applications to break. When the + code generator encounters a field in the schema that would collide with a + generated field or method name, the code generator will append an underscore + to the generated field or method name. +- Adding, removing, or changing methods or fields in generated structs that + start with `XXX`. These parts of the generated code are exported out of + necessity, but should not be considered part of the public API. +- Adding, removing, or changing unexported symbols in generated code. + +Any breaking changes outside of these will be announced 6 months in advance to +protobuf@googlegroups.com. + +You should, whenever possible, use generated code created by the `protoc-gen-go` +tool built at the same commit as the `proto` package. The `proto` package +declares package-level constants in the form `ProtoPackageIsVersionX`. +Application code and generated code may depend on one of these constants to +ensure that compilation will fail if the available version of the proto library +is too old. Whenever we make a change to the generated code that requires newer +library support, in the same commit we will increment the version number of the +generated code and declare a new package-level constant whose name incorporates +the latest version number. Removing a compatibility constant is considered a +breaking change and would be subject to the announcement policy stated above. + +The `protoc-gen-go/generator` package exposes a plugin interface, +which is used by the gRPC code generation. This interface is not +supported and is subject to incompatible changes without notice. 
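(To make the version-constant mechanism described above concrete: generated `.pb.go` files of this era pin themselves to a minimum library version with a blank compile-time reference, roughly as in this sketch; `example` is a hypothetical package name:)

```go
package example

import "github.com/golang/protobuf/proto"

// A compile-time assertion: if the vendored proto package predates the
// generator that produced this file, the referenced constant does not
// exist and the build fails with a clear error.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
```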
diff --git a/vendor/github.com/golang/protobuf/proto/BUILD b/vendor/github.com/golang/protobuf/proto/BUILD index f11e2f6ce647..fd6d235d5a81 100644 --- a/vendor/github.com/golang/protobuf/proto/BUILD +++ b/vendor/github.com/golang/protobuf/proto/BUILD @@ -1,9 +1,4 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "go_default_library", @@ -21,6 +16,44 @@ go_library( "text_parser.go", ], importpath = "github.com/golang/protobuf/proto", + visibility = ["//visibility:public"], +) + +go_test( + name = "go_default_test", + srcs = [ + "message_set_test.go", + "size2_test.go", + ], + importpath = "github.com/golang/protobuf/proto", + library = ":go_default_library", +) + +go_test( + name = "go_default_xtest", + srcs = [ + "all_test.go", + "any_test.go", + "clone_test.go", + "decode_test.go", + "encode_test.go", + "equal_test.go", + "extensions_test.go", + "map_test.go", + "proto3_test.go", + "size_test.go", + "text_parser_test.go", + "text_test.go", + ], + importpath = "github.com/golang/protobuf/proto_test", + deps = [ + ":go_default_library", + "//vendor/github.com/golang/protobuf/proto/proto3_proto:go_default_library", + "//vendor/github.com/golang/protobuf/proto/testdata:go_default_library", + "//vendor/github.com/golang/protobuf/ptypes:go_default_library", + "//vendor/github.com/golang/protobuf/ptypes/any:go_default_library", + "//vendor/golang.org/x/sync/errgroup:go_default_library", + ], ) filegroup( @@ -34,4 +67,5 @@ filegroup( name = "all-srcs", srcs = [":package-srcs"], tags = ["automanaged"], + visibility = ["//visibility:public"], ) diff --git a/vendor/github.com/golang/protobuf/proto/all_test.go b/vendor/github.com/golang/protobuf/proto/all_test.go new file mode 100644 index 000000000000..41451a40734a --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/all_test.go @@ -0,0 +1,2278 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto_test + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "math" + "math/rand" + "reflect" + "runtime/debug" + "strings" + "testing" + "time" + + . "github.com/golang/protobuf/proto" + . "github.com/golang/protobuf/proto/testdata" +) + +var globalO *Buffer + +func old() *Buffer { + if globalO == nil { + globalO = NewBuffer(nil) + } + globalO.Reset() + return globalO +} + +func equalbytes(b1, b2 []byte, t *testing.T) { + if len(b1) != len(b2) { + t.Errorf("wrong lengths: 2*%d != %d", len(b1), len(b2)) + return + } + for i := 0; i < len(b1); i++ { + if b1[i] != b2[i] { + t.Errorf("bad byte[%d]:%x %x: %s %s", i, b1[i], b2[i], b1, b2) + } + } +} + +func initGoTestField() *GoTestField { + f := new(GoTestField) + f.Label = String("label") + f.Type = String("type") + return f +} + +// These are all structurally equivalent but the tag numbers differ. +// (It's remarkable that required, optional, and repeated all have +// 8 letters.) +func initGoTest_RequiredGroup() *GoTest_RequiredGroup { + return &GoTest_RequiredGroup{ + RequiredField: String("required"), + } +} + +func initGoTest_OptionalGroup() *GoTest_OptionalGroup { + return &GoTest_OptionalGroup{ + RequiredField: String("optional"), + } +} + +func initGoTest_RepeatedGroup() *GoTest_RepeatedGroup { + return &GoTest_RepeatedGroup{ + RequiredField: String("repeated"), + } +} + +func initGoTest(setdefaults bool) *GoTest { + pb := new(GoTest) + if setdefaults { + pb.F_BoolDefaulted = Bool(Default_GoTest_F_BoolDefaulted) + pb.F_Int32Defaulted = Int32(Default_GoTest_F_Int32Defaulted) + pb.F_Int64Defaulted = Int64(Default_GoTest_F_Int64Defaulted) + pb.F_Fixed32Defaulted = Uint32(Default_GoTest_F_Fixed32Defaulted) + pb.F_Fixed64Defaulted = Uint64(Default_GoTest_F_Fixed64Defaulted) + pb.F_Uint32Defaulted = Uint32(Default_GoTest_F_Uint32Defaulted) + pb.F_Uint64Defaulted = Uint64(Default_GoTest_F_Uint64Defaulted) + pb.F_FloatDefaulted = Float32(Default_GoTest_F_FloatDefaulted) + pb.F_DoubleDefaulted = Float64(Default_GoTest_F_DoubleDefaulted) + pb.F_StringDefaulted = String(Default_GoTest_F_StringDefaulted) + pb.F_BytesDefaulted = Default_GoTest_F_BytesDefaulted + pb.F_Sint32Defaulted = Int32(Default_GoTest_F_Sint32Defaulted) + pb.F_Sint64Defaulted = Int64(Default_GoTest_F_Sint64Defaulted) + } + + pb.Kind = GoTest_TIME.Enum() + pb.RequiredField = initGoTestField() + pb.F_BoolRequired = Bool(true) + pb.F_Int32Required = Int32(3) + pb.F_Int64Required = Int64(6) + pb.F_Fixed32Required = Uint32(32) + pb.F_Fixed64Required = Uint64(64) + pb.F_Uint32Required = Uint32(3232) + pb.F_Uint64Required = Uint64(6464) + pb.F_FloatRequired = Float32(3232) + pb.F_DoubleRequired = Float64(6464) + pb.F_StringRequired = String("string") + pb.F_BytesRequired = []byte("bytes") + pb.F_Sint32Required = Int32(-32) + pb.F_Sint64Required = Int64(-64) + pb.Requiredgroup = initGoTest_RequiredGroup() + + return pb +} + +func fail(msg string, b *bytes.Buffer, s string, t *testing.T) { + data := b.Bytes() + ld := len(data) + ls 
:= len(s) / 2 + + fmt.Printf("fail %s ld=%d ls=%d\n", msg, ld, ls) + + // find the interesting spot - n + n := ls + if ld < ls { + n = ld + } + j := 0 + for i := 0; i < n; i++ { + bs := hex(s[j])*16 + hex(s[j+1]) + j += 2 + if data[i] == bs { + continue + } + n = i + break + } + l := n - 10 + if l < 0 { + l = 0 + } + h := n + 10 + + // find the interesting spot - n + fmt.Printf("is[%d]:", l) + for i := l; i < h; i++ { + if i >= ld { + fmt.Printf(" --") + continue + } + fmt.Printf(" %.2x", data[i]) + } + fmt.Printf("\n") + + fmt.Printf("sb[%d]:", l) + for i := l; i < h; i++ { + if i >= ls { + fmt.Printf(" --") + continue + } + bs := hex(s[j])*16 + hex(s[j+1]) + j += 2 + fmt.Printf(" %.2x", bs) + } + fmt.Printf("\n") + + t.Fail() + + // t.Errorf("%s: \ngood: %s\nbad: %x", msg, s, b.Bytes()) + // Print the output in a partially-decoded format; can + // be helpful when updating the test. It produces the output + // that is pasted, with minor edits, into the argument to verify(). + // data := b.Bytes() + // nesting := 0 + // for b.Len() > 0 { + // start := len(data) - b.Len() + // var u uint64 + // u, err := DecodeVarint(b) + // if err != nil { + // fmt.Printf("decode error on varint:", err) + // return + // } + // wire := u & 0x7 + // tag := u >> 3 + // switch wire { + // case WireVarint: + // v, err := DecodeVarint(b) + // if err != nil { + // fmt.Printf("decode error on varint:", err) + // return + // } + // fmt.Printf("\t\t\"%x\" // field %d, encoding %d, value %d\n", + // data[start:len(data)-b.Len()], tag, wire, v) + // case WireFixed32: + // v, err := DecodeFixed32(b) + // if err != nil { + // fmt.Printf("decode error on fixed32:", err) + // return + // } + // fmt.Printf("\t\t\"%x\" // field %d, encoding %d, value %d\n", + // data[start:len(data)-b.Len()], tag, wire, v) + // case WireFixed64: + // v, err := DecodeFixed64(b) + // if err != nil { + // fmt.Printf("decode error on fixed64:", err) + // return + // } + // fmt.Printf("\t\t\"%x\" // field %d, encoding %d, value %d\n", + // data[start:len(data)-b.Len()], tag, wire, v) + // case WireBytes: + // nb, err := DecodeVarint(b) + // if err != nil { + // fmt.Printf("decode error on bytes:", err) + // return + // } + // after_tag := len(data) - b.Len() + // str := make([]byte, nb) + // _, err = b.Read(str) + // if err != nil { + // fmt.Printf("decode error on bytes:", err) + // return + // } + // fmt.Printf("\t\t\"%x\" \"%x\" // field %d, encoding %d (FIELD)\n", + // data[start:after_tag], str, tag, wire) + // case WireStartGroup: + // nesting++ + // fmt.Printf("\t\t\"%x\"\t\t// start group field %d level %d\n", + // data[start:len(data)-b.Len()], tag, nesting) + // case WireEndGroup: + // fmt.Printf("\t\t\"%x\"\t\t// end group field %d level %d\n", + // data[start:len(data)-b.Len()], tag, nesting) + // nesting-- + // default: + // fmt.Printf("unrecognized wire type %d\n", wire) + // return + // } + // } +} + +func hex(c uint8) uint8 { + if '0' <= c && c <= '9' { + return c - '0' + } + if 'a' <= c && c <= 'f' { + return 10 + c - 'a' + } + if 'A' <= c && c <= 'F' { + return 10 + c - 'A' + } + return 0 +} + +func equal(b []byte, s string, t *testing.T) bool { + if 2*len(b) != len(s) { + // fail(fmt.Sprintf("wrong lengths: 2*%d != %d", len(b), len(s)), b, s, t) + fmt.Printf("wrong lengths: 2*%d != %d\n", len(b), len(s)) + return false + } + for i, j := 0, 0; i < len(b); i, j = i+1, j+2 { + x := hex(s[j])*16 + hex(s[j+1]) + if b[i] != x { + // fail(fmt.Sprintf("bad byte[%d]:%x %x", i, b[i], x), b, s, t) + fmt.Printf("bad byte[%d]:%x %x", i, 
b[i], x) + return false + } + } + return true +} + +func overify(t *testing.T, pb *GoTest, expected string) { + o := old() + err := o.Marshal(pb) + if err != nil { + fmt.Printf("overify marshal-1 err = %v", err) + o.DebugPrint("", o.Bytes()) + t.Fatalf("expected = %s", expected) + } + if !equal(o.Bytes(), expected, t) { + o.DebugPrint("overify neq 1", o.Bytes()) + t.Fatalf("expected = %s", expected) + } + + // Now test Unmarshal by recreating the original buffer. + pbd := new(GoTest) + err = o.Unmarshal(pbd) + if err != nil { + t.Fatalf("overify unmarshal err = %v", err) + o.DebugPrint("", o.Bytes()) + t.Fatalf("string = %s", expected) + } + o.Reset() + err = o.Marshal(pbd) + if err != nil { + t.Errorf("overify marshal-2 err = %v", err) + o.DebugPrint("", o.Bytes()) + t.Fatalf("string = %s", expected) + } + if !equal(o.Bytes(), expected, t) { + o.DebugPrint("overify neq 2", o.Bytes()) + t.Fatalf("string = %s", expected) + } +} + +// Simple tests for numeric encode/decode primitives (varint, etc.) +func TestNumericPrimitives(t *testing.T) { + for i := uint64(0); i < 1e6; i += 111 { + o := old() + if o.EncodeVarint(i) != nil { + t.Error("EncodeVarint") + break + } + x, e := o.DecodeVarint() + if e != nil { + t.Fatal("DecodeVarint") + } + if x != i { + t.Fatal("varint decode fail:", i, x) + } + + o = old() + if o.EncodeFixed32(i) != nil { + t.Fatal("encFixed32") + } + x, e = o.DecodeFixed32() + if e != nil { + t.Fatal("decFixed32") + } + if x != i { + t.Fatal("fixed32 decode fail:", i, x) + } + + o = old() + if o.EncodeFixed64(i*1234567) != nil { + t.Error("encFixed64") + break + } + x, e = o.DecodeFixed64() + if e != nil { + t.Error("decFixed64") + break + } + if x != i*1234567 { + t.Error("fixed64 decode fail:", i*1234567, x) + break + } + + o = old() + i32 := int32(i - 12345) + if o.EncodeZigzag32(uint64(i32)) != nil { + t.Fatal("EncodeZigzag32") + } + x, e = o.DecodeZigzag32() + if e != nil { + t.Fatal("DecodeZigzag32") + } + if x != uint64(uint32(i32)) { + t.Fatal("zigzag32 decode fail:", i32, x) + } + + o = old() + i64 := int64(i - 12345) + if o.EncodeZigzag64(uint64(i64)) != nil { + t.Fatal("EncodeZigzag64") + } + x, e = o.DecodeZigzag64() + if e != nil { + t.Fatal("DecodeZigzag64") + } + if x != uint64(i64) { + t.Fatal("zigzag64 decode fail:", i64, x) + } + } +} + +// fakeMarshaler is a simple struct implementing Marshaler and Message interfaces. +type fakeMarshaler struct { + b []byte + err error +} + +func (f *fakeMarshaler) Marshal() ([]byte, error) { return f.b, f.err } +func (f *fakeMarshaler) String() string { return fmt.Sprintf("Bytes: %v Error: %v", f.b, f.err) } +func (f *fakeMarshaler) ProtoMessage() {} +func (f *fakeMarshaler) Reset() {} + +type msgWithFakeMarshaler struct { + M *fakeMarshaler `protobuf:"bytes,1,opt,name=fake"` +} + +func (m *msgWithFakeMarshaler) String() string { return CompactTextString(m) } +func (m *msgWithFakeMarshaler) ProtoMessage() {} +func (m *msgWithFakeMarshaler) Reset() {} + +// Simple tests for proto messages that implement the Marshaler interface. +func TestMarshalerEncoding(t *testing.T) { + tests := []struct { + name string + m Message + want []byte + errType reflect.Type + }{ + { + name: "Marshaler that fails", + m: &fakeMarshaler{ + err: errors.New("some marshal err"), + b: []byte{5, 6, 7}, + }, + // Since the Marshal method returned bytes, they should be written to the + // buffer. (For efficiency, we assume that Marshal implementations are + // always correct w.r.t. RequiredNotSetError and output.) 
+ want: []byte{5, 6, 7}, + errType: reflect.TypeOf(errors.New("some marshal err")), + }, + { + name: "Marshaler that fails with RequiredNotSetError", + m: &msgWithFakeMarshaler{ + M: &fakeMarshaler{ + err: &RequiredNotSetError{}, + b: []byte{5, 6, 7}, + }, + }, + // Since there's an error that can be continued after, + // the buffer should be written. + want: []byte{ + 10, 3, // for &msgWithFakeMarshaler + 5, 6, 7, // for &fakeMarshaler + }, + errType: reflect.TypeOf(&RequiredNotSetError{}), + }, + { + name: "Marshaler that succeeds", + m: &fakeMarshaler{ + b: []byte{0, 1, 2, 3, 4, 127, 255}, + }, + want: []byte{0, 1, 2, 3, 4, 127, 255}, + }, + } + for _, test := range tests { + b := NewBuffer(nil) + err := b.Marshal(test.m) + if reflect.TypeOf(err) != test.errType { + t.Errorf("%s: got err %T(%v) wanted %T", test.name, err, err, test.errType) + } + if !reflect.DeepEqual(test.want, b.Bytes()) { + t.Errorf("%s: got bytes %v wanted %v", test.name, b.Bytes(), test.want) + } + if size := Size(test.m); size != len(b.Bytes()) { + t.Errorf("%s: Size(_) = %v, but marshaled to %v bytes", test.name, size, len(b.Bytes())) + } + + m, mErr := Marshal(test.m) + if !bytes.Equal(b.Bytes(), m) { + t.Errorf("%s: Marshal returned %v, but (*Buffer).Marshal wrote %v", test.name, m, b.Bytes()) + } + if !reflect.DeepEqual(err, mErr) { + t.Errorf("%s: Marshal err = %q, but (*Buffer).Marshal returned %q", + test.name, fmt.Sprint(mErr), fmt.Sprint(err)) + } + } +} + +// Simple tests for bytes +func TestBytesPrimitives(t *testing.T) { + o := old() + bytes := []byte{'n', 'o', 'w', ' ', 'i', 's', ' ', 't', 'h', 'e', ' ', 't', 'i', 'm', 'e'} + if o.EncodeRawBytes(bytes) != nil { + t.Error("EncodeRawBytes") + } + decb, e := o.DecodeRawBytes(false) + if e != nil { + t.Error("DecodeRawBytes") + } + equalbytes(bytes, decb, t) +} + +// Simple tests for strings +func TestStringPrimitives(t *testing.T) { + o := old() + s := "now is the time" + if o.EncodeStringBytes(s) != nil { + t.Error("enc_string") + } + decs, e := o.DecodeStringBytes() + if e != nil { + t.Error("dec_string") + } + if s != decs { + t.Error("string encode/decode fail:", s, decs) + } +} + +// Do we catch the "required bit not set" case? +func TestRequiredBit(t *testing.T) { + o := old() + pb := new(GoTest) + err := o.Marshal(pb) + if err == nil { + t.Error("did not catch missing required fields") + } else if strings.Index(err.Error(), "Kind") < 0 { + t.Error("wrong error type:", err) + } +} + +// Check that all fields are nil. +// Clearly silly, and a residue from a more interesting test with an earlier, +// different initialization property, but it once caught a compiler bug so +// it lives. 
+func checkInitialized(pb *GoTest, t *testing.T) { + if pb.F_BoolDefaulted != nil { + t.Error("New or Reset did not set boolean:", *pb.F_BoolDefaulted) + } + if pb.F_Int32Defaulted != nil { + t.Error("New or Reset did not set int32:", *pb.F_Int32Defaulted) + } + if pb.F_Int64Defaulted != nil { + t.Error("New or Reset did not set int64:", *pb.F_Int64Defaulted) + } + if pb.F_Fixed32Defaulted != nil { + t.Error("New or Reset did not set fixed32:", *pb.F_Fixed32Defaulted) + } + if pb.F_Fixed64Defaulted != nil { + t.Error("New or Reset did not set fixed64:", *pb.F_Fixed64Defaulted) + } + if pb.F_Uint32Defaulted != nil { + t.Error("New or Reset did not set uint32:", *pb.F_Uint32Defaulted) + } + if pb.F_Uint64Defaulted != nil { + t.Error("New or Reset did not set uint64:", *pb.F_Uint64Defaulted) + } + if pb.F_FloatDefaulted != nil { + t.Error("New or Reset did not set float:", *pb.F_FloatDefaulted) + } + if pb.F_DoubleDefaulted != nil { + t.Error("New or Reset did not set double:", *pb.F_DoubleDefaulted) + } + if pb.F_StringDefaulted != nil { + t.Error("New or Reset did not set string:", *pb.F_StringDefaulted) + } + if pb.F_BytesDefaulted != nil { + t.Error("New or Reset did not set bytes:", string(pb.F_BytesDefaulted)) + } + if pb.F_Sint32Defaulted != nil { + t.Error("New or Reset did not set int32:", *pb.F_Sint32Defaulted) + } + if pb.F_Sint64Defaulted != nil { + t.Error("New or Reset did not set int64:", *pb.F_Sint64Defaulted) + } +} + +// Does Reset() reset? +func TestReset(t *testing.T) { + pb := initGoTest(true) + // muck with some values + pb.F_BoolDefaulted = Bool(false) + pb.F_Int32Defaulted = Int32(237) + pb.F_Int64Defaulted = Int64(12346) + pb.F_Fixed32Defaulted = Uint32(32000) + pb.F_Fixed64Defaulted = Uint64(666) + pb.F_Uint32Defaulted = Uint32(323232) + pb.F_Uint64Defaulted = nil + pb.F_FloatDefaulted = nil + pb.F_DoubleDefaulted = Float64(0) + pb.F_StringDefaulted = String("gotcha") + pb.F_BytesDefaulted = []byte("asdfasdf") + pb.F_Sint32Defaulted = Int32(123) + pb.F_Sint64Defaulted = Int64(789) + pb.Reset() + checkInitialized(pb, t) +} + +// All required fields set, no defaults provided. +func TestEncodeDecode1(t *testing.T) { + pb := initGoTest(false) + overify(t, pb, + "0807"+ // field 1, encoding 0, value 7 + "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) + "5001"+ // field 10, encoding 0, value 1 + "5803"+ // field 11, encoding 0, value 3 + "6006"+ // field 12, encoding 0, value 6 + "6d20000000"+ // field 13, encoding 5, value 0x20 + "714000000000000000"+ // field 14, encoding 1, value 0x40 + "78a019"+ // field 15, encoding 0, value 0xca0 = 3232 + "8001c032"+ // field 16, encoding 0, value 0x1940 = 6464 + "8d0100004a45"+ // field 17, encoding 5, value 3232.0 + "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 + "9a0106"+"737472696e67"+ // field 19, encoding 2, string "string" + "b304"+ // field 70, encoding 3, start group + "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" + "b404"+ // field 70, encoding 4, end group + "aa0605"+"6279746573"+ // field 101, encoding 2, string "bytes" + "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 + "b8067f") // field 103, encoding 0, 0x7f zigzag64 +} + +// All required fields set, defaults provided. 
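+// The expected strings passed to overify are hex dumps of the wire format.
+// Each key byte is (field_number<<3 | wire_type); for example, "0807" is key
+// 0x08 (field 1, wire type 0, varint) followed by the value 7, and "c00201"
+// is the two-byte varint key 0xc0 0x02 (field 40, wire type 0) followed by
+// the value 1.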
+func TestEncodeDecode2(t *testing.T) { + pb := initGoTest(true) + overify(t, pb, + "0807"+ // field 1, encoding 0, value 7 + "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) + "5001"+ // field 10, encoding 0, value 1 + "5803"+ // field 11, encoding 0, value 3 + "6006"+ // field 12, encoding 0, value 6 + "6d20000000"+ // field 13, encoding 5, value 32 + "714000000000000000"+ // field 14, encoding 1, value 64 + "78a019"+ // field 15, encoding 0, value 3232 + "8001c032"+ // field 16, encoding 0, value 6464 + "8d0100004a45"+ // field 17, encoding 5, value 3232.0 + "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 + "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string" + "c00201"+ // field 40, encoding 0, value 1 + "c80220"+ // field 41, encoding 0, value 32 + "d00240"+ // field 42, encoding 0, value 64 + "dd0240010000"+ // field 43, encoding 5, value 320 + "e1028002000000000000"+ // field 44, encoding 1, value 640 + "e8028019"+ // field 45, encoding 0, value 3200 + "f0028032"+ // field 46, encoding 0, value 6400 + "fd02e0659948"+ // field 47, encoding 5, value 314159.0 + "81030000000050971041"+ // field 48, encoding 1, value 271828.0 + "8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n" + "b304"+ // start group field 70 level 1 + "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" + "b404"+ // end group field 70 level 1 + "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes" + "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 + "b8067f"+ // field 103, encoding 0, 0x7f zigzag64 + "8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose" + "90193f"+ // field 402, encoding 0, value 63 + "98197f") // field 403, encoding 0, value 127 + +} + +// All default fields set to their default value by hand +func TestEncodeDecode3(t *testing.T) { + pb := initGoTest(false) + pb.F_BoolDefaulted = Bool(true) + pb.F_Int32Defaulted = Int32(32) + pb.F_Int64Defaulted = Int64(64) + pb.F_Fixed32Defaulted = Uint32(320) + pb.F_Fixed64Defaulted = Uint64(640) + pb.F_Uint32Defaulted = Uint32(3200) + pb.F_Uint64Defaulted = Uint64(6400) + pb.F_FloatDefaulted = Float32(314159) + pb.F_DoubleDefaulted = Float64(271828) + pb.F_StringDefaulted = String("hello, \"world!\"\n") + pb.F_BytesDefaulted = []byte("Bignose") + pb.F_Sint32Defaulted = Int32(-32) + pb.F_Sint64Defaulted = Int64(-64) + + overify(t, pb, + "0807"+ // field 1, encoding 0, value 7 + "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) + "5001"+ // field 10, encoding 0, value 1 + "5803"+ // field 11, encoding 0, value 3 + "6006"+ // field 12, encoding 0, value 6 + "6d20000000"+ // field 13, encoding 5, value 32 + "714000000000000000"+ // field 14, encoding 1, value 64 + "78a019"+ // field 15, encoding 0, value 3232 + "8001c032"+ // field 16, encoding 0, value 6464 + "8d0100004a45"+ // field 17, encoding 5, value 3232.0 + "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 + "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string" + "c00201"+ // field 40, encoding 0, value 1 + "c80220"+ // field 41, encoding 0, value 32 + "d00240"+ // field 42, encoding 0, value 64 + "dd0240010000"+ // field 43, encoding 5, value 320 + "e1028002000000000000"+ // field 44, encoding 1, value 640 + "e8028019"+ // field 45, encoding 0, value 3200 + "f0028032"+ // field 46, encoding 0, value 6400 + "fd02e0659948"+ // field 47, encoding 5, value 314159.0 + "81030000000050971041"+ // field 48, encoding 1, value 
271828.0 + "8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n" + "b304"+ // start group field 70 level 1 + "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" + "b404"+ // end group field 70 level 1 + "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes" + "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 + "b8067f"+ // field 103, encoding 0, 0x7f zigzag64 + "8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose" + "90193f"+ // field 402, encoding 0, value 63 + "98197f") // field 403, encoding 0, value 127 + +} + +// All required fields set, defaults provided, all non-defaulted optional fields have values. +func TestEncodeDecode4(t *testing.T) { + pb := initGoTest(true) + pb.Table = String("hello") + pb.Param = Int32(7) + pb.OptionalField = initGoTestField() + pb.F_BoolOptional = Bool(true) + pb.F_Int32Optional = Int32(32) + pb.F_Int64Optional = Int64(64) + pb.F_Fixed32Optional = Uint32(3232) + pb.F_Fixed64Optional = Uint64(6464) + pb.F_Uint32Optional = Uint32(323232) + pb.F_Uint64Optional = Uint64(646464) + pb.F_FloatOptional = Float32(32.) + pb.F_DoubleOptional = Float64(64.) + pb.F_StringOptional = String("hello") + pb.F_BytesOptional = []byte("Bignose") + pb.F_Sint32Optional = Int32(-32) + pb.F_Sint64Optional = Int64(-64) + pb.Optionalgroup = initGoTest_OptionalGroup() + + overify(t, pb, + "0807"+ // field 1, encoding 0, value 7 + "1205"+"68656c6c6f"+ // field 2, encoding 2, string "hello" + "1807"+ // field 3, encoding 0, value 7 + "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) + "320d"+"0a056c6162656c120474797065"+ // field 6, encoding 2 (GoTestField) + "5001"+ // field 10, encoding 0, value 1 + "5803"+ // field 11, encoding 0, value 3 + "6006"+ // field 12, encoding 0, value 6 + "6d20000000"+ // field 13, encoding 5, value 32 + "714000000000000000"+ // field 14, encoding 1, value 64 + "78a019"+ // field 15, encoding 0, value 3232 + "8001c032"+ // field 16, encoding 0, value 6464 + "8d0100004a45"+ // field 17, encoding 5, value 3232.0 + "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 + "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string" + "f00101"+ // field 30, encoding 0, value 1 + "f80120"+ // field 31, encoding 0, value 32 + "800240"+ // field 32, encoding 0, value 64 + "8d02a00c0000"+ // field 33, encoding 5, value 3232 + "91024019000000000000"+ // field 34, encoding 1, value 6464 + "9802a0dd13"+ // field 35, encoding 0, value 323232 + "a002c0ba27"+ // field 36, encoding 0, value 646464 + "ad0200000042"+ // field 37, encoding 5, value 32.0 + "b1020000000000005040"+ // field 38, encoding 1, value 64.0 + "ba0205"+"68656c6c6f"+ // field 39, encoding 2, string "hello" + "c00201"+ // field 40, encoding 0, value 1 + "c80220"+ // field 41, encoding 0, value 32 + "d00240"+ // field 42, encoding 0, value 64 + "dd0240010000"+ // field 43, encoding 5, value 320 + "e1028002000000000000"+ // field 44, encoding 1, value 640 + "e8028019"+ // field 45, encoding 0, value 3200 + "f0028032"+ // field 46, encoding 0, value 6400 + "fd02e0659948"+ // field 47, encoding 5, value 314159.0 + "81030000000050971041"+ // field 48, encoding 1, value 271828.0 + "8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n" + "b304"+ // start group field 70 level 1 + "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" + "b404"+ // end group field 70 level 1 + "d305"+ // start group field 90 level 1 + 
"da0508"+"6f7074696f6e616c"+ // field 91, encoding 2, string "optional" + "d405"+ // end group field 90 level 1 + "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes" + "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 + "b8067f"+ // field 103, encoding 0, 0x7f zigzag64 + "ea1207"+"4269676e6f7365"+ // field 301, encoding 2, string "Bignose" + "f0123f"+ // field 302, encoding 0, value 63 + "f8127f"+ // field 303, encoding 0, value 127 + "8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose" + "90193f"+ // field 402, encoding 0, value 63 + "98197f") // field 403, encoding 0, value 127 + +} + +// All required fields set, defaults provided, all repeated fields given two values. +func TestEncodeDecode5(t *testing.T) { + pb := initGoTest(true) + pb.RepeatedField = []*GoTestField{initGoTestField(), initGoTestField()} + pb.F_BoolRepeated = []bool{false, true} + pb.F_Int32Repeated = []int32{32, 33} + pb.F_Int64Repeated = []int64{64, 65} + pb.F_Fixed32Repeated = []uint32{3232, 3333} + pb.F_Fixed64Repeated = []uint64{6464, 6565} + pb.F_Uint32Repeated = []uint32{323232, 333333} + pb.F_Uint64Repeated = []uint64{646464, 656565} + pb.F_FloatRepeated = []float32{32., 33.} + pb.F_DoubleRepeated = []float64{64., 65.} + pb.F_StringRepeated = []string{"hello", "sailor"} + pb.F_BytesRepeated = [][]byte{[]byte("big"), []byte("nose")} + pb.F_Sint32Repeated = []int32{32, -32} + pb.F_Sint64Repeated = []int64{64, -64} + pb.Repeatedgroup = []*GoTest_RepeatedGroup{initGoTest_RepeatedGroup(), initGoTest_RepeatedGroup()} + + overify(t, pb, + "0807"+ // field 1, encoding 0, value 7 + "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) + "2a0d"+"0a056c6162656c120474797065"+ // field 5, encoding 2 (GoTestField) + "2a0d"+"0a056c6162656c120474797065"+ // field 5, encoding 2 (GoTestField) + "5001"+ // field 10, encoding 0, value 1 + "5803"+ // field 11, encoding 0, value 3 + "6006"+ // field 12, encoding 0, value 6 + "6d20000000"+ // field 13, encoding 5, value 32 + "714000000000000000"+ // field 14, encoding 1, value 64 + "78a019"+ // field 15, encoding 0, value 3232 + "8001c032"+ // field 16, encoding 0, value 6464 + "8d0100004a45"+ // field 17, encoding 5, value 3232.0 + "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 + "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string" + "a00100"+ // field 20, encoding 0, value 0 + "a00101"+ // field 20, encoding 0, value 1 + "a80120"+ // field 21, encoding 0, value 32 + "a80121"+ // field 21, encoding 0, value 33 + "b00140"+ // field 22, encoding 0, value 64 + "b00141"+ // field 22, encoding 0, value 65 + "bd01a00c0000"+ // field 23, encoding 5, value 3232 + "bd01050d0000"+ // field 23, encoding 5, value 3333 + "c1014019000000000000"+ // field 24, encoding 1, value 6464 + "c101a519000000000000"+ // field 24, encoding 1, value 6565 + "c801a0dd13"+ // field 25, encoding 0, value 323232 + "c80195ac14"+ // field 25, encoding 0, value 333333 + "d001c0ba27"+ // field 26, encoding 0, value 646464 + "d001b58928"+ // field 26, encoding 0, value 656565 + "dd0100000042"+ // field 27, encoding 5, value 32.0 + "dd0100000442"+ // field 27, encoding 5, value 33.0 + "e1010000000000005040"+ // field 28, encoding 1, value 64.0 + "e1010000000000405040"+ // field 28, encoding 1, value 65.0 + "ea0105"+"68656c6c6f"+ // field 29, encoding 2, string "hello" + "ea0106"+"7361696c6f72"+ // field 29, encoding 2, string "sailor" + "c00201"+ // field 40, encoding 0, value 1 + "c80220"+ // field 41, encoding 0, value 32 + "d00240"+ // field 
42, encoding 0, value 64 + "dd0240010000"+ // field 43, encoding 5, value 320 + "e1028002000000000000"+ // field 44, encoding 1, value 640 + "e8028019"+ // field 45, encoding 0, value 3200 + "f0028032"+ // field 46, encoding 0, value 6400 + "fd02e0659948"+ // field 47, encoding 5, value 314159.0 + "81030000000050971041"+ // field 48, encoding 1, value 271828.0 + "8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n" + "b304"+ // start group field 70 level 1 + "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" + "b404"+ // end group field 70 level 1 + "8305"+ // start group field 80 level 1 + "8a0508"+"7265706561746564"+ // field 81, encoding 2, string "repeated" + "8405"+ // end group field 80 level 1 + "8305"+ // start group field 80 level 1 + "8a0508"+"7265706561746564"+ // field 81, encoding 2, string "repeated" + "8405"+ // end group field 80 level 1 + "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes" + "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 + "b8067f"+ // field 103, encoding 0, 0x7f zigzag64 + "ca0c03"+"626967"+ // field 201, encoding 2, string "big" + "ca0c04"+"6e6f7365"+ // field 201, encoding 2, string "nose" + "d00c40"+ // field 202, encoding 0, value 32 + "d00c3f"+ // field 202, encoding 0, value -32 + "d80c8001"+ // field 203, encoding 0, value 64 + "d80c7f"+ // field 203, encoding 0, value -64 + "8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose" + "90193f"+ // field 402, encoding 0, value 63 + "98197f") // field 403, encoding 0, value 127 + +} + +// All required fields set, all packed repeated fields given two values. +func TestEncodeDecode6(t *testing.T) { + pb := initGoTest(false) + pb.F_BoolRepeatedPacked = []bool{false, true} + pb.F_Int32RepeatedPacked = []int32{32, 33} + pb.F_Int64RepeatedPacked = []int64{64, 65} + pb.F_Fixed32RepeatedPacked = []uint32{3232, 3333} + pb.F_Fixed64RepeatedPacked = []uint64{6464, 6565} + pb.F_Uint32RepeatedPacked = []uint32{323232, 333333} + pb.F_Uint64RepeatedPacked = []uint64{646464, 656565} + pb.F_FloatRepeatedPacked = []float32{32., 33.} + pb.F_DoubleRepeatedPacked = []float64{64., 65.} + pb.F_Sint32RepeatedPacked = []int32{32, -32} + pb.F_Sint64RepeatedPacked = []int64{64, -64} + + overify(t, pb, + "0807"+ // field 1, encoding 0, value 7 + "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) + "5001"+ // field 10, encoding 0, value 1 + "5803"+ // field 11, encoding 0, value 3 + "6006"+ // field 12, encoding 0, value 6 + "6d20000000"+ // field 13, encoding 5, value 32 + "714000000000000000"+ // field 14, encoding 1, value 64 + "78a019"+ // field 15, encoding 0, value 3232 + "8001c032"+ // field 16, encoding 0, value 6464 + "8d0100004a45"+ // field 17, encoding 5, value 3232.0 + "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 + "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string" + "9203020001"+ // field 50, encoding 2, 2 bytes, value 0, value 1 + "9a03022021"+ // field 51, encoding 2, 2 bytes, value 32, value 33 + "a203024041"+ // field 52, encoding 2, 2 bytes, value 64, value 65 + "aa0308"+ // field 53, encoding 2, 8 bytes + "a00c0000050d0000"+ // value 3232, value 3333 + "b20310"+ // field 54, encoding 2, 16 bytes + "4019000000000000a519000000000000"+ // value 6464, value 6565 + "ba0306"+ // field 55, encoding 2, 6 bytes + "a0dd1395ac14"+ // value 323232, value 333333 + "c20306"+ // field 56, encoding 2, 6 bytes + "c0ba27b58928"+ // value 646464, value 656565 + "ca0308"+ // field 57, 
encoding 2, 8 bytes + "0000004200000442"+ // value 32.0, value 33.0 + "d20310"+ // field 58, encoding 2, 16 bytes + "00000000000050400000000000405040"+ // value 64.0, value 65.0 + "b304"+ // start group field 70 level 1 + "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" + "b404"+ // end group field 70 level 1 + "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes" + "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 + "b8067f"+ // field 103, encoding 0, 0x7f zigzag64 + "b21f02"+ // field 502, encoding 2, 2 bytes + "403f"+ // value 32, value -32 + "ba1f03"+ // field 503, encoding 2, 3 bytes + "80017f") // value 64, value -64 +} + +// Test that we can encode empty bytes fields. +func TestEncodeDecodeBytes1(t *testing.T) { + pb := initGoTest(false) + + // Create our bytes + pb.F_BytesRequired = []byte{} + pb.F_BytesRepeated = [][]byte{{}} + pb.F_BytesOptional = []byte{} + + d, err := Marshal(pb) + if err != nil { + t.Error(err) + } + + pbd := new(GoTest) + if err := Unmarshal(d, pbd); err != nil { + t.Error(err) + } + + if pbd.F_BytesRequired == nil || len(pbd.F_BytesRequired) != 0 { + t.Error("required empty bytes field is incorrect") + } + if pbd.F_BytesRepeated == nil || len(pbd.F_BytesRepeated) == 1 && pbd.F_BytesRepeated[0] == nil { + t.Error("repeated empty bytes field is incorrect") + } + if pbd.F_BytesOptional == nil || len(pbd.F_BytesOptional) != 0 { + t.Error("optional empty bytes field is incorrect") + } +} + +// Test that we encode nil-valued fields of a repeated bytes field correctly. +// Since entries in a repeated field cannot be nil, nil must mean empty value. +func TestEncodeDecodeBytes2(t *testing.T) { + pb := initGoTest(false) + + // Create our bytes + pb.F_BytesRepeated = [][]byte{nil} + + d, err := Marshal(pb) + if err != nil { + t.Error(err) + } + + pbd := new(GoTest) + if err := Unmarshal(d, pbd); err != nil { + t.Error(err) + } + + if len(pbd.F_BytesRepeated) != 1 || pbd.F_BytesRepeated[0] == nil { + t.Error("Unexpected value for repeated bytes field") + } +} + +// All required fields set, defaults provided, all repeated fields given two values. +func TestSkippingUnrecognizedFields(t *testing.T) { + o := old() + pb := initGoTestField() + + // Marshal it normally. + o.Marshal(pb) + + // Now new a GoSkipTest record. + skip := &GoSkipTest{ + SkipInt32: Int32(32), + SkipFixed32: Uint32(3232), + SkipFixed64: Uint64(6464), + SkipString: String("skipper"), + Skipgroup: &GoSkipTest_SkipGroup{ + GroupInt32: Int32(75), + GroupString: String("wxyz"), + }, + } + + // Marshal it into same buffer. + o.Marshal(skip) + + pbd := new(GoTestField) + o.Unmarshal(pbd) + + // The __unrecognized field should be a marshaling of GoSkipTest + skipd := new(GoSkipTest) + + o.SetBuf(pbd.XXX_unrecognized) + o.Unmarshal(skipd) + + if *skipd.SkipInt32 != *skip.SkipInt32 { + t.Error("skip int32", skipd.SkipInt32) + } + if *skipd.SkipFixed32 != *skip.SkipFixed32 { + t.Error("skip fixed32", skipd.SkipFixed32) + } + if *skipd.SkipFixed64 != *skip.SkipFixed64 { + t.Error("skip fixed64", skipd.SkipFixed64) + } + if *skipd.SkipString != *skip.SkipString { + t.Error("skip string", *skipd.SkipString) + } + if *skipd.Skipgroup.GroupInt32 != *skip.Skipgroup.GroupInt32 { + t.Error("skip group int32", skipd.Skipgroup.GroupInt32) + } + if *skipd.Skipgroup.GroupString != *skip.Skipgroup.GroupString { + t.Error("skip group string", *skipd.Skipgroup.GroupString) + } +} + +// Check that unrecognized fields of a submessage are preserved. 
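+// The literal "\x12\x05carbs" used below is the wire encoding of the unknown
+// field: key 0x12 (field 2, wire type 2, length-delimited), length 5, then
+// the bytes "carbs". It should survive Clone and re-Marshal verbatim.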
+func TestSubmessageUnrecognizedFields(t *testing.T) { + nm := &NewMessage{ + Nested: &NewMessage_Nested{ + Name: String("Nigel"), + FoodGroup: String("carbs"), + }, + } + b, err := Marshal(nm) + if err != nil { + t.Fatalf("Marshal of NewMessage: %v", err) + } + + // Unmarshal into an OldMessage. + om := new(OldMessage) + if err := Unmarshal(b, om); err != nil { + t.Fatalf("Unmarshal to OldMessage: %v", err) + } + exp := &OldMessage{ + Nested: &OldMessage_Nested{ + Name: String("Nigel"), + // normal protocol buffer users should not do this + XXX_unrecognized: []byte("\x12\x05carbs"), + }, + } + if !Equal(om, exp) { + t.Errorf("om = %v, want %v", om, exp) + } + + // Clone the OldMessage. + om = Clone(om).(*OldMessage) + if !Equal(om, exp) { + t.Errorf("Clone(om) = %v, want %v", om, exp) + } + + // Marshal the OldMessage, then unmarshal it into an empty NewMessage. + if b, err = Marshal(om); err != nil { + t.Fatalf("Marshal of OldMessage: %v", err) + } + t.Logf("Marshal(%v) -> %q", om, b) + nm2 := new(NewMessage) + if err := Unmarshal(b, nm2); err != nil { + t.Fatalf("Unmarshal to NewMessage: %v", err) + } + if !Equal(nm, nm2) { + t.Errorf("NewMessage round-trip: %v => %v", nm, nm2) + } +} + +// Check that an int32 field can be upgraded to an int64 field. +func TestNegativeInt32(t *testing.T) { + om := &OldMessage{ + Num: Int32(-1), + } + b, err := Marshal(om) + if err != nil { + t.Fatalf("Marshal of OldMessage: %v", err) + } + + // Check the size. It should be 11 bytes; + // 1 for the field/wire type, and 10 for the negative number. + if len(b) != 11 { + t.Errorf("%v marshaled as %q, wanted 11 bytes", om, b) + } + + // Unmarshal into a NewMessage. + nm := new(NewMessage) + if err := Unmarshal(b, nm); err != nil { + t.Fatalf("Unmarshal to NewMessage: %v", err) + } + want := &NewMessage{ + Num: Int64(-1), + } + if !Equal(nm, want) { + t.Errorf("nm = %v, want %v", nm, want) + } +} + +// Check that we can grow an array (repeated field) to have many elements. +// This test doesn't depend only on our encoding; for variety, it makes sure +// we create, encode, and decode the correct contents explicitly. It's therefore +// a bit messier. +// This test also uses (and hence tests) the Marshal/Unmarshal functions +// instead of the methods. +func TestBigRepeated(t *testing.T) { + pb := initGoTest(true) + + // Create the arrays + const N = 50 // Internally the library starts much smaller. + pb.Repeatedgroup = make([]*GoTest_RepeatedGroup, N) + pb.F_Sint64Repeated = make([]int64, N) + pb.F_Sint32Repeated = make([]int32, N) + pb.F_BytesRepeated = make([][]byte, N) + pb.F_StringRepeated = make([]string, N) + pb.F_DoubleRepeated = make([]float64, N) + pb.F_FloatRepeated = make([]float32, N) + pb.F_Uint64Repeated = make([]uint64, N) + pb.F_Uint32Repeated = make([]uint32, N) + pb.F_Fixed64Repeated = make([]uint64, N) + pb.F_Fixed32Repeated = make([]uint32, N) + pb.F_Int64Repeated = make([]int64, N) + pb.F_Int32Repeated = make([]int32, N) + pb.F_BoolRepeated = make([]bool, N) + pb.RepeatedField = make([]*GoTestField, N) + + // Fill in the arrays with checkable values. 
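+	// Each element stores a value derived from its index, so the decode
+	// loop below can recompute the expected value from i alone.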
+	igtf := initGoTestField()
+	igtrg := initGoTest_RepeatedGroup()
+	for i := 0; i < N; i++ {
+		pb.Repeatedgroup[i] = igtrg
+		pb.F_Sint64Repeated[i] = int64(i)
+		pb.F_Sint32Repeated[i] = int32(i)
+		s := fmt.Sprint(i)
+		pb.F_BytesRepeated[i] = []byte(s)
+		pb.F_StringRepeated[i] = s
+		pb.F_DoubleRepeated[i] = float64(i)
+		pb.F_FloatRepeated[i] = float32(i)
+		pb.F_Uint64Repeated[i] = uint64(i)
+		pb.F_Uint32Repeated[i] = uint32(i)
+		pb.F_Fixed64Repeated[i] = uint64(i)
+		pb.F_Fixed32Repeated[i] = uint32(i)
+		pb.F_Int64Repeated[i] = int64(i)
+		pb.F_Int32Repeated[i] = int32(i)
+		pb.F_BoolRepeated[i] = i%2 == 0
+		pb.RepeatedField[i] = igtf
+	}
+
+	// Marshal.
+	buf, _ := Marshal(pb)
+
+	// Now test Unmarshal by recreating the original buffer.
+	pbd := new(GoTest)
+	Unmarshal(buf, pbd)
+
+	// Check the checkable values
+	for i := uint64(0); i < N; i++ {
+		if pbd.Repeatedgroup[i] == nil { // TODO: more checking?
+			t.Error("pbd.Repeatedgroup bad")
+		}
+		var x uint64
+		x = uint64(pbd.F_Sint64Repeated[i])
+		if x != i {
+			t.Error("pbd.F_Sint64Repeated bad", x, i)
+		}
+		x = uint64(pbd.F_Sint32Repeated[i])
+		if x != i {
+			t.Error("pbd.F_Sint32Repeated bad", x, i)
+		}
+		s := fmt.Sprint(i)
+		equalbytes(pbd.F_BytesRepeated[i], []byte(s), t)
+		if pbd.F_StringRepeated[i] != s {
+			t.Error("pbd.F_StringRepeated bad", pbd.F_StringRepeated[i], i)
+		}
+		x = uint64(pbd.F_DoubleRepeated[i])
+		if x != i {
+			t.Error("pbd.F_DoubleRepeated bad", x, i)
+		}
+		x = uint64(pbd.F_FloatRepeated[i])
+		if x != i {
+			t.Error("pbd.F_FloatRepeated bad", x, i)
+		}
+		x = pbd.F_Uint64Repeated[i]
+		if x != i {
+			t.Error("pbd.F_Uint64Repeated bad", x, i)
+		}
+		x = uint64(pbd.F_Uint32Repeated[i])
+		if x != i {
+			t.Error("pbd.F_Uint32Repeated bad", x, i)
+		}
+		x = pbd.F_Fixed64Repeated[i]
+		if x != i {
+			t.Error("pbd.F_Fixed64Repeated bad", x, i)
+		}
+		x = uint64(pbd.F_Fixed32Repeated[i])
+		if x != i {
+			t.Error("pbd.F_Fixed32Repeated bad", x, i)
+		}
+		x = uint64(pbd.F_Int64Repeated[i])
+		if x != i {
+			t.Error("pbd.F_Int64Repeated bad", x, i)
+		}
+		x = uint64(pbd.F_Int32Repeated[i])
+		if x != i {
+			t.Error("pbd.F_Int32Repeated bad", x, i)
+		}
+		if pbd.F_BoolRepeated[i] != (i%2 == 0) {
+			t.Error("pbd.F_BoolRepeated bad", pbd.F_BoolRepeated[i], i)
+		}
+		if pbd.RepeatedField[i] == nil { // TODO: more checking?
+			t.Error("pbd.RepeatedField bad")
+		}
+	}
+}
+
+// Verify we give a useful message when decoding to the wrong structure type.
+func TestTypeMismatch(t *testing.T) {
+	pb1 := initGoTest(true)
+
+	// Marshal
+	o := old()
+	o.Marshal(pb1)
+
+	// Now Unmarshal it to the wrong type.
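+	// GoTest encodes field 1 as a varint (wire type 0), while GoTestField's
+	// field 1 is a length-delimited string (wire type 2), so the decoder
+	// should report the wire-type mismatch rather than guess.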
+	pb2 := initGoTestField()
+	err := o.Unmarshal(pb2)
+	if err == nil {
+		t.Error("expected error, got no error")
+	} else if !strings.Contains(err.Error(), "bad wiretype") {
+		t.Error("expected bad wiretype error, got", err)
+	}
+}
+
+func encodeDecode(t *testing.T, in, out Message, msg string) {
+	buf, err := Marshal(in)
+	if err != nil {
+		t.Fatalf("failed marshaling %v: %v", msg, err)
+	}
+	if err := Unmarshal(buf, out); err != nil {
+		t.Fatalf("failed unmarshaling %v: %v", msg, err)
+	}
+}
+
+func TestPackedNonPackedDecoderSwitching(t *testing.T) {
+	np, p := new(NonPackedTest), new(PackedTest)
+
+	// non-packed -> packed
+	np.A = []int32{0, 1, 1, 2, 3, 5}
+	encodeDecode(t, np, p, "non-packed -> packed")
+	if !reflect.DeepEqual(np.A, p.B) {
+		t.Errorf("failed non-packed -> packed; np.A=%+v, p.B=%+v", np.A, p.B)
+	}
+
+	// packed -> non-packed
+	np.Reset()
+	p.B = []int32{3, 1, 4, 1, 5, 9}
+	encodeDecode(t, p, np, "packed -> non-packed")
+	if !reflect.DeepEqual(p.B, np.A) {
+		t.Errorf("failed packed -> non-packed; p.B=%+v, np.A=%+v", p.B, np.A)
+	}
+}
+
+func TestProto1RepeatedGroup(t *testing.T) {
+	pb := &MessageList{
+		Message: []*MessageList_Message{
+			{
+				Name:  String("blah"),
+				Count: Int32(7),
+			},
+			// NOTE: pb.Message[1] is nil
+			nil,
+		},
+	}
+
+	o := old()
+	err := o.Marshal(pb)
+	if err == nil || !strings.Contains(err.Error(), "repeated field Message has nil") {
+		t.Fatalf("unexpected or no error when marshaling: %v", err)
+	}
+}
+
+// Test that enums work. Checks for a bug introduced by making enums
+// named types instead of int32: newInt32FromUint64 would crash with
+// a type mismatch in reflect.PointTo.
+func TestEnum(t *testing.T) {
+	pb := new(GoEnum)
+	pb.Foo = FOO_FOO1.Enum()
+	o := old()
+	if err := o.Marshal(pb); err != nil {
+		t.Fatal("error encoding enum:", err)
+	}
+	pb1 := new(GoEnum)
+	if err := o.Unmarshal(pb1); err != nil {
+		t.Fatal("error decoding enum:", err)
+	}
+	if *pb1.Foo != FOO_FOO1 {
+		t.Error("expected FOO_FOO1 but got", *pb1.Foo)
+	}
+}
+
+// Enum types have String methods. Check that enum fields can be printed.
+// We don't care what the value actually is, just as long as it doesn't crash.
+func TestPrintingNilEnumFields(t *testing.T) {
+	pb := new(GoEnum)
+	_ = fmt.Sprintf("%+v", pb)
+}
+
+// Verify that absent required fields cause Marshal/Unmarshal to return errors.
+func TestRequiredFieldEnforcement(t *testing.T) {
+	pb := new(GoTestField)
+	_, err := Marshal(pb)
+	if err == nil {
+		t.Error("marshal: expected error, got nil")
+	} else if _, ok := err.(*RequiredNotSetError); !ok || !strings.Contains(err.Error(), "Label") {
+		t.Errorf("marshal: bad error type: %v", err)
+	}
+
+	// A slightly sneaky, yet valid, proto. It encodes the same required field twice,
+	// so simply counting the required fields is insufficient.
+	// field 1, encoding 2, value "hi"
+	buf := []byte("\x0A\x02hi\x0A\x02hi")
+	err = Unmarshal(buf, pb)
+	if err == nil {
+		t.Error("unmarshal: expected error, got nil")
+	} else if _, ok := err.(*RequiredNotSetError); !ok || !strings.Contains(err.Error(), "{Unknown}") {
+		t.Errorf("unmarshal: bad error type: %v", err)
+	}
+}
+
+// Verify that absent required fields in groups cause Marshal/Unmarshal to return errors.
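+// The two-byte input used below is an empty group: 11 is key (1<<3 | 3),
+// i.e. field 1 with wire type 3 (start group), and 12 is (1<<3 | 4), the
+// matching end-group marker, so the group's required field is never set.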
+func TestRequiredFieldEnforcementGroups(t *testing.T) { + pb := &GoTestRequiredGroupField{Group: &GoTestRequiredGroupField_Group{}} + if _, err := Marshal(pb); err == nil { + t.Error("marshal: expected error, got nil") + } else if _, ok := err.(*RequiredNotSetError); !ok || !strings.Contains(err.Error(), "Group.Field") { + t.Errorf("marshal: bad error type: %v", err) + } + + buf := []byte{11, 12} + if err := Unmarshal(buf, pb); err == nil { + t.Error("unmarshal: expected error, got nil") + } else if _, ok := err.(*RequiredNotSetError); !ok || !strings.Contains(err.Error(), "Group.{Unknown}") { + t.Errorf("unmarshal: bad error type: %v", err) + } +} + +func TestTypedNilMarshal(t *testing.T) { + // A typed nil should return ErrNil and not crash. + { + var m *GoEnum + if _, err := Marshal(m); err != ErrNil { + t.Errorf("Marshal(%#v): got %v, want ErrNil", m, err) + } + } + + { + m := &Communique{Union: &Communique_Msg{nil}} + if _, err := Marshal(m); err == nil || err == ErrNil { + t.Errorf("Marshal(%#v): got %v, want errOneofHasNil", m, err) + } + } +} + +// A type that implements the Marshaler interface, but is not nillable. +type nonNillableInt uint64 + +func (nni nonNillableInt) Marshal() ([]byte, error) { + return EncodeVarint(uint64(nni)), nil +} + +type NNIMessage struct { + nni nonNillableInt +} + +func (*NNIMessage) Reset() {} +func (*NNIMessage) String() string { return "" } +func (*NNIMessage) ProtoMessage() {} + +// A type that implements the Marshaler interface and is nillable. +type nillableMessage struct { + x uint64 +} + +func (nm *nillableMessage) Marshal() ([]byte, error) { + return EncodeVarint(nm.x), nil +} + +type NMMessage struct { + nm *nillableMessage +} + +func (*NMMessage) Reset() {} +func (*NMMessage) String() string { return "" } +func (*NMMessage) ProtoMessage() {} + +// Verify a type that uses the Marshaler interface, but has a nil pointer. +func TestNilMarshaler(t *testing.T) { + // Try a struct with a Marshaler field that is nil. + // It should be directly marshable. + nmm := new(NMMessage) + if _, err := Marshal(nmm); err != nil { + t.Error("unexpected error marshaling nmm: ", err) + } + + // Try a struct with a Marshaler field that is not nillable. + nnim := new(NNIMessage) + nnim.nni = 7 + var _ Marshaler = nnim.nni // verify it is truly a Marshaler + if _, err := Marshal(nnim); err != nil { + t.Error("unexpected error marshaling nnim: ", err) + } +} + +func TestAllSetDefaults(t *testing.T) { + // Exercise SetDefaults with all scalar field types. + m := &Defaults{ + // NaN != NaN, so override that here. + F_Nan: Float32(1.7), + } + expected := &Defaults{ + F_Bool: Bool(true), + F_Int32: Int32(32), + F_Int64: Int64(64), + F_Fixed32: Uint32(320), + F_Fixed64: Uint64(640), + F_Uint32: Uint32(3200), + F_Uint64: Uint64(6400), + F_Float: Float32(314159), + F_Double: Float64(271828), + F_String: String(`hello, "world!"` + "\n"), + F_Bytes: []byte("Bignose"), + F_Sint32: Int32(-32), + F_Sint64: Int64(-64), + F_Enum: Defaults_GREEN.Enum(), + F_Pinf: Float32(float32(math.Inf(1))), + F_Ninf: Float32(float32(math.Inf(-1))), + F_Nan: Float32(1.7), + StrZero: String(""), + } + SetDefaults(m) + if !Equal(m, expected) { + t.Errorf("SetDefaults failed\n got %v\nwant %v", m, expected) + } +} + +func TestSetDefaultsWithSetField(t *testing.T) { + // Check that a set value is not overridden. 
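+	// F_Int32 carries a proto default of 32 (see TestAllSetDefaults above);
+	// the explicit 12 must survive SetDefaults.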
+ m := &Defaults{ + F_Int32: Int32(12), + } + SetDefaults(m) + if v := m.GetF_Int32(); v != 12 { + t.Errorf("m.FInt32 = %v, want 12", v) + } +} + +func TestSetDefaultsWithSubMessage(t *testing.T) { + m := &OtherMessage{ + Key: Int64(123), + Inner: &InnerMessage{ + Host: String("gopher"), + }, + } + expected := &OtherMessage{ + Key: Int64(123), + Inner: &InnerMessage{ + Host: String("gopher"), + Port: Int32(4000), + }, + } + SetDefaults(m) + if !Equal(m, expected) { + t.Errorf("\n got %v\nwant %v", m, expected) + } +} + +func TestSetDefaultsWithRepeatedSubMessage(t *testing.T) { + m := &MyMessage{ + RepInner: []*InnerMessage{{}}, + } + expected := &MyMessage{ + RepInner: []*InnerMessage{{ + Port: Int32(4000), + }}, + } + SetDefaults(m) + if !Equal(m, expected) { + t.Errorf("\n got %v\nwant %v", m, expected) + } +} + +func TestSetDefaultWithRepeatedNonMessage(t *testing.T) { + m := &MyMessage{ + Pet: []string{"turtle", "wombat"}, + } + expected := Clone(m) + SetDefaults(m) + if !Equal(m, expected) { + t.Errorf("\n got %v\nwant %v", m, expected) + } +} + +func TestMaximumTagNumber(t *testing.T) { + m := &MaxTag{ + LastField: String("natural goat essence"), + } + buf, err := Marshal(m) + if err != nil { + t.Fatalf("proto.Marshal failed: %v", err) + } + m2 := new(MaxTag) + if err := Unmarshal(buf, m2); err != nil { + t.Fatalf("proto.Unmarshal failed: %v", err) + } + if got, want := m2.GetLastField(), *m.LastField; got != want { + t.Errorf("got %q, want %q", got, want) + } +} + +func TestJSON(t *testing.T) { + m := &MyMessage{ + Count: Int32(4), + Pet: []string{"bunny", "kitty"}, + Inner: &InnerMessage{ + Host: String("cauchy"), + }, + Bikeshed: MyMessage_GREEN.Enum(), + } + const expected = `{"count":4,"pet":["bunny","kitty"],"inner":{"host":"cauchy"},"bikeshed":1}` + + b, err := json.Marshal(m) + if err != nil { + t.Fatalf("json.Marshal failed: %v", err) + } + s := string(b) + if s != expected { + t.Errorf("got %s\nwant %s", s, expected) + } + + received := new(MyMessage) + if err := json.Unmarshal(b, received); err != nil { + t.Fatalf("json.Unmarshal failed: %v", err) + } + if !Equal(received, m) { + t.Fatalf("got %s, want %s", received, m) + } + + // Test unmarshalling of JSON with symbolic enum name. + const old = `{"count":4,"pet":["bunny","kitty"],"inner":{"host":"cauchy"},"bikeshed":"GREEN"}` + received.Reset() + if err := json.Unmarshal([]byte(old), received); err != nil { + t.Fatalf("json.Unmarshal failed: %v", err) + } + if !Equal(received, m) { + t.Fatalf("got %s, want %s", received, m) + } +} + +func TestBadWireType(t *testing.T) { + b := []byte{7<<3 | 6} // field 7, wire type 6 + pb := new(OtherMessage) + if err := Unmarshal(b, pb); err == nil { + t.Errorf("Unmarshal did not fail") + } else if !strings.Contains(err.Error(), "unknown wire type") { + t.Errorf("wrong error: %v", err) + } +} + +func TestBytesWithInvalidLength(t *testing.T) { + // If a byte sequence has an invalid (negative) length, Unmarshal should not panic. + b := []byte{2<<3 | WireBytes, 0xff, 0xff, 0xff, 0xff, 0xff, 0} + Unmarshal(b, new(MyMessage)) +} + +func TestLengthOverflow(t *testing.T) { + // Overflowing a length should not panic. + b := []byte{2<<3 | WireBytes, 1, 1, 3<<3 | WireBytes, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f, 0x01} + Unmarshal(b, new(MyMessage)) +} + +func TestVarintOverflow(t *testing.T) { + // Overflowing a 64-bit length should not be allowed. 
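+	// The length prefix below is an 11-byte varint (ten 0x80 continuation
+	// bytes, then 0x01). A uint64 varint fits in at most 10 bytes, so
+	// Unmarshal must report an error instead of silently wrapping.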
+ b := []byte{1<<3 | WireVarint, 0x01, 3<<3 | WireBytes, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x01} + if err := Unmarshal(b, new(MyMessage)); err == nil { + t.Fatalf("Overflowed uint64 length without error") + } +} + +func TestUnmarshalFuzz(t *testing.T) { + const N = 1000 + seed := time.Now().UnixNano() + t.Logf("RNG seed is %d", seed) + rng := rand.New(rand.NewSource(seed)) + buf := make([]byte, 20) + for i := 0; i < N; i++ { + for j := range buf { + buf[j] = byte(rng.Intn(256)) + } + fuzzUnmarshal(t, buf) + } +} + +func TestMergeMessages(t *testing.T) { + pb := &MessageList{Message: []*MessageList_Message{{Name: String("x"), Count: Int32(1)}}} + data, err := Marshal(pb) + if err != nil { + t.Fatalf("Marshal: %v", err) + } + + pb1 := new(MessageList) + if err := Unmarshal(data, pb1); err != nil { + t.Fatalf("first Unmarshal: %v", err) + } + if err := Unmarshal(data, pb1); err != nil { + t.Fatalf("second Unmarshal: %v", err) + } + if len(pb1.Message) != 1 { + t.Errorf("two Unmarshals produced %d Messages, want 1", len(pb1.Message)) + } + + pb2 := new(MessageList) + if err := UnmarshalMerge(data, pb2); err != nil { + t.Fatalf("first UnmarshalMerge: %v", err) + } + if err := UnmarshalMerge(data, pb2); err != nil { + t.Fatalf("second UnmarshalMerge: %v", err) + } + if len(pb2.Message) != 2 { + t.Errorf("two UnmarshalMerges produced %d Messages, want 2", len(pb2.Message)) + } +} + +func TestExtensionMarshalOrder(t *testing.T) { + m := &MyMessage{Count: Int(123)} + if err := SetExtension(m, E_Ext_More, &Ext{Data: String("alpha")}); err != nil { + t.Fatalf("SetExtension: %v", err) + } + if err := SetExtension(m, E_Ext_Text, String("aleph")); err != nil { + t.Fatalf("SetExtension: %v", err) + } + if err := SetExtension(m, E_Ext_Number, Int32(1)); err != nil { + t.Fatalf("SetExtension: %v", err) + } + + // Serialize m several times, and check we get the same bytes each time. + var orig []byte + for i := 0; i < 100; i++ { + b, err := Marshal(m) + if err != nil { + t.Fatalf("Marshal: %v", err) + } + if i == 0 { + orig = b + continue + } + if !bytes.Equal(b, orig) { + t.Errorf("Bytes differ on attempt #%d", i) + } + } +} + +// Many extensions, because small maps might not iterate differently on each iteration. +var exts = []*ExtensionDesc{ + E_X201, + E_X202, + E_X203, + E_X204, + E_X205, + E_X206, + E_X207, + E_X208, + E_X209, + E_X210, + E_X211, + E_X212, + E_X213, + E_X214, + E_X215, + E_X216, + E_X217, + E_X218, + E_X219, + E_X220, + E_X221, + E_X222, + E_X223, + E_X224, + E_X225, + E_X226, + E_X227, + E_X228, + E_X229, + E_X230, + E_X231, + E_X232, + E_X233, + E_X234, + E_X235, + E_X236, + E_X237, + E_X238, + E_X239, + E_X240, + E_X241, + E_X242, + E_X243, + E_X244, + E_X245, + E_X246, + E_X247, + E_X248, + E_X249, + E_X250, +} + +func TestMessageSetMarshalOrder(t *testing.T) { + m := &MyMessageSet{} + for _, x := range exts { + if err := SetExtension(m, x, &Empty{}); err != nil { + t.Fatalf("SetExtension: %v", err) + } + } + + buf, err := Marshal(m) + if err != nil { + t.Fatalf("Marshal: %v", err) + } + + // Serialize m several times, and check we get the same bytes each time. 
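+	// Go randomizes map iteration order, so byte-for-byte stability is only
+	// possible if the implementation sorts extensions before encoding; the
+	// fifty extensions above make accidental stability very unlikely.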
+ for i := 0; i < 10; i++ { + b1, err := Marshal(m) + if err != nil { + t.Fatalf("Marshal: %v", err) + } + if !bytes.Equal(b1, buf) { + t.Errorf("Bytes differ on re-Marshal #%d", i) + } + + m2 := &MyMessageSet{} + if err := Unmarshal(buf, m2); err != nil { + t.Errorf("Unmarshal: %v", err) + } + b2, err := Marshal(m2) + if err != nil { + t.Errorf("re-Marshal: %v", err) + } + if !bytes.Equal(b2, buf) { + t.Errorf("Bytes differ on round-trip #%d", i) + } + } +} + +func TestUnmarshalMergesMessages(t *testing.T) { + // If a nested message occurs twice in the input, + // the fields should be merged when decoding. + a := &OtherMessage{ + Key: Int64(123), + Inner: &InnerMessage{ + Host: String("polhode"), + Port: Int32(1234), + }, + } + aData, err := Marshal(a) + if err != nil { + t.Fatalf("Marshal(a): %v", err) + } + b := &OtherMessage{ + Weight: Float32(1.2), + Inner: &InnerMessage{ + Host: String("herpolhode"), + Connected: Bool(true), + }, + } + bData, err := Marshal(b) + if err != nil { + t.Fatalf("Marshal(b): %v", err) + } + want := &OtherMessage{ + Key: Int64(123), + Weight: Float32(1.2), + Inner: &InnerMessage{ + Host: String("herpolhode"), + Port: Int32(1234), + Connected: Bool(true), + }, + } + got := new(OtherMessage) + if err := Unmarshal(append(aData, bData...), got); err != nil { + t.Fatalf("Unmarshal: %v", err) + } + if !Equal(got, want) { + t.Errorf("\n got %v\nwant %v", got, want) + } +} + +func TestEncodingSizes(t *testing.T) { + tests := []struct { + m Message + n int + }{ + {&Defaults{F_Int32: Int32(math.MaxInt32)}, 6}, + {&Defaults{F_Int32: Int32(math.MinInt32)}, 11}, + {&Defaults{F_Uint32: Uint32(uint32(math.MaxInt32) + 1)}, 6}, + {&Defaults{F_Uint32: Uint32(math.MaxUint32)}, 6}, + } + for _, test := range tests { + b, err := Marshal(test.m) + if err != nil { + t.Errorf("Marshal(%v): %v", test.m, err) + continue + } + if len(b) != test.n { + t.Errorf("Marshal(%v) yielded %d bytes, want %d bytes", test.m, len(b), test.n) + } + } +} + +func TestRequiredNotSetError(t *testing.T) { + pb := initGoTest(false) + pb.RequiredField.Label = nil + pb.F_Int32Required = nil + pb.F_Int64Required = nil + + expected := "0807" + // field 1, encoding 0, value 7 + "2206" + "120474797065" + // field 4, encoding 2 (GoTestField) + "5001" + // field 10, encoding 0, value 1 + "6d20000000" + // field 13, encoding 5, value 0x20 + "714000000000000000" + // field 14, encoding 1, value 0x40 + "78a019" + // field 15, encoding 0, value 0xca0 = 3232 + "8001c032" + // field 16, encoding 0, value 0x1940 = 6464 + "8d0100004a45" + // field 17, encoding 5, value 3232.0 + "9101000000000040b940" + // field 18, encoding 1, value 6464.0 + "9a0106" + "737472696e67" + // field 19, encoding 2, string "string" + "b304" + // field 70, encoding 3, start group + "ba0408" + "7265717569726564" + // field 71, encoding 2, string "required" + "b404" + // field 70, encoding 4, end group + "aa0605" + "6279746573" + // field 101, encoding 2, string "bytes" + "b0063f" + // field 102, encoding 0, 0x3f zigzag32 + "b8067f" // field 103, encoding 0, 0x7f zigzag64 + + o := old() + bytes, err := Marshal(pb) + if _, ok := err.(*RequiredNotSetError); !ok { + fmt.Printf("marshal-1 err = %v, want *RequiredNotSetError", err) + o.DebugPrint("", bytes) + t.Fatalf("expected = %s", expected) + } + if strings.Index(err.Error(), "RequiredField.Label") < 0 { + t.Errorf("marshal-1 wrong err msg: %v", err) + } + if !equal(bytes, expected, t) { + o.DebugPrint("neq 1", bytes) + t.Fatalf("expected = %s", expected) + } + + // Now test Unmarshal by 
recreating the original buffer.
+	pbd := new(GoTest)
+	err = Unmarshal(bytes, pbd)
+	if _, ok := err.(*RequiredNotSetError); !ok {
+		t.Errorf("unmarshal err = %v, want *RequiredNotSetError", err)
+		o.DebugPrint("", bytes)
+		t.Fatalf("string = %s", expected)
+	}
+	if strings.Index(err.Error(), "RequiredField.{Unknown}") < 0 {
+		t.Errorf("unmarshal wrong err msg: %v", err)
+	}
+	bytes, err = Marshal(pbd)
+	if _, ok := err.(*RequiredNotSetError); !ok {
+		t.Errorf("marshal-2 err = %v, want *RequiredNotSetError", err)
+		o.DebugPrint("", bytes)
+		t.Fatalf("string = %s", expected)
+	}
+	if strings.Index(err.Error(), "RequiredField.Label") < 0 {
+		t.Errorf("marshal-2 wrong err msg: %v", err)
+	}
+	if !equal(bytes, expected, t) {
+		o.DebugPrint("neq 2", bytes)
+		t.Fatalf("string = %s", expected)
+	}
+}
+
+func fuzzUnmarshal(t *testing.T, data []byte) {
+	defer func() {
+		if e := recover(); e != nil {
+			t.Errorf("These bytes caused a panic: %+v", data)
+			t.Logf("Stack:\n%s", debug.Stack())
+			t.FailNow()
+		}
+	}()
+
+	pb := new(MyMessage)
+	Unmarshal(data, pb)
+}
+
+func TestMapFieldMarshal(t *testing.T) {
+	m := &MessageWithMap{
+		NameMapping: map[int32]string{
+			1: "Rob",
+			4: "Ian",
+			8: "Dave",
+		},
+	}
+	b, err := Marshal(m)
+	if err != nil {
+		t.Fatalf("Marshal: %v", err)
+	}
+
+	// b should be the concatenation of these three byte sequences in some order.
+	parts := []string{
+		"\n\a\b\x01\x12\x03Rob",
+		"\n\a\b\x04\x12\x03Ian",
+		"\n\b\b\x08\x12\x04Dave",
+	}
+	ok := false
+	for i := range parts {
+		for j := range parts {
+			if j == i {
+				continue
+			}
+			for k := range parts {
+				if k == i || k == j {
+					continue
+				}
+				try := parts[i] + parts[j] + parts[k]
+				if bytes.Equal(b, []byte(try)) {
+					ok = true
+					break
+				}
+			}
+		}
+	}
+	if !ok {
+		t.Fatalf("Incorrect Marshal output.\n got %q\nwant %q (or a permutation of that)", b, parts[0]+parts[1]+parts[2])
+	}
+	t.Logf("FYI b: %q", b)
+
+	(new(Buffer)).DebugPrint("Dump of b", b)
+}
+
+func TestMapFieldRoundTrips(t *testing.T) {
+	m := &MessageWithMap{
+		NameMapping: map[int32]string{
+			1: "Rob",
+			4: "Ian",
+			8: "Dave",
+		},
+		MsgMapping: map[int64]*FloatingPoint{
+			0x7001: &FloatingPoint{F: Float64(2.0)},
+		},
+		ByteMapping: map[bool][]byte{
+			false: []byte("that's not right!"),
+			true:  []byte("aye, 'tis true!"),
+		},
+	}
+	b, err := Marshal(m)
+	if err != nil {
+		t.Fatalf("Marshal: %v", err)
+	}
+	t.Logf("FYI b: %q", b)
+	m2 := new(MessageWithMap)
+	if err := Unmarshal(b, m2); err != nil {
+		t.Fatalf("Unmarshal: %v", err)
+	}
+	for _, pair := range [][2]interface{}{
+		{m.NameMapping, m2.NameMapping},
+		{m.MsgMapping, m2.MsgMapping},
+		{m.ByteMapping, m2.ByteMapping},
+	} {
+		if !reflect.DeepEqual(pair[0], pair[1]) {
+			t.Errorf("Map did not survive a round trip.\ninitial: %v\n final: %v", pair[0], pair[1])
+		}
+	}
+}
+
+func TestMapFieldWithNil(t *testing.T) {
+	m1 := &MessageWithMap{
+		MsgMapping: map[int64]*FloatingPoint{
+			1: nil,
+		},
+	}
+	b, err := Marshal(m1)
+	if err != nil {
+		t.Fatalf("Marshal: %v", err)
+	}
+	m2 := new(MessageWithMap)
+	if err := Unmarshal(b, m2); err != nil {
+		t.Fatalf("Unmarshal: %v, got these bytes: %v", err, b)
+	}
+	if v, ok := m2.MsgMapping[1]; !ok {
+		t.Error("msg_mapping[1] not present")
+	} else if v != nil {
+		t.Errorf("msg_mapping[1] not nil: %v", v)
+	}
+}
+
+func TestMapFieldWithNilBytes(t *testing.T) {
+	m1 := &MessageWithMap{
+		ByteMapping: map[bool][]byte{
+			false: []byte{},
+			true:  nil,
+		},
+	}
+	n := Size(m1)
+	b, err := Marshal(m1)
+	if err != nil {
+		t.Fatalf("Marshal: %v", err)
+	}
+	if n != len(b) {
+		t.Errorf("Size(m1) = %d; want len(Marshal(m1)) = %d", n, len(b))
+	}
+	m2 := new(MessageWithMap)
+	if err := Unmarshal(b, m2); err != nil {
+		t.Fatalf("Unmarshal: %v, got these bytes: %v", err, b)
+	}
+	if v, ok := m2.ByteMapping[false]; !ok {
+		t.Error("byte_mapping[false] not present")
+	} else if len(v) != 0 {
+		t.Errorf("byte_mapping[false] not empty: %#v", v)
+	}
+	if v, ok := m2.ByteMapping[true]; !ok {
+		t.Error("byte_mapping[true] not present")
+	} else if len(v) != 0 {
+		t.Errorf("byte_mapping[true] not empty: %#v", v)
+	}
+}
+
+func TestDecodeMapFieldMissingKey(t *testing.T) {
+	b := []byte{
+		0x0A, 0x03, // message, tag 1 (name_mapping), of length 3 bytes
+		// no key
+		0x12, 0x01, 0x6D, // string value of length 1 byte, value "m"
+	}
+	got := &MessageWithMap{}
+	err := Unmarshal(b, got)
+	if err != nil {
+		t.Fatalf("failed to unmarshal map with missing key: %v", err)
+	}
+	want := &MessageWithMap{NameMapping: map[int32]string{0: "m"}}
+	if !Equal(got, want) {
+		t.Errorf("Unmarshaled map with no key was not as expected. got: %v, want %v", got, want)
+	}
+}
+
+func TestDecodeMapFieldMissingValue(t *testing.T) {
+	b := []byte{
+		0x0A, 0x02, // message, tag 1 (name_mapping), of length 2 bytes
+		0x08, 0x01, // varint key, value 1
+		// no value
+	}
+	got := &MessageWithMap{}
+	err := Unmarshal(b, got)
+	if err != nil {
+		t.Fatalf("failed to unmarshal map with missing value: %v", err)
+	}
+	want := &MessageWithMap{NameMapping: map[int32]string{1: ""}}
+	if !Equal(got, want) {
+		t.Errorf("Unmarshaled map with no value was not as expected. got: %v, want %v", got, want)
+	}
+}
+
+func TestOneof(t *testing.T) {
+	m := &Communique{}
+	b, err := Marshal(m)
+	if err != nil {
+		t.Fatalf("Marshal of empty message with oneof: %v", err)
+	}
+	if len(b) != 0 {
+		t.Errorf("Marshal of empty message yielded too many bytes: %v", b)
+	}
+
+	m = &Communique{
+		Union: &Communique_Name{"Barry"},
+	}
+
+	// Round-trip.
+	b, err = Marshal(m)
+	if err != nil {
+		t.Fatalf("Marshal of message with oneof: %v", err)
+	}
+	if len(b) != 7 { // name tag/wire (1) + name len (1) + name (5)
+		t.Errorf("Incorrect marshal of message with oneof: %v", b)
+	}
+	m.Reset()
+	if err := Unmarshal(b, m); err != nil {
+		t.Fatalf("Unmarshal of message with oneof: %v", err)
+	}
+	if x, ok := m.Union.(*Communique_Name); !ok || x.Name != "Barry" {
+		t.Errorf("After round trip, Union = %+v", m.Union)
+	}
+	if name := m.GetName(); name != "Barry" {
+		t.Errorf("After round trip, GetName = %q, want %q", name, "Barry")
+	}
+
+	// Let's try with a message in the oneof.
+	m.Union = &Communique_Msg{&Strings{StringField: String("deep deep string")}}
+	b, err = Marshal(m)
+	if err != nil {
+		t.Fatalf("Marshal of message with oneof set to message: %v", err)
+	}
+	if len(b) != 20 { // msg tag/wire (1) + msg len (1) + msg (1 + 1 + 16)
+		t.Errorf("Incorrect marshal of message with oneof set to message: %v", b)
+	}
+	m.Reset()
+	if err := Unmarshal(b, m); err != nil {
+		t.Fatalf("Unmarshal of message with oneof set to message: %v", err)
+	}
+	ss, ok := m.Union.(*Communique_Msg)
+	if !ok || ss.Msg.GetStringField() != "deep deep string" {
+		t.Errorf("After round trip with oneof set to message, Union = %+v", m.Union)
+	}
+}
+
+func TestInefficientPackedBool(t *testing.T) {
+	// https://github.com/golang/protobuf/issues/76
+	inp := []byte{
+		0x12, 0x02, // 0x12 = 2<<3|2; 2 bytes
+		// Usually a bool should take a single byte,
+		// but it is permitted to be any varint.
+ 0xb9, 0x30, + } + if err := Unmarshal(inp, new(MoreRepeated)); err != nil { + t.Error(err) + } +} + +// Benchmarks + +func testMsg() *GoTest { + pb := initGoTest(true) + const N = 1000 // Internally the library starts much smaller. + pb.F_Int32Repeated = make([]int32, N) + pb.F_DoubleRepeated = make([]float64, N) + for i := 0; i < N; i++ { + pb.F_Int32Repeated[i] = int32(i) + pb.F_DoubleRepeated[i] = float64(i) + } + return pb +} + +func bytesMsg() *GoTest { + pb := initGoTest(true) + buf := make([]byte, 4000) + for i := range buf { + buf[i] = byte(i) + } + pb.F_BytesDefaulted = buf + return pb +} + +func benchmarkMarshal(b *testing.B, pb Message, marshal func(Message) ([]byte, error)) { + d, _ := marshal(pb) + b.SetBytes(int64(len(d))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + marshal(pb) + } +} + +func benchmarkBufferMarshal(b *testing.B, pb Message) { + p := NewBuffer(nil) + benchmarkMarshal(b, pb, func(pb0 Message) ([]byte, error) { + p.Reset() + err := p.Marshal(pb0) + return p.Bytes(), err + }) +} + +func benchmarkSize(b *testing.B, pb Message) { + benchmarkMarshal(b, pb, func(pb0 Message) ([]byte, error) { + Size(pb) + return nil, nil + }) +} + +func newOf(pb Message) Message { + in := reflect.ValueOf(pb) + if in.IsNil() { + return pb + } + return reflect.New(in.Type().Elem()).Interface().(Message) +} + +func benchmarkUnmarshal(b *testing.B, pb Message, unmarshal func([]byte, Message) error) { + d, _ := Marshal(pb) + b.SetBytes(int64(len(d))) + pbd := newOf(pb) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + unmarshal(d, pbd) + } +} + +func benchmarkBufferUnmarshal(b *testing.B, pb Message) { + p := NewBuffer(nil) + benchmarkUnmarshal(b, pb, func(d []byte, pb0 Message) error { + p.SetBuf(d) + return p.Unmarshal(pb0) + }) +} + +// Benchmark{Marshal,BufferMarshal,Size,Unmarshal,BufferUnmarshal}{,Bytes} + +func BenchmarkMarshal(b *testing.B) { + benchmarkMarshal(b, testMsg(), Marshal) +} + +func BenchmarkBufferMarshal(b *testing.B) { + benchmarkBufferMarshal(b, testMsg()) +} + +func BenchmarkSize(b *testing.B) { + benchmarkSize(b, testMsg()) +} + +func BenchmarkUnmarshal(b *testing.B) { + benchmarkUnmarshal(b, testMsg(), Unmarshal) +} + +func BenchmarkBufferUnmarshal(b *testing.B) { + benchmarkBufferUnmarshal(b, testMsg()) +} + +func BenchmarkMarshalBytes(b *testing.B) { + benchmarkMarshal(b, bytesMsg(), Marshal) +} + +func BenchmarkBufferMarshalBytes(b *testing.B) { + benchmarkBufferMarshal(b, bytesMsg()) +} + +func BenchmarkSizeBytes(b *testing.B) { + benchmarkSize(b, bytesMsg()) +} + +func BenchmarkUnmarshalBytes(b *testing.B) { + benchmarkUnmarshal(b, bytesMsg(), Unmarshal) +} + +func BenchmarkBufferUnmarshalBytes(b *testing.B) { + benchmarkBufferUnmarshal(b, bytesMsg()) +} + +func BenchmarkUnmarshalUnrecognizedFields(b *testing.B) { + b.StopTimer() + pb := initGoTestField() + skip := &GoSkipTest{ + SkipInt32: Int32(32), + SkipFixed32: Uint32(3232), + SkipFixed64: Uint64(6464), + SkipString: String("skipper"), + Skipgroup: &GoSkipTest_SkipGroup{ + GroupInt32: Int32(75), + GroupString: String("wxyz"), + }, + } + + pbd := new(GoTestField) + p := NewBuffer(nil) + p.Marshal(pb) + p.Marshal(skip) + p2 := NewBuffer(nil) + + b.StartTimer() + for i := 0; i < b.N; i++ { + p2.SetBuf(p.Bytes()) + p2.Unmarshal(pbd) + } +} diff --git a/vendor/github.com/golang/protobuf/proto/any_test.go b/vendor/github.com/golang/protobuf/proto/any_test.go new file mode 100644 index 000000000000..1a3c22ed4160 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/any_test.go @@ -0,0 +1,300 @@ +// 
Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto_test + +import ( + "strings" + "testing" + + "github.com/golang/protobuf/proto" + + pb "github.com/golang/protobuf/proto/proto3_proto" + testpb "github.com/golang/protobuf/proto/testdata" + anypb "github.com/golang/protobuf/ptypes/any" +) + +var ( + expandedMarshaler = proto.TextMarshaler{ExpandAny: true} + expandedCompactMarshaler = proto.TextMarshaler{Compact: true, ExpandAny: true} +) + +// anyEqual reports whether two messages which may be google.protobuf.Any or may +// contain google.protobuf.Any fields are equal. We can't use proto.Equal for +// comparison, because semantically equivalent messages may be marshaled to +// binary in different tag order. Instead, trust that TextMarshaler with +// ExpandAny option works and compare the text marshaling results. +func anyEqual(got, want proto.Message) bool { + // if messages are proto.Equal, no need to marshal. 
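+	// (Two Any-bearing messages can be semantically equal while their
+	// binary forms differ, so this is only a fast path, not the real test.)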
+ if proto.Equal(got, want) { + return true + } + g := expandedMarshaler.Text(got) + w := expandedMarshaler.Text(want) + return g == w +} + +type golden struct { + m proto.Message + t, c string +} + +var goldenMessages = makeGolden() + +func makeGolden() []golden { + nested := &pb.Nested{Bunny: "Monty"} + nb, err := proto.Marshal(nested) + if err != nil { + panic(err) + } + m1 := &pb.Message{ + Name: "David", + ResultCount: 47, + Anything: &anypb.Any{TypeUrl: "type.googleapis.com/" + proto.MessageName(nested), Value: nb}, + } + m2 := &pb.Message{ + Name: "David", + ResultCount: 47, + Anything: &anypb.Any{TypeUrl: "http://[::1]/type.googleapis.com/" + proto.MessageName(nested), Value: nb}, + } + m3 := &pb.Message{ + Name: "David", + ResultCount: 47, + Anything: &anypb.Any{TypeUrl: `type.googleapis.com/"/` + proto.MessageName(nested), Value: nb}, + } + m4 := &pb.Message{ + Name: "David", + ResultCount: 47, + Anything: &anypb.Any{TypeUrl: "type.googleapis.com/a/path/" + proto.MessageName(nested), Value: nb}, + } + m5 := &anypb.Any{TypeUrl: "type.googleapis.com/" + proto.MessageName(nested), Value: nb} + + any1 := &testpb.MyMessage{Count: proto.Int32(47), Name: proto.String("David")} + proto.SetExtension(any1, testpb.E_Ext_More, &testpb.Ext{Data: proto.String("foo")}) + proto.SetExtension(any1, testpb.E_Ext_Text, proto.String("bar")) + any1b, err := proto.Marshal(any1) + if err != nil { + panic(err) + } + any2 := &testpb.MyMessage{Count: proto.Int32(42), Bikeshed: testpb.MyMessage_GREEN.Enum(), RepBytes: [][]byte{[]byte("roboto")}} + proto.SetExtension(any2, testpb.E_Ext_More, &testpb.Ext{Data: proto.String("baz")}) + any2b, err := proto.Marshal(any2) + if err != nil { + panic(err) + } + m6 := &pb.Message{ + Name: "David", + ResultCount: 47, + Anything: &anypb.Any{TypeUrl: "type.googleapis.com/" + proto.MessageName(any1), Value: any1b}, + ManyThings: []*anypb.Any{ + &anypb.Any{TypeUrl: "type.googleapis.com/" + proto.MessageName(any2), Value: any2b}, + &anypb.Any{TypeUrl: "type.googleapis.com/" + proto.MessageName(any1), Value: any1b}, + }, + } + + const ( + m1Golden = ` +name: "David" +result_count: 47 +anything: < + [type.googleapis.com/proto3_proto.Nested]: < + bunny: "Monty" + > +> +` + m2Golden = ` +name: "David" +result_count: 47 +anything: < + ["http://[::1]/type.googleapis.com/proto3_proto.Nested"]: < + bunny: "Monty" + > +> +` + m3Golden = ` +name: "David" +result_count: 47 +anything: < + ["type.googleapis.com/\"/proto3_proto.Nested"]: < + bunny: "Monty" + > +> +` + m4Golden = ` +name: "David" +result_count: 47 +anything: < + [type.googleapis.com/a/path/proto3_proto.Nested]: < + bunny: "Monty" + > +> +` + m5Golden = ` +[type.googleapis.com/proto3_proto.Nested]: < + bunny: "Monty" +> +` + m6Golden = ` +name: "David" +result_count: 47 +anything: < + [type.googleapis.com/testdata.MyMessage]: < + count: 47 + name: "David" + [testdata.Ext.more]: < + data: "foo" + > + [testdata.Ext.text]: "bar" + > +> +many_things: < + [type.googleapis.com/testdata.MyMessage]: < + count: 42 + bikeshed: GREEN + rep_bytes: "roboto" + [testdata.Ext.more]: < + data: "baz" + > + > +> +many_things: < + [type.googleapis.com/testdata.MyMessage]: < + count: 47 + name: "David" + [testdata.Ext.more]: < + data: "foo" + > + [testdata.Ext.text]: "bar" + > +> +` + ) + return []golden{ + {m1, strings.TrimSpace(m1Golden) + "\n", strings.TrimSpace(compact(m1Golden)) + " "}, + {m2, strings.TrimSpace(m2Golden) + "\n", strings.TrimSpace(compact(m2Golden)) + " "}, + {m3, strings.TrimSpace(m3Golden) + "\n", 
strings.TrimSpace(compact(m3Golden)) + " "}, + {m4, strings.TrimSpace(m4Golden) + "\n", strings.TrimSpace(compact(m4Golden)) + " "}, + {m5, strings.TrimSpace(m5Golden) + "\n", strings.TrimSpace(compact(m5Golden)) + " "}, + {m6, strings.TrimSpace(m6Golden) + "\n", strings.TrimSpace(compact(m6Golden)) + " "}, + } +} + +func TestMarshalGolden(t *testing.T) { + for _, tt := range goldenMessages { + if got, want := expandedMarshaler.Text(tt.m), tt.t; got != want { + t.Errorf("message %v: got:\n%s\nwant:\n%s", tt.m, got, want) + } + if got, want := expandedCompactMarshaler.Text(tt.m), tt.c; got != want { + t.Errorf("message %v: got:\n`%s`\nwant:\n`%s`", tt.m, got, want) + } + } +} + +func TestUnmarshalGolden(t *testing.T) { + for _, tt := range goldenMessages { + want := tt.m + got := proto.Clone(tt.m) + got.Reset() + if err := proto.UnmarshalText(tt.t, got); err != nil { + t.Errorf("failed to unmarshal\n%s\nerror: %v", tt.t, err) + } + if !anyEqual(got, want) { + t.Errorf("message:\n%s\ngot:\n%s\nwant:\n%s", tt.t, got, want) + } + got.Reset() + if err := proto.UnmarshalText(tt.c, got); err != nil { + t.Errorf("failed to unmarshal\n%s\nerror: %v", tt.c, err) + } + if !anyEqual(got, want) { + t.Errorf("message:\n%s\ngot:\n%s\nwant:\n%s", tt.c, got, want) + } + } +} + +func TestMarshalUnknownAny(t *testing.T) { + m := &pb.Message{ + Anything: &anypb.Any{ + TypeUrl: "foo", + Value: []byte("bar"), + }, + } + want := `anything: < + type_url: "foo" + value: "bar" +> +` + got := expandedMarshaler.Text(m) + if got != want { + t.Errorf("got\n`%s`\nwant\n`%s`", got, want) + } +} + +func TestAmbiguousAny(t *testing.T) { + pb := &anypb.Any{} + err := proto.UnmarshalText(` + type_url: "ttt/proto3_proto.Nested" + value: "\n\x05Monty" + `, pb) + t.Logf("result: %v (error: %v)", expandedMarshaler.Text(pb), err) + if err != nil { + t.Errorf("failed to parse ambiguous Any message: %v", err) + } +} + +func TestUnmarshalOverwriteAny(t *testing.T) { + pb := &anypb.Any{} + err := proto.UnmarshalText(` + [type.googleapis.com/a/path/proto3_proto.Nested]: < + bunny: "Monty" + > + [type.googleapis.com/a/path/proto3_proto.Nested]: < + bunny: "Rabbit of Caerbannog" + > + `, pb) + want := `line 7: Any message unpacked multiple times, or "type_url" already set` + if err.Error() != want { + t.Errorf("incorrect error.\nHave: %v\nWant: %v", err.Error(), want) + } +} + +func TestUnmarshalAnyMixAndMatch(t *testing.T) { + pb := &anypb.Any{} + err := proto.UnmarshalText(` + value: "\n\x05Monty" + [type.googleapis.com/a/path/proto3_proto.Nested]: < + bunny: "Rabbit of Caerbannog" + > + `, pb) + want := `line 5: Any message unpacked multiple times, or "value" already set` + if err.Error() != want { + t.Errorf("incorrect error.\nHave: %v\nWant: %v", err.Error(), want) + } +} diff --git a/vendor/github.com/golang/protobuf/proto/clone_test.go b/vendor/github.com/golang/protobuf/proto/clone_test.go new file mode 100644 index 000000000000..f607ff49eba0 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/clone_test.go @@ -0,0 +1,300 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2011 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto_test + +import ( + "testing" + + "github.com/golang/protobuf/proto" + + proto3pb "github.com/golang/protobuf/proto/proto3_proto" + pb "github.com/golang/protobuf/proto/testdata" +) + +var cloneTestMessage = &pb.MyMessage{ + Count: proto.Int32(42), + Name: proto.String("Dave"), + Pet: []string{"bunny", "kitty", "horsey"}, + Inner: &pb.InnerMessage{ + Host: proto.String("niles"), + Port: proto.Int32(9099), + Connected: proto.Bool(true), + }, + Others: []*pb.OtherMessage{ + { + Value: []byte("some bytes"), + }, + }, + Somegroup: &pb.MyMessage_SomeGroup{ + GroupField: proto.Int32(6), + }, + RepBytes: [][]byte{[]byte("sham"), []byte("wow")}, +} + +func init() { + ext := &pb.Ext{ + Data: proto.String("extension"), + } + if err := proto.SetExtension(cloneTestMessage, pb.E_Ext_More, ext); err != nil { + panic("SetExtension: " + err.Error()) + } +} + +func TestClone(t *testing.T) { + m := proto.Clone(cloneTestMessage).(*pb.MyMessage) + if !proto.Equal(m, cloneTestMessage) { + t.Errorf("Clone(%v) = %v", cloneTestMessage, m) + } + + // Verify it was a deep copy. + *m.Inner.Port++ + if proto.Equal(m, cloneTestMessage) { + t.Error("Mutating clone changed the original") + } + // Byte fields and repeated fields should be copied. 
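+	// Comparing element addresses below is an aliasing check: had Clone shared
+	// a backing array with the original, the two pointers would be equal.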
+ if &m.Pet[0] == &cloneTestMessage.Pet[0] { + t.Error("Pet: repeated field not copied") + } + if &m.Others[0] == &cloneTestMessage.Others[0] { + t.Error("Others: repeated field not copied") + } + if &m.Others[0].Value[0] == &cloneTestMessage.Others[0].Value[0] { + t.Error("Others[0].Value: bytes field not copied") + } + if &m.RepBytes[0] == &cloneTestMessage.RepBytes[0] { + t.Error("RepBytes: repeated field not copied") + } + if &m.RepBytes[0][0] == &cloneTestMessage.RepBytes[0][0] { + t.Error("RepBytes[0]: bytes field not copied") + } +} + +func TestCloneNil(t *testing.T) { + var m *pb.MyMessage + if c := proto.Clone(m); !proto.Equal(m, c) { + t.Errorf("Clone(%v) = %v", m, c) + } +} + +var mergeTests = []struct { + src, dst, want proto.Message +}{ + { + src: &pb.MyMessage{ + Count: proto.Int32(42), + }, + dst: &pb.MyMessage{ + Name: proto.String("Dave"), + }, + want: &pb.MyMessage{ + Count: proto.Int32(42), + Name: proto.String("Dave"), + }, + }, + { + src: &pb.MyMessage{ + Inner: &pb.InnerMessage{ + Host: proto.String("hey"), + Connected: proto.Bool(true), + }, + Pet: []string{"horsey"}, + Others: []*pb.OtherMessage{ + { + Value: []byte("some bytes"), + }, + }, + }, + dst: &pb.MyMessage{ + Inner: &pb.InnerMessage{ + Host: proto.String("niles"), + Port: proto.Int32(9099), + }, + Pet: []string{"bunny", "kitty"}, + Others: []*pb.OtherMessage{ + { + Key: proto.Int64(31415926535), + }, + { + // Explicitly test a src=nil field + Inner: nil, + }, + }, + }, + want: &pb.MyMessage{ + Inner: &pb.InnerMessage{ + Host: proto.String("hey"), + Connected: proto.Bool(true), + Port: proto.Int32(9099), + }, + Pet: []string{"bunny", "kitty", "horsey"}, + Others: []*pb.OtherMessage{ + { + Key: proto.Int64(31415926535), + }, + {}, + { + Value: []byte("some bytes"), + }, + }, + }, + }, + { + src: &pb.MyMessage{ + RepBytes: [][]byte{[]byte("wow")}, + }, + dst: &pb.MyMessage{ + Somegroup: &pb.MyMessage_SomeGroup{ + GroupField: proto.Int32(6), + }, + RepBytes: [][]byte{[]byte("sham")}, + }, + want: &pb.MyMessage{ + Somegroup: &pb.MyMessage_SomeGroup{ + GroupField: proto.Int32(6), + }, + RepBytes: [][]byte{[]byte("sham"), []byte("wow")}, + }, + }, + // Check that a scalar bytes field replaces rather than appends. + { + src: &pb.OtherMessage{Value: []byte("foo")}, + dst: &pb.OtherMessage{Value: []byte("bar")}, + want: &pb.OtherMessage{Value: []byte("foo")}, + }, + { + src: &pb.MessageWithMap{ + NameMapping: map[int32]string{6: "Nigel"}, + MsgMapping: map[int64]*pb.FloatingPoint{ + 0x4001: &pb.FloatingPoint{F: proto.Float64(2.0)}, + 0x4002: &pb.FloatingPoint{ + F: proto.Float64(2.0), + }, + }, + ByteMapping: map[bool][]byte{true: []byte("wowsa")}, + }, + dst: &pb.MessageWithMap{ + NameMapping: map[int32]string{ + 6: "Bruce", // should be overwritten + 7: "Andrew", + }, + MsgMapping: map[int64]*pb.FloatingPoint{ + 0x4002: &pb.FloatingPoint{ + F: proto.Float64(3.0), + Exact: proto.Bool(true), + }, // the entire message should be overwritten + }, + }, + want: &pb.MessageWithMap{ + NameMapping: map[int32]string{ + 6: "Nigel", + 7: "Andrew", + }, + MsgMapping: map[int64]*pb.FloatingPoint{ + 0x4001: &pb.FloatingPoint{F: proto.Float64(2.0)}, + 0x4002: &pb.FloatingPoint{ + F: proto.Float64(2.0), + }, + }, + ByteMapping: map[bool][]byte{true: []byte("wowsa")}, + }, + }, + // proto3 shouldn't merge zero values, + // in the same way that proto2 shouldn't merge nils. 
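+	// In the next case src's Data is []byte(""): present but empty is still a
+	// proto3 zero value, so it must not overwrite dst's non-empty Data.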
+ { + src: &proto3pb.Message{ + Name: "Aaron", + Data: []byte(""), // zero value, but not nil + }, + dst: &proto3pb.Message{ + HeightInCm: 176, + Data: []byte("texas!"), + }, + want: &proto3pb.Message{ + Name: "Aaron", + HeightInCm: 176, + Data: []byte("texas!"), + }, + }, + // Oneof fields should merge by assignment. + { + src: &pb.Communique{ + Union: &pb.Communique_Number{41}, + }, + dst: &pb.Communique{ + Union: &pb.Communique_Name{"Bobby Tables"}, + }, + want: &pb.Communique{ + Union: &pb.Communique_Number{41}, + }, + }, + // Oneof nil is the same as not set. + { + src: &pb.Communique{}, + dst: &pb.Communique{ + Union: &pb.Communique_Name{"Bobby Tables"}, + }, + want: &pb.Communique{ + Union: &pb.Communique_Name{"Bobby Tables"}, + }, + }, + { + src: &proto3pb.Message{ + Terrain: map[string]*proto3pb.Nested{ + "kay_a": &proto3pb.Nested{Cute: true}, // replace + "kay_b": &proto3pb.Nested{Bunny: "rabbit"}, // insert + }, + }, + dst: &proto3pb.Message{ + Terrain: map[string]*proto3pb.Nested{ + "kay_a": &proto3pb.Nested{Bunny: "lost"}, // replaced + "kay_c": &proto3pb.Nested{Bunny: "bunny"}, // keep + }, + }, + want: &proto3pb.Message{ + Terrain: map[string]*proto3pb.Nested{ + "kay_a": &proto3pb.Nested{Cute: true}, + "kay_b": &proto3pb.Nested{Bunny: "rabbit"}, + "kay_c": &proto3pb.Nested{Bunny: "bunny"}, + }, + }, + }, +} + +func TestMerge(t *testing.T) { + for _, m := range mergeTests { + got := proto.Clone(m.dst) + proto.Merge(got, m.src) + if !proto.Equal(got, m.want) { + t.Errorf("Merge(%v, %v)\n got %v\nwant %v\n", m.dst, m.src, got, m.want) + } + } +} diff --git a/vendor/github.com/golang/protobuf/proto/decode_test.go b/vendor/github.com/golang/protobuf/proto/decode_test.go new file mode 100644 index 000000000000..b1f1304493e8 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/decode_test.go @@ -0,0 +1,256 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package proto_test + +import ( + "fmt" + "testing" + + "github.com/golang/protobuf/proto" + tpb "github.com/golang/protobuf/proto/proto3_proto" +) + +var ( + bytesBlackhole []byte + msgBlackhole = new(tpb.Message) +) + +// BenchmarkVarint32ArraySmall shows the performance on an array of small int32 fields (1 and +// 2 bytes long). +func BenchmarkVarint32ArraySmall(b *testing.B) { + for i := uint(1); i <= 10; i++ { + dist := genInt32Dist([7]int{0, 3, 1}, 1<unmarshal. +} + +func TestMarshalUnmarshalRepeatedExtension(t *testing.T) { + // Add a repeated extension to the result. + tests := []struct { + name string + ext []*pb.ComplexExtension + }{ + { + "two fields", + []*pb.ComplexExtension{ + {First: proto.Int32(7)}, + {Second: proto.Int32(11)}, + }, + }, + { + "repeated field", + []*pb.ComplexExtension{ + {Third: []int32{1000}}, + {Third: []int32{2000}}, + }, + }, + { + "two fields and repeated field", + []*pb.ComplexExtension{ + {Third: []int32{1000}}, + {First: proto.Int32(9)}, + {Second: proto.Int32(21)}, + {Third: []int32{2000}}, + }, + }, + } + for _, test := range tests { + // Marshal message with a repeated extension. + msg1 := new(pb.OtherMessage) + err := proto.SetExtension(msg1, pb.E_RComplex, test.ext) + if err != nil { + t.Fatalf("[%s] Error setting extension: %v", test.name, err) + } + b, err := proto.Marshal(msg1) + if err != nil { + t.Fatalf("[%s] Error marshaling message: %v", test.name, err) + } + + // Unmarshal and read the merged proto. + msg2 := new(pb.OtherMessage) + err = proto.Unmarshal(b, msg2) + if err != nil { + t.Fatalf("[%s] Error unmarshaling message: %v", test.name, err) + } + e, err := proto.GetExtension(msg2, pb.E_RComplex) + if err != nil { + t.Fatalf("[%s] Error getting extension: %v", test.name, err) + } + ext := e.([]*pb.ComplexExtension) + if ext == nil { + t.Fatalf("[%s] Invalid extension", test.name) + } + if !reflect.DeepEqual(ext, test.ext) { + t.Errorf("[%s] Wrong value for ComplexExtension: got: %v want: %v\n", test.name, ext, test.ext) + } + } +} + +func TestUnmarshalRepeatingNonRepeatedExtension(t *testing.T) { + // We may see multiple instances of the same extension in the wire + // format. For example, the proto compiler may encode custom options in + // this way. Here, we verify that we merge the extensions together. + tests := []struct { + name string + ext []*pb.ComplexExtension + }{ + { + "two fields", + []*pb.ComplexExtension{ + {First: proto.Int32(7)}, + {Second: proto.Int32(11)}, + }, + }, + { + "repeated field", + []*pb.ComplexExtension{ + {Third: []int32{1000}}, + {Third: []int32{2000}}, + }, + }, + { + "two fields and repeated field", + []*pb.ComplexExtension{ + {Third: []int32{1000}}, + {First: proto.Int32(9)}, + {Second: proto.Int32(21)}, + {Third: []int32{2000}}, + }, + }, + } + for _, test := range tests { + var buf bytes.Buffer + var want pb.ComplexExtension + + // Generate a serialized representation of a repeated extension + // by catenating bytes together. + for i, e := range test.ext { + // Merge to create the wanted proto. + proto.Merge(&want, e) + + // serialize the message + msg := new(pb.OtherMessage) + err := proto.SetExtension(msg, pb.E_Complex, e) + if err != nil { + t.Fatalf("[%s] Error setting extension %d: %v", test.name, i, err) + } + b, err := proto.Marshal(msg) + if err != nil { + t.Fatalf("[%s] Error marshaling message %d: %v", test.name, i, err) + } + buf.Write(b) + } + + // Unmarshal and read the merged proto. 
+ msg2 := new(pb.OtherMessage) + err := proto.Unmarshal(buf.Bytes(), msg2) + if err != nil { + t.Fatalf("[%s] Error unmarshaling message: %v", test.name, err) + } + e, err := proto.GetExtension(msg2, pb.E_Complex) + if err != nil { + t.Fatalf("[%s] Error getting extension: %v", test.name, err) + } + ext := e.(*pb.ComplexExtension) + if ext == nil { + t.Fatalf("[%s] Invalid extension", test.name) + } + if !reflect.DeepEqual(*ext, want) { + t.Errorf("[%s] Wrong value for ComplexExtension: got: %s want: %s\n", test.name, ext, want) + } + } +} + +func TestClearAllExtensions(t *testing.T) { + // unregistered extension + desc := &proto.ExtensionDesc{ + ExtendedType: (*pb.MyMessage)(nil), + ExtensionType: (*bool)(nil), + Field: 101010100, + Name: "emptyextension", + Tag: "varint,0,opt", + } + m := &pb.MyMessage{} + if proto.HasExtension(m, desc) { + t.Errorf("proto.HasExtension(%s): got true, want false", proto.MarshalTextString(m)) + } + if err := proto.SetExtension(m, desc, proto.Bool(true)); err != nil { + t.Errorf("proto.SetExtension(m, desc, true): got error %q, want nil", err) + } + if !proto.HasExtension(m, desc) { + t.Errorf("proto.HasExtension(%s): got false, want true", proto.MarshalTextString(m)) + } + proto.ClearAllExtensions(m) + if proto.HasExtension(m, desc) { + t.Errorf("proto.HasExtension(%s): got true, want false", proto.MarshalTextString(m)) + } +} + +func TestMarshalRace(t *testing.T) { + // unregistered extension + desc := &proto.ExtensionDesc{ + ExtendedType: (*pb.MyMessage)(nil), + ExtensionType: (*bool)(nil), + Field: 101010100, + Name: "emptyextension", + Tag: "varint,0,opt", + } + + m := &pb.MyMessage{Count: proto.Int32(4)} + if err := proto.SetExtension(m, desc, proto.Bool(true)); err != nil { + t.Errorf("proto.SetExtension(m, desc, true): got error %q, want nil", err) + } + + var g errgroup.Group + for n := 3; n > 0; n-- { + g.Go(func() error { + _, err := proto.Marshal(m) + return err + }) + } + if err := g.Wait(); err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/golang/protobuf/proto/map_test.go b/vendor/github.com/golang/protobuf/proto/map_test.go new file mode 100644 index 000000000000..313e8792452f --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/map_test.go @@ -0,0 +1,46 @@ +package proto_test + +import ( + "fmt" + "testing" + + "github.com/golang/protobuf/proto" + ppb "github.com/golang/protobuf/proto/proto3_proto" +) + +func marshalled() []byte { + m := &ppb.IntMaps{} + for i := 0; i < 1000; i++ { + m.Maps = append(m.Maps, &ppb.IntMap{ + Rtt: map[int32]int32{1: 2}, + }) + } + b, err := proto.Marshal(m) + if err != nil { + panic(fmt.Sprintf("Can't marshal %+v: %v", m, err)) + } + return b +} + +func BenchmarkConcurrentMapUnmarshal(b *testing.B) { + in := marshalled() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + var out ppb.IntMaps + if err := proto.Unmarshal(in, &out); err != nil { + b.Errorf("Can't unmarshal ppb.IntMaps: %v", err) + } + } + }) +} + +func BenchmarkSequentialMapUnmarshal(b *testing.B) { + in := marshalled() + b.ResetTimer() + for i := 0; i < b.N; i++ { + var out ppb.IntMaps + if err := proto.Unmarshal(in, &out); err != nil { + b.Errorf("Can't unmarshal ppb.IntMaps: %v", err) + } + } +} diff --git a/vendor/github.com/golang/protobuf/proto/message_set_test.go b/vendor/github.com/golang/protobuf/proto/message_set_test.go new file mode 100644 index 000000000000..353a3ea7694b --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/message_set_test.go @@ -0,0 +1,66 @@ +// Go support for Protocol Buffers - 
Google's data interchange format +// +// Copyright 2014 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "bytes" + "testing" +) + +func TestUnmarshalMessageSetWithDuplicate(t *testing.T) { + // Check that a repeated message set entry will be concatenated. + in := &messageSet{ + Item: []*_MessageSet_Item{ + {TypeId: Int32(12345), Message: []byte("hoo")}, + {TypeId: Int32(12345), Message: []byte("hah")}, + }, + } + b, err := Marshal(in) + if err != nil { + t.Fatalf("Marshal: %v", err) + } + t.Logf("Marshaled bytes: %q", b) + + var extensions XXX_InternalExtensions + if err := UnmarshalMessageSet(b, &extensions); err != nil { + t.Fatalf("UnmarshalMessageSet: %v", err) + } + ext, ok := extensions.p.extensionMap[12345] + if !ok { + t.Fatalf("Didn't retrieve extension 12345; map is %v", extensions.p.extensionMap) + } + // Skip wire type/field number and length varints. + got := skipVarint(skipVarint(ext.enc)) + if want := []byte("hoohah"); !bytes.Equal(got, want) { + t.Errorf("Combined extension is %q, want %q", got, want) + } +} diff --git a/vendor/github.com/golang/protobuf/proto/proto3_test.go b/vendor/github.com/golang/protobuf/proto/proto3_test.go new file mode 100644 index 000000000000..735837f2de0e --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/proto3_test.go @@ -0,0 +1,135 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2014 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto_test + +import ( + "testing" + + "github.com/golang/protobuf/proto" + pb "github.com/golang/protobuf/proto/proto3_proto" + tpb "github.com/golang/protobuf/proto/testdata" +) + +func TestProto3ZeroValues(t *testing.T) { + tests := []struct { + desc string + m proto.Message + }{ + {"zero message", &pb.Message{}}, + {"empty bytes field", &pb.Message{Data: []byte{}}}, + } + for _, test := range tests { + b, err := proto.Marshal(test.m) + if err != nil { + t.Errorf("%s: proto.Marshal: %v", test.desc, err) + continue + } + if len(b) > 0 { + t.Errorf("%s: Encoding is non-empty: %q", test.desc, b) + } + } +} + +func TestRoundTripProto3(t *testing.T) { + m := &pb.Message{ + Name: "David", // (2 | 1<<3): 0x0a 0x05 "David" + Hilarity: pb.Message_PUNS, // (0 | 2<<3): 0x10 0x01 + HeightInCm: 178, // (0 | 3<<3): 0x18 0xb2 0x01 + Data: []byte("roboto"), // (2 | 4<<3): 0x20 0x06 "roboto" + ResultCount: 47, // (0 | 7<<3): 0x38 0x2f + TrueScotsman: true, // (0 | 8<<3): 0x40 0x01 + Score: 8.1, // (5 | 9<<3): 0x4d <8.1> + + Key: []uint64{1, 0xdeadbeef}, + Nested: &pb.Nested{ + Bunny: "Monty", + }, + } + t.Logf(" m: %v", m) + + b, err := proto.Marshal(m) + if err != nil { + t.Fatalf("proto.Marshal: %v", err) + } + t.Logf(" b: %q", b) + + m2 := new(pb.Message) + if err := proto.Unmarshal(b, m2); err != nil { + t.Fatalf("proto.Unmarshal: %v", err) + } + t.Logf("m2: %v", m2) + + if !proto.Equal(m, m2) { + t.Errorf("proto.Equal returned false:\n m: %v\nm2: %v", m, m2) + } +} + +func TestGettersForBasicTypesExist(t *testing.T) { + var m pb.Message + if got := m.GetNested().GetBunny(); got != "" { + t.Errorf("m.GetNested().GetBunny() = %q, want empty string", got) + } + if got := m.GetNested().GetCute(); got { + t.Errorf("m.GetNested().GetCute() = %t, want false", got) + } +} + +func TestProto3SetDefaults(t *testing.T) { + in := &pb.Message{ + Terrain: map[string]*pb.Nested{ + "meadow": new(pb.Nested), + }, + Proto2Field: new(tpb.SubDefaults), + Proto2Value: map[string]*tpb.SubDefaults{ + "badlands": new(tpb.SubDefaults), + }, + } + + got := proto.Clone(in).(*pb.Message) + proto.SetDefaults(got) + + // There are no defaults in proto3. Everything should be the zero value, but + // we need to remember to set defaults for nested proto2 messages. 
+ want := &pb.Message{ + Terrain: map[string]*pb.Nested{ + "meadow": new(pb.Nested), + }, + Proto2Field: &tpb.SubDefaults{N: proto.Int64(7)}, + Proto2Value: map[string]*tpb.SubDefaults{ + "badlands": &tpb.SubDefaults{N: proto.Int64(7)}, + }, + } + + if !proto.Equal(got, want) { + t.Errorf("with in = %v\nproto.SetDefaults(in) =>\ngot %v\nwant %v", in, got, want) + } +} diff --git a/vendor/github.com/golang/protobuf/proto/size2_test.go b/vendor/github.com/golang/protobuf/proto/size2_test.go new file mode 100644 index 000000000000..a2729c39a1b8 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/size2_test.go @@ -0,0 +1,63 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2012 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "testing" +) + +// This is a separate file and package from size_test.go because that one uses +// generated messages and thus may not be in package proto without having a circular +// dependency, whereas this file tests unexported details of size.go. + +func TestVarintSize(t *testing.T) { + // Check the edge cases carefully. + testCases := []struct { + n uint64 + size int + }{ + {0, 1}, + {1, 1}, + {127, 1}, + {128, 2}, + {16383, 2}, + {16384, 3}, + {1<<63 - 1, 9}, + {1 << 63, 10}, + } + for _, tc := range testCases { + size := sizeVarint(tc.n) + if size != tc.size { + t.Errorf("sizeVarint(%d) = %d, want %d", tc.n, size, tc.size) + } + } +} diff --git a/vendor/github.com/golang/protobuf/proto/size_test.go b/vendor/github.com/golang/protobuf/proto/size_test.go new file mode 100644 index 000000000000..af1034dc7b89 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/size_test.go @@ -0,0 +1,164 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2012 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto_test + +import ( + "log" + "strings" + "testing" + + . "github.com/golang/protobuf/proto" + proto3pb "github.com/golang/protobuf/proto/proto3_proto" + pb "github.com/golang/protobuf/proto/testdata" +) + +var messageWithExtension1 = &pb.MyMessage{Count: Int32(7)} + +// messageWithExtension2 is in equal_test.go. +var messageWithExtension3 = &pb.MyMessage{Count: Int32(8)} + +func init() { + if err := SetExtension(messageWithExtension1, pb.E_Ext_More, &pb.Ext{Data: String("Abbott")}); err != nil { + log.Panicf("SetExtension: %v", err) + } + if err := SetExtension(messageWithExtension3, pb.E_Ext_More, &pb.Ext{Data: String("Costello")}); err != nil { + log.Panicf("SetExtension: %v", err) + } + + // Force messageWithExtension3 to have the extension encoded. + Marshal(messageWithExtension3) + +} + +var SizeTests = []struct { + desc string + pb Message +}{ + {"empty", &pb.OtherMessage{}}, + // Basic types. + {"bool", &pb.Defaults{F_Bool: Bool(true)}}, + {"int32", &pb.Defaults{F_Int32: Int32(12)}}, + {"negative int32", &pb.Defaults{F_Int32: Int32(-1)}}, + {"small int64", &pb.Defaults{F_Int64: Int64(1)}}, + {"big int64", &pb.Defaults{F_Int64: Int64(1 << 20)}}, + {"negative int64", &pb.Defaults{F_Int64: Int64(-1)}}, + {"fixed32", &pb.Defaults{F_Fixed32: Uint32(71)}}, + {"fixed64", &pb.Defaults{F_Fixed64: Uint64(72)}}, + {"uint32", &pb.Defaults{F_Uint32: Uint32(123)}}, + {"uint64", &pb.Defaults{F_Uint64: Uint64(124)}}, + {"float", &pb.Defaults{F_Float: Float32(12.6)}}, + {"double", &pb.Defaults{F_Double: Float64(13.9)}}, + {"string", &pb.Defaults{F_String: String("niles")}}, + {"bytes", &pb.Defaults{F_Bytes: []byte("wowsa")}}, + {"bytes, empty", &pb.Defaults{F_Bytes: []byte{}}}, + {"sint32", &pb.Defaults{F_Sint32: Int32(65)}}, + {"sint64", &pb.Defaults{F_Sint64: Int64(67)}}, + {"enum", &pb.Defaults{F_Enum: pb.Defaults_BLUE.Enum()}}, + // Repeated. 
+ {"empty repeated bool", &pb.MoreRepeated{Bools: []bool{}}}, + {"repeated bool", &pb.MoreRepeated{Bools: []bool{false, true, true, false}}}, + {"packed repeated bool", &pb.MoreRepeated{BoolsPacked: []bool{false, true, true, false, true, true, true}}}, + {"repeated int32", &pb.MoreRepeated{Ints: []int32{1, 12203, 1729, -1}}}, + {"repeated int32 packed", &pb.MoreRepeated{IntsPacked: []int32{1, 12203, 1729}}}, + {"repeated int64 packed", &pb.MoreRepeated{Int64SPacked: []int64{ + // Need enough large numbers to verify that the header is counting the number of bytes + // for the field, not the number of elements. + 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, + 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, + }}}, + {"repeated string", &pb.MoreRepeated{Strings: []string{"r", "ken", "gri"}}}, + {"repeated fixed", &pb.MoreRepeated{Fixeds: []uint32{1, 2, 3, 4}}}, + // Nested. + {"nested", &pb.OldMessage{Nested: &pb.OldMessage_Nested{Name: String("whatever")}}}, + {"group", &pb.GroupOld{G: &pb.GroupOld_G{X: Int32(12345)}}}, + // Other things. + {"unrecognized", &pb.MoreRepeated{XXX_unrecognized: []byte{13<<3 | 0, 4}}}, + {"extension (unencoded)", messageWithExtension1}, + {"extension (encoded)", messageWithExtension3}, + // proto3 message + {"proto3 empty", &proto3pb.Message{}}, + {"proto3 bool", &proto3pb.Message{TrueScotsman: true}}, + {"proto3 int64", &proto3pb.Message{ResultCount: 1}}, + {"proto3 uint32", &proto3pb.Message{HeightInCm: 123}}, + {"proto3 float", &proto3pb.Message{Score: 12.6}}, + {"proto3 string", &proto3pb.Message{Name: "Snezana"}}, + {"proto3 bytes", &proto3pb.Message{Data: []byte("wowsa")}}, + {"proto3 bytes, empty", &proto3pb.Message{Data: []byte{}}}, + {"proto3 enum", &proto3pb.Message{Hilarity: proto3pb.Message_PUNS}}, + {"proto3 map field with empty bytes", &proto3pb.MessageWithMap{ByteMapping: map[bool][]byte{false: []byte{}}}}, + + {"map field", &pb.MessageWithMap{NameMapping: map[int32]string{1: "Rob", 7: "Andrew"}}}, + {"map field with message", &pb.MessageWithMap{MsgMapping: map[int64]*pb.FloatingPoint{0x7001: &pb.FloatingPoint{F: Float64(2.0)}}}}, + {"map field with bytes", &pb.MessageWithMap{ByteMapping: map[bool][]byte{true: []byte("this time for sure")}}}, + {"map field with empty bytes", &pb.MessageWithMap{ByteMapping: map[bool][]byte{true: []byte{}}}}, + + {"map field with big entry", &pb.MessageWithMap{NameMapping: map[int32]string{8: strings.Repeat("x", 125)}}}, + {"map field with big key and val", &pb.MessageWithMap{StrToStr: map[string]string{strings.Repeat("x", 70): strings.Repeat("y", 70)}}}, + {"map field with big numeric key", &pb.MessageWithMap{NameMapping: map[int32]string{0xf00d: "om nom nom"}}}, + + {"oneof not set", &pb.Oneof{}}, + {"oneof bool", &pb.Oneof{Union: &pb.Oneof_F_Bool{true}}}, + {"oneof zero int32", &pb.Oneof{Union: &pb.Oneof_F_Int32{0}}}, + {"oneof big int32", &pb.Oneof{Union: &pb.Oneof_F_Int32{1 << 20}}}, + {"oneof int64", &pb.Oneof{Union: &pb.Oneof_F_Int64{42}}}, + {"oneof fixed32", &pb.Oneof{Union: &pb.Oneof_F_Fixed32{43}}}, + {"oneof fixed64", &pb.Oneof{Union: &pb.Oneof_F_Fixed64{44}}}, + {"oneof uint32", &pb.Oneof{Union: &pb.Oneof_F_Uint32{45}}}, + {"oneof uint64", &pb.Oneof{Union: &pb.Oneof_F_Uint64{46}}}, + {"oneof float", &pb.Oneof{Union: &pb.Oneof_F_Float{47.1}}}, + {"oneof double", &pb.Oneof{Union: &pb.Oneof_F_Double{48.9}}}, + {"oneof string", &pb.Oneof{Union: &pb.Oneof_F_String{"Rhythmic Fman"}}}, + {"oneof bytes", &pb.Oneof{Union: 
&pb.Oneof_F_Bytes{[]byte("let go")}}}, + {"oneof sint32", &pb.Oneof{Union: &pb.Oneof_F_Sint32{50}}}, + {"oneof sint64", &pb.Oneof{Union: &pb.Oneof_F_Sint64{51}}}, + {"oneof enum", &pb.Oneof{Union: &pb.Oneof_F_Enum{pb.MyMessage_BLUE}}}, + {"message for oneof", &pb.GoTestField{Label: String("k"), Type: String("v")}}, + {"oneof message", &pb.Oneof{Union: &pb.Oneof_F_Message{&pb.GoTestField{Label: String("k"), Type: String("v")}}}}, + {"oneof group", &pb.Oneof{Union: &pb.Oneof_FGroup{&pb.Oneof_F_Group{X: Int32(52)}}}}, + {"oneof largest tag", &pb.Oneof{Union: &pb.Oneof_F_Largest_Tag{1}}}, + {"multiple oneofs", &pb.Oneof{Union: &pb.Oneof_F_Int32{1}, Tormato: &pb.Oneof_Value{2}}}, +} + +func TestSize(t *testing.T) { + for _, tc := range SizeTests { + size := Size(tc.pb) + b, err := Marshal(tc.pb) + if err != nil { + t.Errorf("%v: Marshal failed: %v", tc.desc, err) + continue + } + if size != len(b) { + t.Errorf("%v: Size(%v) = %d, want %d", tc.desc, tc.pb, size, len(b)) + t.Logf("%v: bytes: %#v", tc.desc, b) + } + } +} diff --git a/vendor/github.com/golang/protobuf/proto/text_parser_test.go b/vendor/github.com/golang/protobuf/proto/text_parser_test.go new file mode 100644 index 000000000000..8f7cb4d27428 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/text_parser_test.go @@ -0,0 +1,673 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto_test + +import ( + "math" + "reflect" + "testing" + + . "github.com/golang/protobuf/proto" + proto3pb "github.com/golang/protobuf/proto/proto3_proto" + . 
"github.com/golang/protobuf/proto/testdata" +) + +type UnmarshalTextTest struct { + in string + err string // if "", no error expected + out *MyMessage +} + +func buildExtStructTest(text string) UnmarshalTextTest { + msg := &MyMessage{ + Count: Int32(42), + } + SetExtension(msg, E_Ext_More, &Ext{ + Data: String("Hello, world!"), + }) + return UnmarshalTextTest{in: text, out: msg} +} + +func buildExtDataTest(text string) UnmarshalTextTest { + msg := &MyMessage{ + Count: Int32(42), + } + SetExtension(msg, E_Ext_Text, String("Hello, world!")) + SetExtension(msg, E_Ext_Number, Int32(1729)) + return UnmarshalTextTest{in: text, out: msg} +} + +func buildExtRepStringTest(text string) UnmarshalTextTest { + msg := &MyMessage{ + Count: Int32(42), + } + if err := SetExtension(msg, E_Greeting, []string{"bula", "hola"}); err != nil { + panic(err) + } + return UnmarshalTextTest{in: text, out: msg} +} + +var unMarshalTextTests = []UnmarshalTextTest{ + // Basic + { + in: " count:42\n name:\"Dave\" ", + out: &MyMessage{ + Count: Int32(42), + Name: String("Dave"), + }, + }, + + // Empty quoted string + { + in: `count:42 name:""`, + out: &MyMessage{ + Count: Int32(42), + Name: String(""), + }, + }, + + // Quoted string concatenation with double quotes + { + in: `count:42 name: "My name is "` + "\n" + `"elsewhere"`, + out: &MyMessage{ + Count: Int32(42), + Name: String("My name is elsewhere"), + }, + }, + + // Quoted string concatenation with single quotes + { + in: "count:42 name: 'My name is '\n'elsewhere'", + out: &MyMessage{ + Count: Int32(42), + Name: String("My name is elsewhere"), + }, + }, + + // Quoted string concatenations with mixed quotes + { + in: "count:42 name: 'My name is '\n\"elsewhere\"", + out: &MyMessage{ + Count: Int32(42), + Name: String("My name is elsewhere"), + }, + }, + { + in: "count:42 name: \"My name is \"\n'elsewhere'", + out: &MyMessage{ + Count: Int32(42), + Name: String("My name is elsewhere"), + }, + }, + + // Quoted string with escaped apostrophe + { + in: `count:42 name: "HOLIDAY - New Year\'s Day"`, + out: &MyMessage{ + Count: Int32(42), + Name: String("HOLIDAY - New Year's Day"), + }, + }, + + // Quoted string with single quote + { + in: `count:42 name: 'Roger "The Ramster" Ramjet'`, + out: &MyMessage{ + Count: Int32(42), + Name: String(`Roger "The Ramster" Ramjet`), + }, + }, + + // Quoted string with all the accepted special characters from the C++ test + { + in: `count:42 name: ` + "\"\\\"A string with \\' characters \\n and \\r newlines and \\t tabs and \\001 slashes \\\\ and multiple spaces\"", + out: &MyMessage{ + Count: Int32(42), + Name: String("\"A string with ' characters \n and \r newlines and \t tabs and \001 slashes \\ and multiple spaces"), + }, + }, + + // Quoted string with quoted backslash + { + in: `count:42 name: "\\'xyz"`, + out: &MyMessage{ + Count: Int32(42), + Name: String(`\'xyz`), + }, + }, + + // Quoted string with UTF-8 bytes. 
+ { + in: "count:42 name: '\303\277\302\201\xAB'", + out: &MyMessage{ + Count: Int32(42), + Name: String("\303\277\302\201\xAB"), + }, + }, + + // Bad quoted string + { + in: `inner: < host: "\0" >` + "\n", + err: `line 1.15: invalid quoted string "\0": \0 requires 2 following digits`, + }, + + // Number too large for int64 + { + in: "count: 1 others { key: 123456789012345678901 }", + err: "line 1.23: invalid int64: 123456789012345678901", + }, + + // Number too large for int32 + { + in: "count: 1234567890123", + err: "line 1.7: invalid int32: 1234567890123", + }, + + // Number in hexadecimal + { + in: "count: 0x2beef", + out: &MyMessage{ + Count: Int32(0x2beef), + }, + }, + + // Number in octal + { + in: "count: 024601", + out: &MyMessage{ + Count: Int32(024601), + }, + }, + + // Floating point number with "f" suffix + { + in: "count: 4 others:< weight: 17.0f >", + out: &MyMessage{ + Count: Int32(4), + Others: []*OtherMessage{ + { + Weight: Float32(17), + }, + }, + }, + }, + + // Floating point positive infinity + { + in: "count: 4 bigfloat: inf", + out: &MyMessage{ + Count: Int32(4), + Bigfloat: Float64(math.Inf(1)), + }, + }, + + // Floating point negative infinity + { + in: "count: 4 bigfloat: -inf", + out: &MyMessage{ + Count: Int32(4), + Bigfloat: Float64(math.Inf(-1)), + }, + }, + + // Number too large for float32 + { + in: "others:< weight: 12345678901234567890123456789012345678901234567890 >", + err: "line 1.17: invalid float32: 12345678901234567890123456789012345678901234567890", + }, + + // Number posing as a quoted string + { + in: `inner: < host: 12 >` + "\n", + err: `line 1.15: invalid string: 12`, + }, + + // Quoted string posing as int32 + { + in: `count: "12"`, + err: `line 1.7: invalid int32: "12"`, + }, + + // Quoted string posing a float32 + { + in: `others:< weight: "17.4" >`, + err: `line 1.17: invalid float32: "17.4"`, + }, + + // Enum + { + in: `count:42 bikeshed: BLUE`, + out: &MyMessage{ + Count: Int32(42), + Bikeshed: MyMessage_BLUE.Enum(), + }, + }, + + // Repeated field + { + in: `count:42 pet: "horsey" pet:"bunny"`, + out: &MyMessage{ + Count: Int32(42), + Pet: []string{"horsey", "bunny"}, + }, + }, + + // Repeated field with list notation + { + in: `count:42 pet: ["horsey", "bunny"]`, + out: &MyMessage{ + Count: Int32(42), + Pet: []string{"horsey", "bunny"}, + }, + }, + + // Repeated message with/without colon and <>/{} + { + in: `count:42 others:{} others{} others:<> others:{}`, + out: &MyMessage{ + Count: Int32(42), + Others: []*OtherMessage{ + {}, + {}, + {}, + {}, + }, + }, + }, + + // Missing colon for inner message + { + in: `count:42 inner < host: "cauchy.syd" >`, + out: &MyMessage{ + Count: Int32(42), + Inner: &InnerMessage{ + Host: String("cauchy.syd"), + }, + }, + }, + + // Missing colon for string field + { + in: `name "Dave"`, + err: `line 1.5: expected ':', found "\"Dave\""`, + }, + + // Missing colon for int32 field + { + in: `count 42`, + err: `line 1.6: expected ':', found "42"`, + }, + + // Missing required field + { + in: `name: "Pawel"`, + err: `proto: required field "testdata.MyMessage.count" not set`, + out: &MyMessage{ + Name: String("Pawel"), + }, + }, + + // Missing required field in a required submessage + { + in: `count: 42 we_must_go_deeper < leo_finally_won_an_oscar <> >`, + err: `proto: required field "testdata.InnerMessage.host" not set`, + out: &MyMessage{ + Count: Int32(42), + WeMustGoDeeper: &RequiredInnerMessage{LeoFinallyWonAnOscar: &InnerMessage{}}, + }, + }, + + // Repeated non-repeated field + { + in: `name: "Rob" name: 
"Russ"`, + err: `line 1.12: non-repeated field "name" was repeated`, + }, + + // Group + { + in: `count: 17 SomeGroup { group_field: 12 }`, + out: &MyMessage{ + Count: Int32(17), + Somegroup: &MyMessage_SomeGroup{ + GroupField: Int32(12), + }, + }, + }, + + // Semicolon between fields + { + in: `count:3;name:"Calvin"`, + out: &MyMessage{ + Count: Int32(3), + Name: String("Calvin"), + }, + }, + // Comma between fields + { + in: `count:4,name:"Ezekiel"`, + out: &MyMessage{ + Count: Int32(4), + Name: String("Ezekiel"), + }, + }, + + // Boolean false + { + in: `count:42 inner { host: "example.com" connected: false }`, + out: &MyMessage{ + Count: Int32(42), + Inner: &InnerMessage{ + Host: String("example.com"), + Connected: Bool(false), + }, + }, + }, + // Boolean true + { + in: `count:42 inner { host: "example.com" connected: true }`, + out: &MyMessage{ + Count: Int32(42), + Inner: &InnerMessage{ + Host: String("example.com"), + Connected: Bool(true), + }, + }, + }, + // Boolean 0 + { + in: `count:42 inner { host: "example.com" connected: 0 }`, + out: &MyMessage{ + Count: Int32(42), + Inner: &InnerMessage{ + Host: String("example.com"), + Connected: Bool(false), + }, + }, + }, + // Boolean 1 + { + in: `count:42 inner { host: "example.com" connected: 1 }`, + out: &MyMessage{ + Count: Int32(42), + Inner: &InnerMessage{ + Host: String("example.com"), + Connected: Bool(true), + }, + }, + }, + // Boolean f + { + in: `count:42 inner { host: "example.com" connected: f }`, + out: &MyMessage{ + Count: Int32(42), + Inner: &InnerMessage{ + Host: String("example.com"), + Connected: Bool(false), + }, + }, + }, + // Boolean t + { + in: `count:42 inner { host: "example.com" connected: t }`, + out: &MyMessage{ + Count: Int32(42), + Inner: &InnerMessage{ + Host: String("example.com"), + Connected: Bool(true), + }, + }, + }, + // Boolean False + { + in: `count:42 inner { host: "example.com" connected: False }`, + out: &MyMessage{ + Count: Int32(42), + Inner: &InnerMessage{ + Host: String("example.com"), + Connected: Bool(false), + }, + }, + }, + // Boolean True + { + in: `count:42 inner { host: "example.com" connected: True }`, + out: &MyMessage{ + Count: Int32(42), + Inner: &InnerMessage{ + Host: String("example.com"), + Connected: Bool(true), + }, + }, + }, + + // Extension + buildExtStructTest(`count: 42 [testdata.Ext.more]:`), + buildExtStructTest(`count: 42 [testdata.Ext.more] {data:"Hello, world!"}`), + buildExtDataTest(`count: 42 [testdata.Ext.text]:"Hello, world!" 
[testdata.Ext.number]:1729`), + buildExtRepStringTest(`count: 42 [testdata.greeting]:"bula" [testdata.greeting]:"hola"`), + + // Big all-in-one + { + in: "count:42 # Meaning\n" + + `name:"Dave" ` + + `quote:"\"I didn't want to go.\"" ` + + `pet:"bunny" ` + + `pet:"kitty" ` + + `pet:"horsey" ` + + `inner:<` + + ` host:"footrest.syd" ` + + ` port:7001 ` + + ` connected:true ` + + `> ` + + `others:<` + + ` key:3735928559 ` + + ` value:"\x01A\a\f" ` + + `> ` + + `others:<` + + " weight:58.9 # Atomic weight of Co\n" + + ` inner:<` + + ` host:"lesha.mtv" ` + + ` port:8002 ` + + ` >` + + `>`, + out: &MyMessage{ + Count: Int32(42), + Name: String("Dave"), + Quote: String(`"I didn't want to go."`), + Pet: []string{"bunny", "kitty", "horsey"}, + Inner: &InnerMessage{ + Host: String("footrest.syd"), + Port: Int32(7001), + Connected: Bool(true), + }, + Others: []*OtherMessage{ + { + Key: Int64(3735928559), + Value: []byte{0x1, 'A', '\a', '\f'}, + }, + { + Weight: Float32(58.9), + Inner: &InnerMessage{ + Host: String("lesha.mtv"), + Port: Int32(8002), + }, + }, + }, + }, + }, +} + +func TestUnmarshalText(t *testing.T) { + for i, test := range unMarshalTextTests { + pb := new(MyMessage) + err := UnmarshalText(test.in, pb) + if test.err == "" { + // We don't expect failure. + if err != nil { + t.Errorf("Test %d: Unexpected error: %v", i, err) + } else if !reflect.DeepEqual(pb, test.out) { + t.Errorf("Test %d: Incorrect populated \nHave: %v\nWant: %v", + i, pb, test.out) + } + } else { + // We do expect failure. + if err == nil { + t.Errorf("Test %d: Didn't get expected error: %v", i, test.err) + } else if err.Error() != test.err { + t.Errorf("Test %d: Incorrect error.\nHave: %v\nWant: %v", + i, err.Error(), test.err) + } else if _, ok := err.(*RequiredNotSetError); ok && test.out != nil && !reflect.DeepEqual(pb, test.out) { + t.Errorf("Test %d: Incorrect populated \nHave: %v\nWant: %v", + i, pb, test.out) + } + } + } +} + +func TestUnmarshalTextCustomMessage(t *testing.T) { + msg := &textMessage{} + if err := UnmarshalText("custom", msg); err != nil { + t.Errorf("Unexpected error from custom unmarshal: %v", err) + } + if UnmarshalText("not custom", msg) == nil { + t.Errorf("Didn't get expected error from custom unmarshal") + } +} + +// Regression test; this caused a panic. 
+func TestRepeatedEnum(t *testing.T) {
+	pb := new(RepeatedEnum)
+	if err := UnmarshalText("color: RED", pb); err != nil {
+		t.Fatal(err)
+	}
+	exp := &RepeatedEnum{
+		Color: []RepeatedEnum_Color{RepeatedEnum_RED},
+	}
+	if !Equal(pb, exp) {
+		t.Errorf("Incorrect populated \nHave: %v\nWant: %v", pb, exp)
+	}
+}
+
+func TestProto3TextParsing(t *testing.T) {
+	m := new(proto3pb.Message)
+	const in = `name: "Wallace" true_scotsman: true`
+	want := &proto3pb.Message{
+		Name:         "Wallace",
+		TrueScotsman: true,
+	}
+	if err := UnmarshalText(in, m); err != nil {
+		t.Fatal(err)
+	}
+	if !Equal(m, want) {
+		t.Errorf("\n got %v\nwant %v", m, want)
+	}
+}
+
+func TestMapParsing(t *testing.T) {
+	m := new(MessageWithMap)
+	const in = `name_mapping:<key:1234,value:"Feist"> name_mapping:<key:1,value:"Beatles">` +
+		`msg_mapping:<key:-4, value:<f: 2.0>,>` + // separating commas are okay
+		`msg_mapping<key:-2 value<f: 4.0>>` + // no colon after "value"
+		`msg_mapping:<value:<f: 5.0>>` + // omitted key
+		`msg_mapping:<key:1>` + // omitted value
+		`byte_mapping:<key:true value:"so be it">` +
+		`byte_mapping:<>` // omitted key and value
+	want := &MessageWithMap{
+		NameMapping: map[int32]string{
+			1:    "Beatles",
+			1234: "Feist",
+		},
+		MsgMapping: map[int64]*FloatingPoint{
+			-4: {F: Float64(2.0)},
+			-2: {F: Float64(4.0)},
+			0:  {F: Float64(5.0)},
+			1:  nil,
+		},
+		ByteMapping: map[bool][]byte{
+			false: nil,
+			true:  []byte("so be it"),
+		},
+	}
+	if err := UnmarshalText(in, m); err != nil {
+		t.Fatal(err)
+	}
+	if !Equal(m, want) {
+		t.Errorf("\n got %v\nwant %v", m, want)
+	}
+}
+
+func TestOneofParsing(t *testing.T) {
+	const in = `name:"Shrek"`
+	m := new(Communique)
+	want := &Communique{Union: &Communique_Name{"Shrek"}}
+	if err := UnmarshalText(in, m); err != nil {
+		t.Fatal(err)
+	}
+	if !Equal(m, want) {
+		t.Errorf("\n got %v\nwant %v", m, want)
+	}
+
+	const inOverwrite = `name:"Shrek" number:42`
+	m = new(Communique)
+	testErr := "line 1.13: field 'number' would overwrite already parsed oneof 'Union'"
+	if err := UnmarshalText(inOverwrite, m); err == nil {
+		t.Errorf("TestOneofParsing: Didn't get expected error: %v", testErr)
+	} else if err.Error() != testErr {
+		t.Errorf("TestOneofParsing: Incorrect error.\nHave: %v\nWant: %v",
+			err.Error(), testErr)
+	}
+}
+
+var benchInput string
+
+func init() {
+	benchInput = "count: 4\n"
+	for i := 0; i < 1000; i++ {
+		benchInput += "pet: \"fido\"\n"
+	}
+
+	// Check it is valid input.
+	pb := new(MyMessage)
+	err := UnmarshalText(benchInput, pb)
+	if err != nil {
+		panic("Bad benchmark input: " + err.Error())
+	}
+}
+
+func BenchmarkUnmarshalText(b *testing.B) {
+	pb := new(MyMessage)
+	for i := 0; i < b.N; i++ {
+		UnmarshalText(benchInput, pb)
+	}
+	b.SetBytes(int64(len(benchInput)))
+}
diff --git a/vendor/github.com/golang/protobuf/proto/text_test.go b/vendor/github.com/golang/protobuf/proto/text_test.go
new file mode 100644
index 000000000000..3eabacac8f99
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/text_test.go
@@ -0,0 +1,474 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto_test + +import ( + "bytes" + "errors" + "io/ioutil" + "math" + "strings" + "testing" + + "github.com/golang/protobuf/proto" + + proto3pb "github.com/golang/protobuf/proto/proto3_proto" + pb "github.com/golang/protobuf/proto/testdata" +) + +// textMessage implements the methods that allow it to marshal and unmarshal +// itself as text. +type textMessage struct { +} + +func (*textMessage) MarshalText() ([]byte, error) { + return []byte("custom"), nil +} + +func (*textMessage) UnmarshalText(bytes []byte) error { + if string(bytes) != "custom" { + return errors.New("expected 'custom'") + } + return nil +} + +func (*textMessage) Reset() {} +func (*textMessage) String() string { return "" } +func (*textMessage) ProtoMessage() {} + +func newTestMessage() *pb.MyMessage { + msg := &pb.MyMessage{ + Count: proto.Int32(42), + Name: proto.String("Dave"), + Quote: proto.String(`"I didn't want to go."`), + Pet: []string{"bunny", "kitty", "horsey"}, + Inner: &pb.InnerMessage{ + Host: proto.String("footrest.syd"), + Port: proto.Int32(7001), + Connected: proto.Bool(true), + }, + Others: []*pb.OtherMessage{ + { + Key: proto.Int64(0xdeadbeef), + Value: []byte{1, 65, 7, 12}, + }, + { + Weight: proto.Float32(6.022), + Inner: &pb.InnerMessage{ + Host: proto.String("lesha.mtv"), + Port: proto.Int32(8002), + }, + }, + }, + Bikeshed: pb.MyMessage_BLUE.Enum(), + Somegroup: &pb.MyMessage_SomeGroup{ + GroupField: proto.Int32(8), + }, + // One normally wouldn't do this. + // This is an undeclared tag 13, as a varint (wire type 0) with value 4. + XXX_unrecognized: []byte{13<<3 | 0, 4}, + } + ext := &pb.Ext{ + Data: proto.String("Big gobs for big rats"), + } + if err := proto.SetExtension(msg, pb.E_Ext_More, ext); err != nil { + panic(err) + } + greetings := []string{"adg", "easy", "cow"} + if err := proto.SetExtension(msg, pb.E_Greeting, greetings); err != nil { + panic(err) + } + + // Add an unknown extension. We marshal a pb.Ext, and fake the ID. + b, err := proto.Marshal(&pb.Ext{Data: proto.String("3G skiing")}) + if err != nil { + panic(err) + } + b = append(proto.EncodeVarint(201<<3|proto.WireBytes), b...) + proto.SetRawExtension(msg, 201, b) + + // Extensions can be plain fields, too, so let's test that. 
+ b = append(proto.EncodeVarint(202<<3|proto.WireVarint), 19)
+ proto.SetRawExtension(msg, 202, b)
+
+ return msg
+}
+
+const text = `count: 42
+name: "Dave"
+quote: "\"I didn't want to go.\""
+pet: "bunny"
+pet: "kitty"
+pet: "horsey"
+inner: <
+ host: "footrest.syd"
+ port: 7001
+ connected: true
+>
+others: <
+ key: 3735928559
+ value: "\001A\007\014"
+>
+others: <
+ weight: 6.022
+ inner: <
+ host: "lesha.mtv"
+ port: 8002
+ >
+>
+bikeshed: BLUE
+SomeGroup {
+ group_field: 8
+}
+/* 2 unknown bytes */
+13: 4
+[testdata.Ext.more]: <
+ data: "Big gobs for big rats"
+>
+[testdata.greeting]: "adg"
+[testdata.greeting]: "easy"
+[testdata.greeting]: "cow"
+/* 13 unknown bytes */
+201: "\t3G skiing"
+/* 3 unknown bytes */
+202: 19
+`
+
+func TestMarshalText(t *testing.T) {
+ buf := new(bytes.Buffer)
+ if err := proto.MarshalText(buf, newTestMessage()); err != nil {
+ t.Fatalf("proto.MarshalText: %v", err)
+ }
+ s := buf.String()
+ if s != text {
+ t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v===\n", s, text)
+ }
+}
+
+func TestMarshalTextCustomMessage(t *testing.T) {
+ buf := new(bytes.Buffer)
+ if err := proto.MarshalText(buf, &textMessage{}); err != nil {
+ t.Fatalf("proto.MarshalText: %v", err)
+ }
+ s := buf.String()
+ if s != "custom" {
+ t.Errorf("Got %q, expected %q", s, "custom")
+ }
+}
+func TestMarshalTextNil(t *testing.T) {
+ want := ""
+ tests := []proto.Message{nil, (*pb.MyMessage)(nil)}
+ for i, test := range tests {
+ buf := new(bytes.Buffer)
+ if err := proto.MarshalText(buf, test); err != nil {
+ t.Fatal(err)
+ }
+ if got := buf.String(); got != want {
+ t.Errorf("%d: got %q want %q", i, got, want)
+ }
+ }
+}
+
+func TestMarshalTextUnknownEnum(t *testing.T) {
+ // The Color enum only specifies values 0-2.
+ m := &pb.MyMessage{Bikeshed: pb.MyMessage_Color(3).Enum()}
+ got := m.String()
+ const want = `bikeshed:3 `
+ if got != want {
+ t.Errorf("\n got %q\nwant %q", got, want)
+ }
+}
+
+func TestTextOneof(t *testing.T) {
+ tests := []struct {
+ m proto.Message
+ want string
+ }{
+ // zero message
+ {&pb.Communique{}, ``},
+ // scalar field
+ {&pb.Communique{Union: &pb.Communique_Number{4}}, `number:4`},
+ // message field
+ {&pb.Communique{Union: &pb.Communique_Msg{
+ &pb.Strings{StringField: proto.String("why hello!")},
+ }}, `msg:<string_field:"why hello!" >`},
+ // bad oneof (should not panic)
+ {&pb.Communique{Union: &pb.Communique_Msg{nil}}, `msg:/* nil */`},
+ }
+ for _, test := range tests {
+ got := strings.TrimSpace(test.m.String())
+ if got != test.want {
+ t.Errorf("\n got %s\nwant %s", got, test.want)
+ }
+ }
+}
+
+func BenchmarkMarshalTextBuffered(b *testing.B) {
+ buf := new(bytes.Buffer)
+ m := newTestMessage()
+ for i := 0; i < b.N; i++ {
+ buf.Reset()
+ proto.MarshalText(buf, m)
+ }
+}
+
+func BenchmarkMarshalTextUnbuffered(b *testing.B) {
+ w := ioutil.Discard
+ m := newTestMessage()
+ for i := 0; i < b.N; i++ {
+ proto.MarshalText(w, m)
+ }
+}
+
+func compact(src string) string {
+ // s/[ \n]+/ /g; s/ $//;
+ dst := make([]byte, len(src))
+ space, comment := false, false
+ j := 0
+ for i := 0; i < len(src); i++ {
+ if strings.HasPrefix(src[i:], "/*") {
+ comment = true
+ i++
+ continue
+ }
+ if comment && strings.HasPrefix(src[i:], "*/") {
+ comment = false
+ i++
+ continue
+ }
+ if comment {
+ continue
+ }
+ c := src[i]
+ if c == ' ' || c == '\n' {
+ space = true
+ continue
+ }
+ if j > 0 && (dst[j-1] == ':' || dst[j-1] == '<' || dst[j-1] == '{') {
+ space = false
+ }
+ if c == '{' {
+ space = false
+ }
+ if space {
+ dst[j] = ' '
+ j++
+ space = false
+ }
+ dst[j] = c
+ j++
+ }
+ if
space {
+ dst[j] = ' '
+ j++
+ }
+ return string(dst[0:j])
+}
+
+var compactText = compact(text)
+
+func TestCompactText(t *testing.T) {
+ s := proto.CompactTextString(newTestMessage())
+ if s != compactText {
+ t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v\n===\n", s, compactText)
+ }
+}
+
+func TestStringEscaping(t *testing.T) {
+ testCases := []struct {
+ in *pb.Strings
+ out string
+ }{
+ {
+ // Test data from C++ test (TextFormatTest.StringEscape).
+ // Single divergence: we don't escape apostrophes.
+ &pb.Strings{StringField: proto.String("\"A string with ' characters \n and \r newlines and \t tabs and \001 slashes \\ and multiple spaces")},
+ "string_field: \"\\\"A string with ' characters \\n and \\r newlines and \\t tabs and \\001 slashes \\\\ and multiple spaces\"\n",
+ },
+ {
+ // Test data from the same C++ test.
+ &pb.Strings{StringField: proto.String("\350\260\267\346\255\214")},
+ "string_field: \"\\350\\260\\267\\346\\255\\214\"\n",
+ },
+ {
+ // Some bytes that are not valid UTF-8.
+ &pb.Strings{StringField: proto.String("\x00\x01\xff\x81")},
+ `string_field: "\000\001\377\201"` + "\n",
+ },
+ }
+
+ for i, tc := range testCases {
+ var buf bytes.Buffer
+ if err := proto.MarshalText(&buf, tc.in); err != nil {
+ t.Errorf("proto.MarshalText: %v", err)
+ continue
+ }
+ s := buf.String()
+ if s != tc.out {
+ t.Errorf("#%d: Got:\n%s\nExpected:\n%s\n", i, s, tc.out)
+ continue
+ }
+
+ // Check round-trip.
+ pb := new(pb.Strings)
+ if err := proto.UnmarshalText(s, pb); err != nil {
+ t.Errorf("#%d: UnmarshalText: %v", i, err)
+ continue
+ }
+ if !proto.Equal(pb, tc.in) {
+ t.Errorf("#%d: Round-trip failed:\nstart: %v\n end: %v", i, tc.in, pb)
+ }
+ }
+}
+
+// A limitedWriter accepts some output before it fails.
+// This is a proxy for something like a nearly-full or imminently-failing disk,
+// or a network connection that is about to die.
+type limitedWriter struct {
+ b bytes.Buffer
+ limit int
+}
+
+var outOfSpace = errors.New("proto: insufficient space")
+
+func (w *limitedWriter) Write(p []byte) (n int, err error) {
+ var avail = w.limit - w.b.Len()
+ if avail <= 0 {
+ return 0, outOfSpace
+ }
+ if len(p) <= avail {
+ return w.b.Write(p)
+ }
+ n, _ = w.b.Write(p[:avail])
+ return n, outOfSpace
+}
+
+func TestMarshalTextFailing(t *testing.T) {
+ // Try lots of different sizes to exercise more error code-paths.
+ for lim := 0; lim < len(text); lim++ {
+ buf := new(limitedWriter)
+ buf.limit = lim
+ err := proto.MarshalText(buf, newTestMessage())
+ // We expect a certain error, but also some partial results in the buffer.
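+ // (Concretely: MarshalText should return outOfSpace, and the buffer should
+ // contain exactly the first lim bytes of the expected text.)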
+ if err != outOfSpace {
+ t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v===\n", err, outOfSpace)
+ }
+ s := buf.b.String()
+ x := text[:buf.limit]
+ if s != x {
+ t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v===\n", s, x)
+ }
+ }
+}
+
+func TestFloats(t *testing.T) {
+ tests := []struct {
+ f float64
+ want string
+ }{
+ {0, "0"},
+ {4.7, "4.7"},
+ {math.Inf(1), "inf"},
+ {math.Inf(-1), "-inf"},
+ {math.NaN(), "nan"},
+ }
+ for _, test := range tests {
+ msg := &pb.FloatingPoint{F: &test.f}
+ got := strings.TrimSpace(msg.String())
+ want := `f:` + test.want
+ if got != want {
+ t.Errorf("f=%f: got %q, want %q", test.f, got, want)
+ }
+ }
+}
+
+func TestRepeatedNilText(t *testing.T) {
+ m := &pb.MessageList{
+ Message: []*pb.MessageList_Message{
+ nil,
+ &pb.MessageList_Message{
+ Name: proto.String("Horse"),
+ },
+ nil,
+ },
+ }
+ want := `Message <nil>
+Message {
+ name: "Horse"
+}
+Message <nil>
+`
+ if s := proto.MarshalTextString(m); s != want {
+ t.Errorf(" got: %s\nwant: %s", s, want)
+ }
+}
+
+func TestProto3Text(t *testing.T) {
+ tests := []struct {
+ m proto.Message
+ want string
+ }{
+ // zero message
+ {&proto3pb.Message{}, ``},
+ // zero message except for an empty byte slice
+ {&proto3pb.Message{Data: []byte{}}, ``},
+ // trivial case
+ {&proto3pb.Message{Name: "Rob", HeightInCm: 175}, `name:"Rob" height_in_cm:175`},
+ // empty map
+ {&pb.MessageWithMap{}, ``},
+ // non-empty map; map format is the same as a repeated struct,
+ // and they are sorted by key (numerically for numeric keys).
+ {
+ &pb.MessageWithMap{NameMapping: map[int32]string{
+ -1: "Negatory",
+ 7: "Lucky",
+ 1234: "Feist",
+ 6345789: "Otis",
+ }},
+ `name_mapping:<key:-1 value:"Negatory" > ` +
+ `name_mapping:<key:7 value:"Lucky" > ` +
+ `name_mapping:<key:1234 value:"Feist" > ` +
+ `name_mapping:<key:6345789 value:"Otis" >`,
+ },
+ // map with nil value; not well-defined, but we shouldn't crash
+ {
+ &pb.MessageWithMap{MsgMapping: map[int64]*pb.FloatingPoint{7: nil}},
+ `msg_mapping:<key:7 >`,
+ },
+ }
+ for _, test := range tests {
+ got := strings.TrimSpace(test.m.String())
+ if got != test.want {
+ t.Errorf("\n got %s\nwant %s", got, test.want)
+ }
+ }
+}
diff --git a/vendor/github.com/google/btree/BUILD b/vendor/github.com/google/btree/BUILD
index 1c5555ba60c7..68ec558c84d9 100644
--- a/vendor/github.com/google/btree/BUILD
+++ b/vendor/github.com/google/btree/BUILD
@@ -1,14 +1,17 @@
-package(default_visibility = ["//visibility:public"])
-
-load(
- "@io_bazel_rules_go//go:def.bzl",
- "go_library",
-)
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
 
 go_library(
 name = "go_default_library",
 srcs = ["btree.go"],
 importpath = "github.com/google/btree",
+ visibility = ["//visibility:public"],
+)
+
+go_test(
+ name = "go_default_test",
+ srcs = ["btree_test.go"],
+ importpath = "github.com/google/btree",
+ library = ":go_default_library",
 )
 
 filegroup(
@@ -22,4 +25,5 @@ filegroup(
 name = "all-srcs",
 srcs = [":package-srcs"],
 tags = ["automanaged"],
+ visibility = ["//visibility:public"],
 )
diff --git a/vendor/github.com/google/btree/btree_test.go b/vendor/github.com/google/btree/btree_test.go
new file mode 100644
index 000000000000..43dcea63bb31
--- /dev/null
+++ b/vendor/github.com/google/btree/btree_test.go
@@ -0,0 +1,293 @@
+// Copyright 2014 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package btree
+
+import (
+ "flag"
+ "fmt"
+ "math/rand"
+ "reflect"
+ "testing"
+ "time"
+)
+
+func init() {
+ seed := time.Now().Unix()
+ fmt.Println(seed)
+ rand.Seed(seed)
+}
+
+// perm returns a random permutation of n Int items in the range [0, n).
+func perm(n int) (out []Item) {
+ for _, v := range rand.Perm(n) {
+ out = append(out, Int(v))
+ }
+ return
+}
+
+// rang returns an ordered list of Int items in the range [0, n).
+func rang(n int) (out []Item) {
+ for i := 0; i < n; i++ {
+ out = append(out, Int(i))
+ }
+ return
+}
+
+// all extracts all items from a tree in order as a slice.
+func all(t *BTree) (out []Item) {
+ t.Ascend(func(a Item) bool {
+ out = append(out, a)
+ return true
+ })
+ return
+}
+
+var btreeDegree = flag.Int("degree", 32, "B-Tree degree")
+
+func TestBTree(t *testing.T) {
+ tr := New(*btreeDegree)
+ const treeSize = 10000
+ for i := 0; i < 10; i++ {
+ for _, item := range perm(treeSize) {
+ if x := tr.ReplaceOrInsert(item); x != nil {
+ t.Fatal("insert found item", item)
+ }
+ }
+ for _, item := range perm(treeSize) {
+ if x := tr.ReplaceOrInsert(item); x == nil {
+ t.Fatal("insert didn't find item", item)
+ }
+ }
+ got := all(tr)
+ want := rang(treeSize)
+ if !reflect.DeepEqual(got, want) {
+ t.Fatalf("mismatch:\n got: %v\nwant: %v", got, want)
+ }
+ for _, item := range perm(treeSize) {
+ if x := tr.Delete(item); x == nil {
+ t.Fatalf("didn't find %v", item)
+ }
+ }
+ if got = all(tr); len(got) > 0 {
+ t.Fatalf("some left!: %v", got)
+ }
+ }
+}
+
+func ExampleBTree() {
+ tr := New(*btreeDegree)
+ for i := Int(0); i < 10; i++ {
+ tr.ReplaceOrInsert(i)
+ }
+ fmt.Println("len: ", tr.Len())
+ fmt.Println("get3: ", tr.Get(Int(3)))
+ fmt.Println("get100: ", tr.Get(Int(100)))
+ fmt.Println("del4: ", tr.Delete(Int(4)))
+ fmt.Println("del100: ", tr.Delete(Int(100)))
+ fmt.Println("replace5: ", tr.ReplaceOrInsert(Int(5)))
+ fmt.Println("replace100:", tr.ReplaceOrInsert(Int(100)))
+ fmt.Println("delmin: ", tr.DeleteMin())
+ fmt.Println("delmax: ", tr.DeleteMax())
+ fmt.Println("len: ", tr.Len())
+ // Output:
+ // len: 10
+ // get3: 3
+ // get100: <nil>
+ // del4: 4
+ // del100: <nil>
+ // replace5: 5
+ // replace100: <nil>
+ // delmin: 0
+ // delmax: 100
+ // len: 8
+}
+
+func TestDeleteMin(t *testing.T) {
+ tr := New(3)
+ for _, v := range perm(100) {
+ tr.ReplaceOrInsert(v)
+ }
+ var got []Item
+ for v := tr.DeleteMin(); v != nil; v = tr.DeleteMin() {
+ got = append(got, v)
+ }
+ if want := rang(100); !reflect.DeepEqual(got, want) {
+ t.Fatalf("ascendrange:\n got: %v\nwant: %v", got, want)
+ }
+}
+
+func TestDeleteMax(t *testing.T) {
+ tr := New(3)
+ for _, v := range perm(100) {
+ tr.ReplaceOrInsert(v)
+ }
+ var got []Item
+ for v := tr.DeleteMax(); v != nil; v = tr.DeleteMax() {
+ got = append(got, v)
+ }
+ // Reverse our list.
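+ // (DeleteMax pops the largest item first, so got is in descending order;
+ // reversing it allows comparison against the ascending rang(100).)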
+ for i := 0; i < len(got)/2; i++ { + got[i], got[len(got)-i-1] = got[len(got)-i-1], got[i] + } + if want := rang(100); !reflect.DeepEqual(got, want) { + t.Fatalf("ascendrange:\n got: %v\nwant: %v", got, want) + } +} + +func TestAscendRange(t *testing.T) { + tr := New(2) + for _, v := range perm(100) { + tr.ReplaceOrInsert(v) + } + var got []Item + tr.AscendRange(Int(40), Int(60), func(a Item) bool { + got = append(got, a) + return true + }) + if want := rang(100)[40:60]; !reflect.DeepEqual(got, want) { + t.Fatalf("ascendrange:\n got: %v\nwant: %v", got, want) + } + got = got[:0] + tr.AscendRange(Int(40), Int(60), func(a Item) bool { + if a.(Int) > 50 { + return false + } + got = append(got, a) + return true + }) + if want := rang(100)[40:51]; !reflect.DeepEqual(got, want) { + t.Fatalf("ascendrange:\n got: %v\nwant: %v", got, want) + } +} + +func TestAscendLessThan(t *testing.T) { + tr := New(*btreeDegree) + for _, v := range perm(100) { + tr.ReplaceOrInsert(v) + } + var got []Item + tr.AscendLessThan(Int(60), func(a Item) bool { + got = append(got, a) + return true + }) + if want := rang(100)[:60]; !reflect.DeepEqual(got, want) { + t.Fatalf("ascendrange:\n got: %v\nwant: %v", got, want) + } + got = got[:0] + tr.AscendLessThan(Int(60), func(a Item) bool { + if a.(Int) > 50 { + return false + } + got = append(got, a) + return true + }) + if want := rang(100)[:51]; !reflect.DeepEqual(got, want) { + t.Fatalf("ascendrange:\n got: %v\nwant: %v", got, want) + } +} + +func TestAscendGreaterOrEqual(t *testing.T) { + tr := New(*btreeDegree) + for _, v := range perm(100) { + tr.ReplaceOrInsert(v) + } + var got []Item + tr.AscendGreaterOrEqual(Int(40), func(a Item) bool { + got = append(got, a) + return true + }) + if want := rang(100)[40:]; !reflect.DeepEqual(got, want) { + t.Fatalf("ascendrange:\n got: %v\nwant: %v", got, want) + } + got = got[:0] + tr.AscendGreaterOrEqual(Int(40), func(a Item) bool { + if a.(Int) > 50 { + return false + } + got = append(got, a) + return true + }) + if want := rang(100)[40:51]; !reflect.DeepEqual(got, want) { + t.Fatalf("ascendrange:\n got: %v\nwant: %v", got, want) + } +} + +const benchmarkTreeSize = 10000 + +func BenchmarkInsert(b *testing.B) { + b.StopTimer() + insertP := perm(benchmarkTreeSize) + b.StartTimer() + i := 0 + for i < b.N { + tr := New(*btreeDegree) + for _, item := range insertP { + tr.ReplaceOrInsert(item) + i++ + if i >= b.N { + return + } + } + } +} + +func BenchmarkDelete(b *testing.B) { + b.StopTimer() + insertP := perm(benchmarkTreeSize) + removeP := perm(benchmarkTreeSize) + b.StartTimer() + i := 0 + for i < b.N { + b.StopTimer() + tr := New(*btreeDegree) + for _, v := range insertP { + tr.ReplaceOrInsert(v) + } + b.StartTimer() + for _, item := range removeP { + tr.Delete(item) + i++ + if i >= b.N { + return + } + } + if tr.Len() > 0 { + panic(tr.Len()) + } + } +} + +func BenchmarkGet(b *testing.B) { + b.StopTimer() + insertP := perm(benchmarkTreeSize) + removeP := perm(benchmarkTreeSize) + b.StartTimer() + i := 0 + for i < b.N { + b.StopTimer() + tr := New(*btreeDegree) + for _, v := range insertP { + tr.ReplaceOrInsert(v) + } + b.StartTimer() + for _, item := range removeP { + tr.Get(item) + i++ + if i >= b.N { + return + } + } + } +} diff --git a/vendor/github.com/google/go-github/.gitignore b/vendor/github.com/google/go-github/.gitignore new file mode 100644 index 000000000000..9ed3b07cefec --- /dev/null +++ b/vendor/github.com/google/go-github/.gitignore @@ -0,0 +1 @@ +*.test diff --git a/vendor/github.com/google/go-github/.travis.yml 
b/vendor/github.com/google/go-github/.travis.yml
new file mode 100644
index 000000000000..b82916a002df
--- /dev/null
+++ b/vendor/github.com/google/go-github/.travis.yml
@@ -0,0 +1,18 @@
+sudo: false
+language: go
+go:
+ - 1.7.x
+ - 1.8.x
+ - master
+matrix:
+ allow_failures:
+ - go: master
+ fast_finish: true
+install:
+ - # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step).
+script:
+ - go get -t -v ./...
+ - diff -u <(echo -n) <(gofmt -d -s .)
+ - go generate -x ./... && git diff --exit-code; code=$?; git checkout -- .; (exit $code) # Check that go generate ./... produces a zero diff; clean up any changes afterwards.
+ - go tool vet .
+ - go test -v -race ./...
diff --git a/vendor/github.com/google/go-github/CONTRIBUTING.md b/vendor/github.com/google/go-github/CONTRIBUTING.md
new file mode 100644
index 000000000000..68a23f00512f
--- /dev/null
+++ b/vendor/github.com/google/go-github/CONTRIBUTING.md
@@ -0,0 +1,114 @@
+# How to contribute #
+
+We'd love to accept your patches and contributions to this project. There are
+just a few small guidelines you need to follow.
+
+
+## Contributor License Agreement ##
+
+Contributions to any Google project must be accompanied by a Contributor
+License Agreement. This is not a copyright **assignment**; it simply gives
+Google permission to use and redistribute your contributions as part of the
+project. Head over to <https://cla.developers.google.com/> to see your current
+agreements on file or to sign a new one.
+
+You generally only need to submit a CLA once, so if you've already submitted one
+(even if it was for a different project), you probably don't need to do it
+again.
+
+
+## Submitting a patch ##
+
+ 1. It's generally best to start by opening a new issue describing the bug or
+ feature you're intending to fix. Even if you think it's relatively minor,
+ it's helpful to know what people are working on. Mention in the initial
+ issue that you are planning to work on that bug or feature so that it can
+ be assigned to you.
+
+ 1. Follow the normal process of [forking][] the project, and set up a new
+ branch to work in. It's important that each group of changes be done in
+ separate branches in order to ensure that a pull request only includes the
+ commits related to that bug or feature.
+
+ 1. Go makes it very simple to ensure properly formatted code, so always run
+ `go fmt` on your code before committing it. You should also run
+ [golint][] over your code. As noted in the [golint readme][], it's not
+ strictly necessary that your code be completely "lint-free", but this will
+ help you find common style issues.
+
+ 1. Any significant changes should almost always be accompanied by tests. The
+ project already has good test coverage, so look at some of the existing
+ tests if you're unsure how to go about it. [gocov][] and [gocov-html][]
+ are invaluable tools for seeing which parts of your code aren't being
+ exercised by your tests.
+
+ 1. Please run:
+ * `go generate github.com/google/go-github/...`
+ * `go test github.com/google/go-github/...`
+ * `go vet github.com/google/go-github/...`
+
+ 1. Do your best to have [well-formed commit messages][] for each change.
+ This provides consistency throughout the project, and ensures that commit
+ messages are able to be formatted properly by various git tools.
+
+ 1. Finally, push the commits to your fork and submit a [pull request][].
+
+[forking]: https://help.github.com/articles/fork-a-repo
+[golint]: https://github.com/golang/lint
+[golint readme]: https://github.com/golang/lint/blob/master/README
+[gocov]: https://github.com/axw/gocov
+[gocov-html]: https://github.com/matm/gocov-html
+[well-formed commit messages]: http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html
+[squash]: http://git-scm.com/book/en/Git-Tools-Rewriting-History#Squashing-Commits
+[pull request]: https://help.github.com/articles/creating-a-pull-request
+
+
+## Other notes on code organization ##
+
+Currently, everything is defined in the main `github` package, with API methods
+broken into separate service objects. These services map directly to how
+the [GitHub API documentation][] is organized, so use that as your guide for
+where to put new methods.
+
+Code is also organized into files based pretty closely on the GitHub API
+documentation, following the format `{service}_{api}.go`. For example, methods
+defined at <https://developer.github.com/v3/repos/hooks/> live in
+[repos_hooks.go][].
+
+[GitHub API documentation]: https://developer.github.com/v3/
+[repos_hooks.go]: https://github.com/google/go-github/blob/master/github/repos_hooks.go
+
+
+## Maintainer's Guide ##
+
+(These notes are mostly only for people merging in pull requests.)
+
+**Verify CLAs.** CLAs must be on file for the pull request submitter and commit
+author(s). Google's CLA verification system should handle this automatically
+and will set commit statuses as appropriate. If there's ever any question about
+a pull request, ask [willnorris](https://github.com/willnorris).
+
+**Always try to maintain a clean, linear git history.** With very few
+exceptions, running `git log` should not show a bunch of branching and merging.
+
+Never use the GitHub "merge" button, since it always creates a merge commit.
+Instead, check out the pull request locally ([these git aliases
+help][git-aliases]), then cherry-pick or rebase the commits onto master. If there
+are small cleanup commits, especially as a result of addressing code review
+comments, these should almost always be squashed down to a single commit. Don't
+bother squashing commits that really deserve to be separate though. If needed,
+feel free to amend additional small changes to the code or commit message that
+aren't worth going through code review for.
+
+If you made any changes like squashing commits, rebasing onto master, etc, then
+GitHub won't recognize that this is the same commit in order to mark the pull
+request as "merged". So instead, amend the commit message to include a line
+"Fixes #0", referencing the pull request number. This would be in addition to
+any other "Fixes" lines for closing related issues. If you forget to do this,
+you can also leave a comment on the pull request [like this][rebase-comment].
+If you made any other changes, it's worth noting that as well, [like
+this][modified-comment].
+
+[git-aliases]: https://github.com/willnorris/dotfiles/blob/d640d010c23b1116bdb3d4dc12088ed26120d87d/git/.gitconfig#L13-L15
+[rebase-comment]: https://github.com/google/go-github/pull/277#issuecomment-183035491
+[modified-comment]: https://github.com/google/go-github/pull/280#issuecomment-184859046
diff --git a/vendor/github.com/google/go-github/README.md b/vendor/github.com/google/go-github/README.md
new file mode 100644
index 000000000000..9c12172b57e4
--- /dev/null
+++ b/vendor/github.com/google/go-github/README.md
@@ -0,0 +1,237 @@
+# go-github #
+
+go-github is a Go client library for accessing the [GitHub API][].
+ +**Documentation:** [![GoDoc](https://godoc.org/github.com/google/go-github/github?status.svg)](https://godoc.org/github.com/google/go-github/github) +**Mailing List:** [go-github@googlegroups.com](https://groups.google.com/group/go-github) +**Build Status:** [![Build Status](https://travis-ci.org/google/go-github.svg?branch=master)](https://travis-ci.org/google/go-github) +**Test Coverage:** [![Test Coverage](https://coveralls.io/repos/google/go-github/badge.svg?branch=master)](https://coveralls.io/r/google/go-github?branch=master) ([gocov report](https://drone.io/github.com/google/go-github/files/coverage.html)) + +go-github requires Go version 1.7 or greater. + +## Usage ## + +```go +import "github.com/google/go-github/github" +``` + +Construct a new GitHub client, then use the various services on the client to +access different parts of the GitHub API. For example: + +```go +client := github.NewClient(nil) + +// list all organizations for user "willnorris" +orgs, _, err := client.Organizations.List(ctx, "willnorris", nil) +``` + +Some API methods have optional parameters that can be passed. For example: + +```go +client := github.NewClient(nil) + +// list public repositories for org "github" +opt := &github.RepositoryListByOrgOptions{Type: "public"} +repos, _, err := client.Repositories.ListByOrg(ctx, "github", opt) +``` + +The services of a client divide the API into logical chunks and correspond to +the structure of the GitHub API documentation at +https://developer.github.com/v3/. + +### Authentication ### + +The go-github library does not directly handle authentication. Instead, when +creating a new client, pass an `http.Client` that can handle authentication for +you. The easiest and recommended way to do this is using the [oauth2][] +library, but you can always use any other library that provides an +`http.Client`. If you have an OAuth2 access token (for example, a [personal +API token][]), you can use it with the oauth2 library using: + +```go +import "golang.org/x/oauth2" + +func main() { + ctx := context.Background() + ts := oauth2.StaticTokenSource( + &oauth2.Token{AccessToken: "... your access token ..."}, + ) + tc := oauth2.NewClient(ctx, ts) + + client := github.NewClient(tc) + + // list all repositories for the authenticated user + repos, _, err := client.Repositories.List(ctx, "", nil) +} +``` + +Note that when using an authenticated Client, all calls made by the client will +include the specified OAuth token. Therefore, authenticated clients should +almost never be shared between different users. + +See the [oauth2 docs][] for complete instructions on using that library. + +For API methods that require HTTP Basic Authentication, use the +[`BasicAuthTransport`](https://godoc.org/github.com/google/go-github/github#BasicAuthTransport). + +GitHub Apps authentication can be provided by the [ghinstallation](https://github.com/bradleyfalzon/ghinstallation) +package. + +```go +import "github.com/bradleyfalzon/ghinstallation" + +func main() { + // Wrap the shared transport for use with the integration ID 1 authenticating with installation ID 99. + itr, err := ghinstallation.NewKeyFromFile(http.DefaultTransport, 1, 99, "2016-10-19.private-key.pem") + if err != nil { + // Handle error. + } + + // Use installation transport with client. + client := github.NewClient(&http.Client{Transport: itr}) + + // Use client... +} +``` + +### Rate Limiting ### + +GitHub imposes a rate limit on all API clients. 
Unauthenticated clients are
+limited to 60 requests per hour, while authenticated clients can make up to
+5,000 requests per hour. To receive the higher rate limit when making calls
+that are not issued on behalf of a user, use the
+`UnauthenticatedRateLimitedTransport`.
+
+The returned `Response.Rate` value contains the rate limit information
+from the most recent API call. If a recent enough response isn't
+available, you can use `RateLimits` to fetch the most up-to-date rate
+limit data for the client.
+
+To detect an API rate limit error, you can check if its type is `*github.RateLimitError`:
+
+```go
+repos, _, err := client.Repositories.List(ctx, "", nil)
+if _, ok := err.(*github.RateLimitError); ok {
+ log.Println("hit rate limit")
+}
+```
+
+Learn more about GitHub rate limiting at
+https://developer.github.com/v3/#rate-limiting.
+
+### Accepted Status ###
+
+Some endpoints may return a 202 Accepted status code, meaning that the
+information required is not yet ready and was scheduled to be gathered on
+the GitHub side. Methods known to behave like this are documented specifying
+this behavior.
+
+To detect this condition, you can check if the error's type is
+`*github.AcceptedError`:
+
+```go
+stats, _, err := client.Repositories.ListContributorsStats(ctx, org, repo)
+if _, ok := err.(*github.AcceptedError); ok {
+ log.Println("scheduled on GitHub side")
+}
+```
+
+### Conditional Requests ###
+
+The GitHub API has good support for conditional requests, which will help
+prevent you from burning through your rate limit, as well as help speed up your
+application. `go-github` does not handle conditional requests directly, but is
+instead designed to work with a caching `http.Transport`. We recommend using
+https://github.com/gregjones/httpcache for that.
+
+Learn more about GitHub conditional requests at
+https://developer.github.com/v3/#conditional-requests.
+
+### Creating and Updating Resources ###
+
+All structs for GitHub resources use pointer values for all non-repeated fields.
+This allows distinguishing between unset fields and those set to a zero-value.
+Helper functions have been provided to easily create these pointers for string,
+bool, and int values. For example:
+
+```go
+// create a new private repository named "foo"
+repo := &github.Repository{
+ Name: github.String("foo"),
+ Private: github.Bool(true),
+}
+client.Repositories.Create(ctx, "", repo)
+```
+
+Users who have worked with protocol buffers should find this pattern familiar.
+
+### Pagination ###
+
+All requests for resource collections (repos, pull requests, issues, etc.)
+support pagination. Pagination options are described in the
+`github.ListOptions` struct and passed to the list methods directly or as an
+embedded type of a more specific list options struct (for example
+`github.PullRequestListOptions`). Pages information is available via the
+`github.Response` struct.
+
+```go
+client := github.NewClient(nil)
+
+opt := &github.RepositoryListByOrgOptions{
+ ListOptions: github.ListOptions{PerPage: 10},
+}
+// get all pages of results
+var allRepos []*github.Repository
+for {
+ repos, resp, err := client.Repositories.ListByOrg(ctx, "github", opt)
+ if err != nil {
+ return err
+ }
+ allRepos = append(allRepos, repos...)
+ if resp.NextPage == 0 {
+ break
+ }
+ opt.Page = resp.NextPage
+}
+```
+
+For complete usage of go-github, see the full [package docs][].
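+
+The snippets above assume a `ctx` variable is already in scope. As a minimal
+sketch (not part of go-github itself; the 10-second timeout is an arbitrary
+example value), one way to create it with the standard `context` package is:
+
+```go
+import (
+ "context"
+ "time"
+)
+
+func main() {
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+ defer cancel()
+
+ client := github.NewClient(nil)
+
+ // any call that accepts ctx can now be canceled or timed out
+ client.Repositories.ListByOrg(ctx, "github", nil)
+}
+```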
+ +[GitHub API]: https://developer.github.com/v3/ +[oauth2]: https://github.com/golang/oauth2 +[oauth2 docs]: https://godoc.org/golang.org/x/oauth2 +[personal API token]: https://github.com/blog/1509-personal-api-tokens +[package docs]: https://godoc.org/github.com/google/go-github/github + +### Integration Tests ### + +You can run integration tests from the `tests` directory. See the integration tests [README](tests/README.md). +## Roadmap ## + +This library is being initially developed for an internal application at +Google, so API methods will likely be implemented in the order that they are +needed by that application. You can track the status of implementation in +[this Google spreadsheet][roadmap]. Eventually, I would like to cover the entire +GitHub API, so contributions are of course [always welcome][contributing]. The +calling pattern is pretty well established, so adding new methods is relatively +straightforward. + +[roadmap]: https://docs.google.com/spreadsheet/ccc?key=0ApoVX4GOiXr-dGNKN1pObFh6ek1DR2FKUjBNZ1FmaEE&usp=sharing +[contributing]: CONTRIBUTING.md + + +## Google App Engine ## + +Go on App Engine Classic (which as of this writing uses Go 1.6) can not use +the `"context"` import and still relies on `"golang.org/x/net/context"`. +As a result, if you wish to continue to use `go-github` on App Engine Classic, +you will need to rewrite all the `"context"` imports using the following command: + + `gofmt -w -r '"context" -> "golang.org/x/net/context"' *.go` + +See `with_appengine.go` for more details. + +## License ## + +This library is distributed under the BSD-style license found in the [LICENSE](./LICENSE) +file. diff --git a/vendor/github.com/google/go-github/github/BUILD b/vendor/github.com/google/go-github/github/BUILD index f20b3a2ac0a7..846cce923f57 100644 --- a/vendor/github.com/google/go-github/github/BUILD +++ b/vendor/github.com/google/go-github/github/BUILD @@ -1,9 +1,4 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "go_default_library", @@ -86,9 +81,96 @@ go_library( "without_appengine.go", ], importpath = "github.com/google/go-github/github", + visibility = ["//visibility:public"], deps = ["//vendor/github.com/google/go-querystring/query:go_default_library"], ) +go_test( + name = "go_default_test", + srcs = [ + "activity_events_test.go", + "activity_notifications_test.go", + "activity_star_test.go", + "activity_test.go", + "activity_watching_test.go", + "admin_test.go", + "apps_installation_test.go", + "apps_test.go", + "authorizations_test.go", + "gists_comments_test.go", + "gists_test.go", + "git_blobs_test.go", + "git_commits_test.go", + "git_refs_test.go", + "git_tags_test.go", + "git_trees_test.go", + "github_test.go", + "gitignore_test.go", + "issues_assignees_test.go", + "issues_comments_test.go", + "issues_events_test.go", + "issues_labels_test.go", + "issues_milestones_test.go", + "issues_test.go", + "issues_timeline_test.go", + "licenses_test.go", + "messages_test.go", + "migrations_source_import_test.go", + "migrations_test.go", + "misc_test.go", + "orgs_hooks_test.go", + "orgs_members_test.go", + "orgs_outside_collaborators_test.go", + "orgs_projects_test.go", + "orgs_teams_test.go", + "orgs_test.go", + "orgs_users_blocking_test.go", + "projects_test.go", + "pulls_comments_test.go", + "pulls_reviewers_test.go", + "pulls_reviews_test.go", + "pulls_test.go", + "reactions_test.go", + 
"repos_collaborators_test.go", + "repos_comments_test.go", + "repos_commits_test.go", + "repos_community_health_test.go", + "repos_contents_test.go", + "repos_deployments_test.go", + "repos_forks_test.go", + "repos_hooks_test.go", + "repos_invitations_test.go", + "repos_keys_test.go", + "repos_merging_test.go", + "repos_pages_test.go", + "repos_projects_test.go", + "repos_releases_test.go", + "repos_stats_test.go", + "repos_statuses_test.go", + "repos_test.go", + "repos_traffic_test.go", + "search_test.go", + "strings_test.go", + "timestamp_test.go", + "users_administration_test.go", + "users_blocking_test.go", + "users_emails_test.go", + "users_followers_test.go", + "users_gpg_keys_test.go", + "users_keys_test.go", + "users_test.go", + ], + importpath = "github.com/google/go-github/github", + library = ":go_default_library", +) + +go_test( + name = "go_default_xtest", + srcs = ["examples_test.go"], + importpath = "github.com/google/go-github/github_test", + deps = [":go_default_library"], +) + filegroup( name = "package-srcs", srcs = glob(["**"]), @@ -100,4 +182,5 @@ filegroup( name = "all-srcs", srcs = [":package-srcs"], tags = ["automanaged"], + visibility = ["//visibility:public"], ) diff --git a/vendor/github.com/google/go-github/github/activity_events_test.go b/vendor/github.com/google/go-github/github/activity_events_test.go new file mode 100644 index 000000000000..2b59d11be8e0 --- /dev/null +++ b/vendor/github.com/google/go-github/github/activity_events_test.go @@ -0,0 +1,331 @@ +// Copyright 2013 The go-github AUTHORS. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package github + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "reflect" + "testing" +) + +func TestActivityService_ListEvents(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/events", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testFormValues(t, r, values{ + "page": "2", + }) + fmt.Fprint(w, `[{"id":"1"},{"id":"2"}]`) + }) + + opt := &ListOptions{Page: 2} + events, _, err := client.Activity.ListEvents(context.Background(), opt) + if err != nil { + t.Errorf("Activities.ListEvents returned error: %v", err) + } + + want := []*Event{{ID: String("1")}, {ID: String("2")}} + if !reflect.DeepEqual(events, want) { + t.Errorf("Activities.ListEvents returned %+v, want %+v", events, want) + } +} + +func TestActivityService_ListRepositoryEvents(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/events", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testFormValues(t, r, values{ + "page": "2", + }) + fmt.Fprint(w, `[{"id":"1"},{"id":"2"}]`) + }) + + opt := &ListOptions{Page: 2} + events, _, err := client.Activity.ListRepositoryEvents(context.Background(), "o", "r", opt) + if err != nil { + t.Errorf("Activities.ListRepositoryEvents returned error: %v", err) + } + + want := []*Event{{ID: String("1")}, {ID: String("2")}} + if !reflect.DeepEqual(events, want) { + t.Errorf("Activities.ListRepositoryEvents returned %+v, want %+v", events, want) + } +} + +func TestActivityService_ListRepositoryEvents_invalidOwner(t *testing.T) { + _, _, err := client.Activity.ListRepositoryEvents(context.Background(), "%", "%", nil) + testURLParseError(t, err) +} + +func TestActivityService_ListIssueEventsForRepository(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/issues/events", func(w http.ResponseWriter, r *http.Request) { 
+ testMethod(t, r, "GET") + testFormValues(t, r, values{ + "page": "2", + }) + fmt.Fprint(w, `[{"id":1},{"id":2}]`) + }) + + opt := &ListOptions{Page: 2} + events, _, err := client.Activity.ListIssueEventsForRepository(context.Background(), "o", "r", opt) + if err != nil { + t.Errorf("Activities.ListIssueEventsForRepository returned error: %v", err) + } + + want := []*IssueEvent{{ID: Int(1)}, {ID: Int(2)}} + if !reflect.DeepEqual(events, want) { + t.Errorf("Activities.ListIssueEventsForRepository returned %+v, want %+v", events, want) + } +} + +func TestActivityService_ListIssueEventsForRepository_invalidOwner(t *testing.T) { + _, _, err := client.Activity.ListIssueEventsForRepository(context.Background(), "%", "%", nil) + testURLParseError(t, err) +} + +func TestActivityService_ListEventsForRepoNetwork(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/networks/o/r/events", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testFormValues(t, r, values{ + "page": "2", + }) + fmt.Fprint(w, `[{"id":"1"},{"id":"2"}]`) + }) + + opt := &ListOptions{Page: 2} + events, _, err := client.Activity.ListEventsForRepoNetwork(context.Background(), "o", "r", opt) + if err != nil { + t.Errorf("Activities.ListEventsForRepoNetwork returned error: %v", err) + } + + want := []*Event{{ID: String("1")}, {ID: String("2")}} + if !reflect.DeepEqual(events, want) { + t.Errorf("Activities.ListEventsForRepoNetwork returned %+v, want %+v", events, want) + } +} + +func TestActivityService_ListEventsForRepoNetwork_invalidOwner(t *testing.T) { + _, _, err := client.Activity.ListEventsForRepoNetwork(context.Background(), "%", "%", nil) + testURLParseError(t, err) +} + +func TestActivityService_ListEventsForOrganization(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/orgs/o/events", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testFormValues(t, r, values{ + "page": "2", + }) + fmt.Fprint(w, `[{"id":"1"},{"id":"2"}]`) + }) + + opt := &ListOptions{Page: 2} + events, _, err := client.Activity.ListEventsForOrganization(context.Background(), "o", opt) + if err != nil { + t.Errorf("Activities.ListEventsForOrganization returned error: %v", err) + } + + want := []*Event{{ID: String("1")}, {ID: String("2")}} + if !reflect.DeepEqual(events, want) { + t.Errorf("Activities.ListEventsForOrganization returned %+v, want %+v", events, want) + } +} + +func TestActivityService_ListEventsForOrganization_invalidOrg(t *testing.T) { + _, _, err := client.Activity.ListEventsForOrganization(context.Background(), "%", nil) + testURLParseError(t, err) +} + +func TestActivityService_ListEventsPerformedByUser_all(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/users/u/events", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testFormValues(t, r, values{ + "page": "2", + }) + fmt.Fprint(w, `[{"id":"1"},{"id":"2"}]`) + }) + + opt := &ListOptions{Page: 2} + events, _, err := client.Activity.ListEventsPerformedByUser(context.Background(), "u", false, opt) + if err != nil { + t.Errorf("Events.ListPerformedByUser returned error: %v", err) + } + + want := []*Event{{ID: String("1")}, {ID: String("2")}} + if !reflect.DeepEqual(events, want) { + t.Errorf("Events.ListPerformedByUser returned %+v, want %+v", events, want) + } +} + +func TestActivityService_ListEventsPerformedByUser_publicOnly(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/users/u/events/public", func(w http.ResponseWriter, r *http.Request) { + 
testMethod(t, r, "GET") + fmt.Fprint(w, `[{"id":"1"},{"id":"2"}]`) + }) + + events, _, err := client.Activity.ListEventsPerformedByUser(context.Background(), "u", true, nil) + if err != nil { + t.Errorf("Events.ListPerformedByUser returned error: %v", err) + } + + want := []*Event{{ID: String("1")}, {ID: String("2")}} + if !reflect.DeepEqual(events, want) { + t.Errorf("Events.ListPerformedByUser returned %+v, want %+v", events, want) + } +} + +func TestActivityService_ListEventsPerformedByUser_invalidUser(t *testing.T) { + _, _, err := client.Activity.ListEventsPerformedByUser(context.Background(), "%", false, nil) + testURLParseError(t, err) +} + +func TestActivityService_ListEventsReceivedByUser_all(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/users/u/received_events", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testFormValues(t, r, values{ + "page": "2", + }) + fmt.Fprint(w, `[{"id":"1"},{"id":"2"}]`) + }) + + opt := &ListOptions{Page: 2} + events, _, err := client.Activity.ListEventsReceivedByUser(context.Background(), "u", false, opt) + if err != nil { + t.Errorf("Events.ListReceivedByUser returned error: %v", err) + } + + want := []*Event{{ID: String("1")}, {ID: String("2")}} + if !reflect.DeepEqual(events, want) { + t.Errorf("Events.ListReceivedUser returned %+v, want %+v", events, want) + } +} + +func TestActivityService_ListEventsReceivedByUser_publicOnly(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/users/u/received_events/public", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + fmt.Fprint(w, `[{"id":"1"},{"id":"2"}]`) + }) + + events, _, err := client.Activity.ListEventsReceivedByUser(context.Background(), "u", true, nil) + if err != nil { + t.Errorf("Events.ListReceivedByUser returned error: %v", err) + } + + want := []*Event{{ID: String("1")}, {ID: String("2")}} + if !reflect.DeepEqual(events, want) { + t.Errorf("Events.ListReceivedByUser returned %+v, want %+v", events, want) + } +} + +func TestActivityService_ListEventsReceivedByUser_invalidUser(t *testing.T) { + _, _, err := client.Activity.ListEventsReceivedByUser(context.Background(), "%", false, nil) + testURLParseError(t, err) +} + +func TestActivityService_ListUserEventsForOrganization(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/users/u/events/orgs/o", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testFormValues(t, r, values{ + "page": "2", + }) + fmt.Fprint(w, `[{"id":"1"},{"id":"2"}]`) + }) + + opt := &ListOptions{Page: 2} + events, _, err := client.Activity.ListUserEventsForOrganization(context.Background(), "o", "u", opt) + if err != nil { + t.Errorf("Activities.ListUserEventsForOrganization returned error: %v", err) + } + + want := []*Event{{ID: String("1")}, {ID: String("2")}} + if !reflect.DeepEqual(events, want) { + t.Errorf("Activities.ListUserEventsForOrganization returned %+v, want %+v", events, want) + } +} + +func TestActivityService_EventParsePayload_typed(t *testing.T) { + raw := []byte(`{"type": "PushEvent","payload":{"push_id": 1}}`) + var event *Event + if err := json.Unmarshal(raw, &event); err != nil { + t.Fatalf("Unmarshal Event returned error: %v", err) + } + + want := &PushEvent{PushID: Int(1)} + got, err := event.ParsePayload() + if err != nil { + t.Fatalf("ParsePayload returned unexpected error: %v", err) + } + if !reflect.DeepEqual(got, want) { + t.Errorf("Event.ParsePayload returned %+v, want %+v", got, want) + } +} + +// 
TestEvent_Payload_untyped checks that unrecognized events are parsed to an +// interface{} value (instead of being discarded or throwing an error), for +// forward compatibility with new event types. +func TestActivityService_EventParsePayload_untyped(t *testing.T) { + raw := []byte(`{"type": "UnrecognizedEvent","payload":{"field": "val"}}`) + var event *Event + if err := json.Unmarshal(raw, &event); err != nil { + t.Fatalf("Unmarshal Event returned error: %v", err) + } + + want := map[string]interface{}{"field": "val"} + got, err := event.ParsePayload() + if err != nil { + t.Fatalf("ParsePayload returned unexpected error: %v", err) + } + if !reflect.DeepEqual(got, want) { + t.Errorf("Event.ParsePayload returned %+v, want %+v", got, want) + } +} + +func TestActivityService_EventParsePayload_installation(t *testing.T) { + raw := []byte(`{"type": "PullRequestEvent","payload":{"installation":{"id":1}}}`) + var event *Event + if err := json.Unmarshal(raw, &event); err != nil { + t.Fatalf("Unmarshal Event returned error: %v", err) + } + + want := &PullRequestEvent{Installation: &Installation{ID: Int(1)}} + got, err := event.ParsePayload() + if err != nil { + t.Fatalf("ParsePayload returned unexpected error: %v", err) + } + if !reflect.DeepEqual(got, want) { + t.Errorf("Event.ParsePayload returned %+v, want %+v", got, want) + } +} diff --git a/vendor/github.com/google/go-github/github/activity_notifications_test.go b/vendor/github.com/google/go-github/github/activity_notifications_test.go new file mode 100644 index 000000000000..cab8f94da8df --- /dev/null +++ b/vendor/github.com/google/go-github/github/activity_notifications_test.go @@ -0,0 +1,204 @@ +// Copyright 2014 The go-github AUTHORS. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package github + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "reflect" + "testing" + "time" +) + +func TestActivityService_ListNotification(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/notifications", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testFormValues(t, r, values{ + "all": "true", + "participating": "true", + "since": "2006-01-02T15:04:05Z", + "before": "2007-03-04T15:04:05Z", + }) + + fmt.Fprint(w, `[{"id":"1", "subject":{"title":"t"}}]`) + }) + + opt := &NotificationListOptions{ + All: true, + Participating: true, + Since: time.Date(2006, 01, 02, 15, 04, 05, 0, time.UTC), + Before: time.Date(2007, 03, 04, 15, 04, 05, 0, time.UTC), + } + notifications, _, err := client.Activity.ListNotifications(context.Background(), opt) + if err != nil { + t.Errorf("Activity.ListNotifications returned error: %v", err) + } + + want := []*Notification{{ID: String("1"), Subject: &NotificationSubject{Title: String("t")}}} + if !reflect.DeepEqual(notifications, want) { + t.Errorf("Activity.ListNotifications returned %+v, want %+v", notifications, want) + } +} + +func TestActivityService_ListRepositoryNotification(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/notifications", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + fmt.Fprint(w, `[{"id":"1"}]`) + }) + + notifications, _, err := client.Activity.ListRepositoryNotifications(context.Background(), "o", "r", nil) + if err != nil { + t.Errorf("Activity.ListRepositoryNotifications returned error: %v", err) + } + + want := []*Notification{{ID: String("1")}} + if !reflect.DeepEqual(notifications, want) { + t.Errorf("Activity.ListRepositoryNotifications returned %+v, want %+v", notifications, want) + } +} + +func TestActivityService_MarkNotificationsRead(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/notifications", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "PUT") + testHeader(t, r, "Content-Type", "application/json") + testBody(t, r, `{"last_read_at":"2006-01-02T15:04:05Z"}`+"\n") + + w.WriteHeader(http.StatusResetContent) + }) + + _, err := client.Activity.MarkNotificationsRead(context.Background(), time.Date(2006, 01, 02, 15, 04, 05, 0, time.UTC)) + if err != nil { + t.Errorf("Activity.MarkNotificationsRead returned error: %v", err) + } +} + +func TestActivityService_MarkRepositoryNotificationsRead(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/notifications", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "PUT") + testHeader(t, r, "Content-Type", "application/json") + testBody(t, r, `{"last_read_at":"2006-01-02T15:04:05Z"}`+"\n") + + w.WriteHeader(http.StatusResetContent) + }) + + _, err := client.Activity.MarkRepositoryNotificationsRead(context.Background(), "o", "r", time.Date(2006, 01, 02, 15, 04, 05, 0, time.UTC)) + if err != nil { + t.Errorf("Activity.MarkRepositoryNotificationsRead returned error: %v", err) + } +} + +func TestActivityService_GetThread(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/notifications/threads/1", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + fmt.Fprint(w, `{"id":"1"}`) + }) + + notification, _, err := client.Activity.GetThread(context.Background(), "1") + if err != nil { + t.Errorf("Activity.GetThread returned error: %v", err) + } + + want := &Notification{ID: String("1")} + if !reflect.DeepEqual(notification, want) { + t.Errorf("Activity.GetThread 
returned %+v, want %+v", notification, want) + } +} + +func TestActivityService_MarkThreadRead(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/notifications/threads/1", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "PATCH") + w.WriteHeader(http.StatusResetContent) + }) + + _, err := client.Activity.MarkThreadRead(context.Background(), "1") + if err != nil { + t.Errorf("Activity.MarkThreadRead returned error: %v", err) + } +} + +func TestActivityService_GetThreadSubscription(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/notifications/threads/1/subscription", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + fmt.Fprint(w, `{"subscribed":true}`) + }) + + sub, _, err := client.Activity.GetThreadSubscription(context.Background(), "1") + if err != nil { + t.Errorf("Activity.GetThreadSubscription returned error: %v", err) + } + + want := &Subscription{Subscribed: Bool(true)} + if !reflect.DeepEqual(sub, want) { + t.Errorf("Activity.GetThreadSubscription returned %+v, want %+v", sub, want) + } +} + +func TestActivityService_SetThreadSubscription(t *testing.T) { + setup() + defer teardown() + + input := &Subscription{Subscribed: Bool(true)} + + mux.HandleFunc("/notifications/threads/1/subscription", func(w http.ResponseWriter, r *http.Request) { + v := new(Subscription) + json.NewDecoder(r.Body).Decode(v) + + testMethod(t, r, "PUT") + if !reflect.DeepEqual(v, input) { + t.Errorf("Request body = %+v, want %+v", v, input) + } + + fmt.Fprint(w, `{"ignored":true}`) + }) + + sub, _, err := client.Activity.SetThreadSubscription(context.Background(), "1", input) + if err != nil { + t.Errorf("Activity.SetThreadSubscription returned error: %v", err) + } + + want := &Subscription{Ignored: Bool(true)} + if !reflect.DeepEqual(sub, want) { + t.Errorf("Activity.SetThreadSubscription returned %+v, want %+v", sub, want) + } +} + +func TestActivityService_DeleteThreadSubscription(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/notifications/threads/1/subscription", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "DELETE") + w.WriteHeader(http.StatusNoContent) + }) + + _, err := client.Activity.DeleteThreadSubscription(context.Background(), "1") + if err != nil { + t.Errorf("Activity.DeleteThreadSubscription returned error: %v", err) + } +} diff --git a/vendor/github.com/google/go-github/github/activity_star_test.go b/vendor/github.com/google/go-github/github/activity_star_test.go new file mode 100644 index 000000000000..542bfc66b4b7 --- /dev/null +++ b/vendor/github.com/google/go-github/github/activity_star_test.go @@ -0,0 +1,172 @@ +// Copyright 2013 The go-github AUTHORS. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package github + +import ( + "context" + "fmt" + "net/http" + "reflect" + "testing" + "time" +) + +func TestActivityService_ListStargazers(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/stargazers", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testHeader(t, r, "Accept", mediaTypeStarringPreview) + testFormValues(t, r, values{ + "page": "2", + }) + + fmt.Fprint(w, `[{"starred_at":"2002-02-10T15:30:00Z","user":{"id":1}}]`) + }) + + stargazers, _, err := client.Activity.ListStargazers(context.Background(), "o", "r", &ListOptions{Page: 2}) + if err != nil { + t.Errorf("Activity.ListStargazers returned error: %v", err) + } + + want := []*Stargazer{{StarredAt: &Timestamp{time.Date(2002, time.February, 10, 15, 30, 0, 0, time.UTC)}, User: &User{ID: Int(1)}}} + if !reflect.DeepEqual(stargazers, want) { + t.Errorf("Activity.ListStargazers returned %+v, want %+v", stargazers, want) + } +} + +func TestActivityService_ListStarred_authenticatedUser(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/user/starred", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testHeader(t, r, "Accept", mediaTypeStarringPreview) + fmt.Fprint(w, `[{"starred_at":"2002-02-10T15:30:00Z","repo":{"id":1}}]`) + }) + + repos, _, err := client.Activity.ListStarred(context.Background(), "", nil) + if err != nil { + t.Errorf("Activity.ListStarred returned error: %v", err) + } + + want := []*StarredRepository{{StarredAt: &Timestamp{time.Date(2002, time.February, 10, 15, 30, 0, 0, time.UTC)}, Repository: &Repository{ID: Int(1)}}} + if !reflect.DeepEqual(repos, want) { + t.Errorf("Activity.ListStarred returned %+v, want %+v", repos, want) + } +} + +func TestActivityService_ListStarred_specifiedUser(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/users/u/starred", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testHeader(t, r, "Accept", mediaTypeStarringPreview) + testFormValues(t, r, values{ + "sort": "created", + "direction": "asc", + "page": "2", + }) + fmt.Fprint(w, `[{"starred_at":"2002-02-10T15:30:00Z","repo":{"id":2}}]`) + }) + + opt := &ActivityListStarredOptions{"created", "asc", ListOptions{Page: 2}} + repos, _, err := client.Activity.ListStarred(context.Background(), "u", opt) + if err != nil { + t.Errorf("Activity.ListStarred returned error: %v", err) + } + + want := []*StarredRepository{{StarredAt: &Timestamp{time.Date(2002, time.February, 10, 15, 30, 0, 0, time.UTC)}, Repository: &Repository{ID: Int(2)}}} + if !reflect.DeepEqual(repos, want) { + t.Errorf("Activity.ListStarred returned %+v, want %+v", repos, want) + } +} + +func TestActivityService_ListStarred_invalidUser(t *testing.T) { + _, _, err := client.Activity.ListStarred(context.Background(), "%", nil) + testURLParseError(t, err) +} + +func TestActivityService_IsStarred_hasStar(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/user/starred/o/r", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + w.WriteHeader(http.StatusNoContent) + }) + + star, _, err := client.Activity.IsStarred(context.Background(), "o", "r") + if err != nil { + t.Errorf("Activity.IsStarred returned error: %v", err) + } + if want := true; star != want { + t.Errorf("Activity.IsStarred returned %+v, want %+v", star, want) + } +} + +func TestActivityService_IsStarred_noStar(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/user/starred/o/r", func(w http.ResponseWriter, r *http.Request) { + 
testMethod(t, r, "GET") + w.WriteHeader(http.StatusNotFound) + }) + + star, _, err := client.Activity.IsStarred(context.Background(), "o", "r") + if err != nil { + t.Errorf("Activity.IsStarred returned error: %v", err) + } + if want := false; star != want { + t.Errorf("Activity.IsStarred returned %+v, want %+v", star, want) + } +} + +func TestActivityService_IsStarred_invalidID(t *testing.T) { + _, _, err := client.Activity.IsStarred(context.Background(), "%", "%") + testURLParseError(t, err) +} + +func TestActivityService_Star(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/user/starred/o/r", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "PUT") + }) + + _, err := client.Activity.Star(context.Background(), "o", "r") + if err != nil { + t.Errorf("Activity.Star returned error: %v", err) + } +} + +func TestActivityService_Star_invalidID(t *testing.T) { + _, err := client.Activity.Star(context.Background(), "%", "%") + testURLParseError(t, err) +} + +func TestActivityService_Unstar(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/user/starred/o/r", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "DELETE") + }) + + _, err := client.Activity.Unstar(context.Background(), "o", "r") + if err != nil { + t.Errorf("Activity.Unstar returned error: %v", err) + } +} + +func TestActivityService_Unstar_invalidID(t *testing.T) { + _, err := client.Activity.Unstar(context.Background(), "%", "%") + testURLParseError(t, err) +} diff --git a/vendor/github.com/google/go-github/github/activity_test.go b/vendor/github.com/google/go-github/github/activity_test.go new file mode 100644 index 000000000000..98567a254b6b --- /dev/null +++ b/vendor/github.com/google/go-github/github/activity_test.go @@ -0,0 +1,129 @@ +// Copyright 2016 The go-github AUTHORS. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package github + +import ( + "context" + "net/http" + "reflect" + "testing" +) + +func TestActivityService_List(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/feeds", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + + w.WriteHeader(http.StatusOK) + w.Write(feedsJSON) + }) + + got, _, err := client.Activity.ListFeeds(context.Background()) + if err != nil { + t.Errorf("Activity.ListFeeds returned error: %v", err) + } + if want := wantFeeds; !reflect.DeepEqual(got, want) { + t.Errorf("Activity.ListFeeds = %+v, want %+v", got, want) + } +} + +var feedsJSON = []byte(`{ + "timeline_url": "https://github.com/timeline", + "user_url": "https://github.com/{user}", + "current_user_public_url": "https://github.com/defunkt", + "current_user_url": "https://github.com/defunkt.private?token=abc123", + "current_user_actor_url": "https://github.com/defunkt.private.actor?token=abc123", + "current_user_organization_url": "", + "current_user_organization_urls": [ + "https://github.com/organizations/github/defunkt.private.atom?token=abc123" + ], + "_links": { + "timeline": { + "href": "https://github.com/timeline", + "type": "application/atom+xml" + }, + "user": { + "href": "https://github.com/{user}", + "type": "application/atom+xml" + }, + "current_user_public": { + "href": "https://github.com/defunkt", + "type": "application/atom+xml" + }, + "current_user": { + "href": "https://github.com/defunkt.private?token=abc123", + "type": "application/atom+xml" + }, + "current_user_actor": { + "href": "https://github.com/defunkt.private.actor?token=abc123", + "type": "application/atom+xml" + }, + "current_user_organization": { + "href": "", + "type": "" + }, + "current_user_organizations": [ + { + "href": "https://github.com/organizations/github/defunkt.private.atom?token=abc123", + "type": "application/atom+xml" + } + ] + } +}`) + +var wantFeeds = &Feeds{ + TimelineURL: String("https://github.com/timeline"), + UserURL: String("https://github.com/{user}"), + CurrentUserPublicURL: String("https://github.com/defunkt"), + CurrentUserURL: String("https://github.com/defunkt.private?token=abc123"), + CurrentUserActorURL: String("https://github.com/defunkt.private.actor?token=abc123"), + CurrentUserOrganizationURL: String(""), + CurrentUserOrganizationURLs: []string{ + "https://github.com/organizations/github/defunkt.private.atom?token=abc123", + }, + Links: &struct { + Timeline *FeedLink `json:"timeline,omitempty"` + User *FeedLink `json:"user,omitempty"` + CurrentUserPublic *FeedLink `json:"current_user_public,omitempty"` + CurrentUser *FeedLink `json:"current_user,omitempty"` + CurrentUserActor *FeedLink `json:"current_user_actor,omitempty"` + CurrentUserOrganization *FeedLink `json:"current_user_organization,omitempty"` + CurrentUserOrganizations []FeedLink `json:"current_user_organizations,omitempty"` + }{ + Timeline: &FeedLink{ + HRef: String("https://github.com/timeline"), + Type: String("application/atom+xml"), + }, + User: &FeedLink{ + HRef: String("https://github.com/{user}"), + Type: String("application/atom+xml"), + }, + CurrentUserPublic: &FeedLink{ + HRef: String("https://github.com/defunkt"), + Type: String("application/atom+xml"), + }, + CurrentUser: &FeedLink{ + HRef: String("https://github.com/defunkt.private?token=abc123"), + Type: String("application/atom+xml"), + }, + CurrentUserActor: &FeedLink{ + HRef: String("https://github.com/defunkt.private.actor?token=abc123"), + Type: String("application/atom+xml"), + }, + CurrentUserOrganization: &FeedLink{ + HRef: 
String(""), + Type: String(""), + }, + CurrentUserOrganizations: []FeedLink{ + { + HRef: String("https://github.com/organizations/github/defunkt.private.atom?token=abc123"), + Type: String("application/atom+xml"), + }, + }, + }, +} diff --git a/vendor/github.com/google/go-github/github/activity_watching_test.go b/vendor/github.com/google/go-github/github/activity_watching_test.go new file mode 100644 index 000000000000..d765f67e9e08 --- /dev/null +++ b/vendor/github.com/google/go-github/github/activity_watching_test.go @@ -0,0 +1,184 @@ +// Copyright 2014 The go-github AUTHORS. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package github + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "reflect" + "testing" +) + +func TestActivityService_ListWatchers(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/subscribers", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testFormValues(t, r, values{ + "page": "2", + }) + + fmt.Fprint(w, `[{"id":1}]`) + }) + + watchers, _, err := client.Activity.ListWatchers(context.Background(), "o", "r", &ListOptions{Page: 2}) + if err != nil { + t.Errorf("Activity.ListWatchers returned error: %v", err) + } + + want := []*User{{ID: Int(1)}} + if !reflect.DeepEqual(watchers, want) { + t.Errorf("Activity.ListWatchers returned %+v, want %+v", watchers, want) + } +} + +func TestActivityService_ListWatched_authenticatedUser(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/user/subscriptions", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testFormValues(t, r, values{ + "page": "2", + }) + fmt.Fprint(w, `[{"id":1}]`) + }) + + watched, _, err := client.Activity.ListWatched(context.Background(), "", &ListOptions{Page: 2}) + if err != nil { + t.Errorf("Activity.ListWatched returned error: %v", err) + } + + want := []*Repository{{ID: Int(1)}} + if !reflect.DeepEqual(watched, want) { + t.Errorf("Activity.ListWatched returned %+v, want %+v", watched, want) + } +} + +func TestActivityService_ListWatched_specifiedUser(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/users/u/subscriptions", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testFormValues(t, r, values{ + "page": "2", + }) + fmt.Fprint(w, `[{"id":1}]`) + }) + + watched, _, err := client.Activity.ListWatched(context.Background(), "u", &ListOptions{Page: 2}) + if err != nil { + t.Errorf("Activity.ListWatched returned error: %v", err) + } + + want := []*Repository{{ID: Int(1)}} + if !reflect.DeepEqual(watched, want) { + t.Errorf("Activity.ListWatched returned %+v, want %+v", watched, want) + } +} + +func TestActivityService_GetRepositorySubscription_true(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/subscription", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + fmt.Fprint(w, `{"subscribed":true}`) + }) + + sub, _, err := client.Activity.GetRepositorySubscription(context.Background(), "o", "r") + if err != nil { + t.Errorf("Activity.GetRepositorySubscription returned error: %v", err) + } + + want := &Subscription{Subscribed: Bool(true)} + if !reflect.DeepEqual(sub, want) { + t.Errorf("Activity.GetRepositorySubscription returned %+v, want %+v", sub, want) + } +} + +func TestActivityService_GetRepositorySubscription_false(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/subscription", func(w 
http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + w.WriteHeader(http.StatusNotFound) + }) + + sub, _, err := client.Activity.GetRepositorySubscription(context.Background(), "o", "r") + if err != nil { + t.Errorf("Activity.GetRepositorySubscription returned error: %v", err) + } + + var want *Subscription + if !reflect.DeepEqual(sub, want) { + t.Errorf("Activity.GetRepositorySubscription returned %+v, want %+v", sub, want) + } +} + +func TestActivityService_GetRepositorySubscription_error(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/subscription", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + w.WriteHeader(http.StatusBadRequest) + }) + + _, _, err := client.Activity.GetRepositorySubscription(context.Background(), "o", "r") + if err == nil { + t.Errorf("Expected HTTP 400 response") + } +} + +func TestActivityService_SetRepositorySubscription(t *testing.T) { + setup() + defer teardown() + + input := &Subscription{Subscribed: Bool(true)} + + mux.HandleFunc("/repos/o/r/subscription", func(w http.ResponseWriter, r *http.Request) { + v := new(Subscription) + json.NewDecoder(r.Body).Decode(v) + + testMethod(t, r, "PUT") + if !reflect.DeepEqual(v, input) { + t.Errorf("Request body = %+v, want %+v", v, input) + } + + fmt.Fprint(w, `{"ignored":true}`) + }) + + sub, _, err := client.Activity.SetRepositorySubscription(context.Background(), "o", "r", input) + if err != nil { + t.Errorf("Activity.SetRepositorySubscription returned error: %v", err) + } + + want := &Subscription{Ignored: Bool(true)} + if !reflect.DeepEqual(sub, want) { + t.Errorf("Activity.SetRepositorySubscription returned %+v, want %+v", sub, want) + } +} + +func TestActivityService_DeleteRepositorySubscription(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/subscription", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "DELETE") + w.WriteHeader(http.StatusNoContent) + }) + + _, err := client.Activity.DeleteRepositorySubscription(context.Background(), "o", "r") + if err != nil { + t.Errorf("Activity.DeleteRepositorySubscription returned error: %v", err) + } +} diff --git a/vendor/github.com/google/go-github/github/admin_test.go b/vendor/github.com/google/go-github/github/admin_test.go new file mode 100644 index 000000000000..32a870eb4070 --- /dev/null +++ b/vendor/github.com/google/go-github/github/admin_test.go @@ -0,0 +1,81 @@ +// Copyright 2016 The go-github AUTHORS. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package github + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "reflect" + "testing" +) + +func TestAdminService_UpdateUserLDAPMapping(t *testing.T) { + setup() + defer teardown() + + input := &UserLDAPMapping{ + LDAPDN: String("uid=asdf,ou=users,dc=github,dc=com"), + } + + mux.HandleFunc("/admin/ldap/users/u/mapping", func(w http.ResponseWriter, r *http.Request) { + v := new(UserLDAPMapping) + json.NewDecoder(r.Body).Decode(v) + + testMethod(t, r, "PATCH") + if !reflect.DeepEqual(v, input) { + t.Errorf("Request body = %+v, want %+v", v, input) + } + fmt.Fprint(w, `{"id":1,"ldap_dn":"uid=asdf,ou=users,dc=github,dc=com"}`) + }) + + mapping, _, err := client.Admin.UpdateUserLDAPMapping(context.Background(), "u", input) + if err != nil { + t.Errorf("Admin.UpdateUserLDAPMapping returned error: %v", err) + } + + want := &UserLDAPMapping{ + ID: Int(1), + LDAPDN: String("uid=asdf,ou=users,dc=github,dc=com"), + } + if !reflect.DeepEqual(mapping, want) { + t.Errorf("Admin.UpdateUserLDAPMapping returned %+v, want %+v", mapping, want) + } +} + +func TestAdminService_UpdateTeamLDAPMapping(t *testing.T) { + setup() + defer teardown() + + input := &TeamLDAPMapping{ + LDAPDN: String("cn=Enterprise Ops,ou=teams,dc=github,dc=com"), + } + + mux.HandleFunc("/admin/ldap/teams/1/mapping", func(w http.ResponseWriter, r *http.Request) { + v := new(TeamLDAPMapping) + json.NewDecoder(r.Body).Decode(v) + + testMethod(t, r, "PATCH") + if !reflect.DeepEqual(v, input) { + t.Errorf("Request body = %+v, want %+v", v, input) + } + fmt.Fprint(w, `{"id":1,"ldap_dn":"cn=Enterprise Ops,ou=teams,dc=github,dc=com"}`) + }) + + mapping, _, err := client.Admin.UpdateTeamLDAPMapping(context.Background(), 1, input) + if err != nil { + t.Errorf("Admin.UpdateTeamLDAPMapping returned error: %v", err) + } + + want := &TeamLDAPMapping{ + ID: Int(1), + LDAPDN: String("cn=Enterprise Ops,ou=teams,dc=github,dc=com"), + } + if !reflect.DeepEqual(mapping, want) { + t.Errorf("Admin.UpdateTeamLDAPMapping returned %+v, want %+v", mapping, want) + } +} diff --git a/vendor/github.com/google/go-github/github/apps_installation_test.go b/vendor/github.com/google/go-github/github/apps_installation_test.go new file mode 100644 index 000000000000..462d15a3f29d --- /dev/null +++ b/vendor/github.com/google/go-github/github/apps_installation_test.go @@ -0,0 +1,40 @@ +// Copyright 2016 The go-github AUTHORS. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package github + +import ( + "context" + "fmt" + "net/http" + "reflect" + "testing" +) + +func TestAppsService_ListRepos(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/installation/repositories", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testHeader(t, r, "Accept", mediaTypeIntegrationPreview) + testFormValues(t, r, values{ + "page": "1", + "per_page": "2", + }) + fmt.Fprint(w, `{"repositories": [{"id":1}]}`) + }) + + opt := &ListOptions{Page: 1, PerPage: 2} + repositories, _, err := client.Apps.ListRepos(context.Background(), opt) + if err != nil { + t.Errorf("Apps.ListRepos returned error: %v", err) + } + + want := []*Repository{{ID: Int(1)}} + if !reflect.DeepEqual(repositories, want) { + t.Errorf("Apps.ListRepos returned %+v, want %+v", repositories, want) + } +} diff --git a/vendor/github.com/google/go-github/github/apps_test.go b/vendor/github.com/google/go-github/github/apps_test.go new file mode 100644 index 000000000000..f847b84271c1 --- /dev/null +++ b/vendor/github.com/google/go-github/github/apps_test.go @@ -0,0 +1,40 @@ +// Copyright 2016 The go-github AUTHORS. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package github + +import ( + "context" + "fmt" + "net/http" + "reflect" + "testing" +) + +func TestAppsService_ListInstallations(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/app/installations", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testHeader(t, r, "Accept", mediaTypeIntegrationPreview) + testFormValues(t, r, values{ + "page": "1", + "per_page": "2", + }) + fmt.Fprint(w, `[{"id":1}]`) + }) + + opt := &ListOptions{Page: 1, PerPage: 2} + installations, _, err := client.Apps.ListInstallations(context.Background(), opt) + if err != nil { + t.Errorf("Apps.ListInstallations returned error: %v", err) + } + + want := []*Installation{{ID: Int(1)}} + if !reflect.DeepEqual(installations, want) { + t.Errorf("Apps.ListInstallations returned %+v, want %+v", installations, want) + } +} diff --git a/vendor/github.com/google/go-github/github/authorizations_test.go b/vendor/github.com/google/go-github/github/authorizations_test.go new file mode 100644 index 000000000000..61bf21252a44 --- /dev/null +++ b/vendor/github.com/google/go-github/github/authorizations_test.go @@ -0,0 +1,359 @@ +// Copyright 2015 The go-github AUTHORS. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
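+// NOTE (editorial): the mutation tests below all follow one pattern: the
+// handler decodes the request body back into the same typed struct the
+// client serialized, then compares it against the input. A condensed sketch
+// of that shape (illustrative only; note that the vendored tests discard the
+// Decode error, which a stricter test would report):
+//
+//	mux.HandleFunc("/authorizations", func(w http.ResponseWriter, r *http.Request) {
+//		v := new(AuthorizationRequest)
+//		if err := json.NewDecoder(r.Body).Decode(v); err != nil {
+//			t.Errorf("decoding request body: %v", err)
+//		}
+//		testMethod(t, r, "POST")
+//		if !reflect.DeepEqual(v, input) {
+//			t.Errorf("Request body = %+v, want %+v", v, input)
+//		}
+//		fmt.Fprint(w, `{"id":1}`)
+//	})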
+ +package github + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "reflect" + "testing" +) + +func TestAuthorizationsService_List(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/authorizations", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testFormValues(t, r, values{"page": "1", "per_page": "2"}) + fmt.Fprint(w, `[{"id":1}]`) + }) + + opt := &ListOptions{Page: 1, PerPage: 2} + got, _, err := client.Authorizations.List(context.Background(), opt) + if err != nil { + t.Errorf("Authorizations.List returned error: %v", err) + } + + want := []*Authorization{{ID: Int(1)}} + if !reflect.DeepEqual(got, want) { + t.Errorf("Authorizations.List returned %+v, want %+v", *got[0].ID, *want[0].ID) + } +} + +func TestAuthorizationsService_Get(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/authorizations/1", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + fmt.Fprint(w, `{"id":1}`) + }) + + got, _, err := client.Authorizations.Get(context.Background(), 1) + if err != nil { + t.Errorf("Authorizations.Get returned error: %v", err) + } + + want := &Authorization{ID: Int(1)} + if !reflect.DeepEqual(got, want) { + t.Errorf("Authorizations.Get returned auth %+v, want %+v", got, want) + } +} + +func TestAuthorizationsService_Create(t *testing.T) { + setup() + defer teardown() + + input := &AuthorizationRequest{ + Note: String("test"), + } + + mux.HandleFunc("/authorizations", func(w http.ResponseWriter, r *http.Request) { + v := new(AuthorizationRequest) + json.NewDecoder(r.Body).Decode(v) + + testMethod(t, r, "POST") + if !reflect.DeepEqual(v, input) { + t.Errorf("Request body = %+v, want %+v", v, input) + } + + fmt.Fprint(w, `{"ID":1}`) + }) + + got, _, err := client.Authorizations.Create(context.Background(), input) + if err != nil { + t.Errorf("Authorizations.Create returned error: %v", err) + } + + want := &Authorization{ID: Int(1)} + if !reflect.DeepEqual(got, want) { + t.Errorf("Authorization.Create returned %+v, want %+v", got, want) + } +} + +func TestAuthorizationsService_GetOrCreateForApp(t *testing.T) { + setup() + defer teardown() + + input := &AuthorizationRequest{ + Note: String("test"), + } + + mux.HandleFunc("/authorizations/clients/id", func(w http.ResponseWriter, r *http.Request) { + v := new(AuthorizationRequest) + json.NewDecoder(r.Body).Decode(v) + + testMethod(t, r, "PUT") + if !reflect.DeepEqual(v, input) { + t.Errorf("Request body = %+v, want %+v", v, input) + } + + fmt.Fprint(w, `{"ID":1}`) + }) + + got, _, err := client.Authorizations.GetOrCreateForApp(context.Background(), "id", input) + if err != nil { + t.Errorf("Authorizations.GetOrCreateForApp returned error: %v", err) + } + + want := &Authorization{ID: Int(1)} + if !reflect.DeepEqual(got, want) { + t.Errorf("Authorization.GetOrCreateForApp returned %+v, want %+v", got, want) + } +} + +func TestAuthorizationsService_GetOrCreateForApp_Fingerprint(t *testing.T) { + setup() + defer teardown() + + input := &AuthorizationRequest{ + Note: String("test"), + Fingerprint: String("fp"), + } + + mux.HandleFunc("/authorizations/clients/id/fp", func(w http.ResponseWriter, r *http.Request) { + v := new(AuthorizationRequest) + json.NewDecoder(r.Body).Decode(v) + + testMethod(t, r, "PUT") + if !reflect.DeepEqual(v, input) { + t.Errorf("Request body = %+v, want %+v", v, input) + } + + fmt.Fprint(w, `{"ID":1}`) + }) + + got, _, err := client.Authorizations.GetOrCreateForApp(context.Background(), "id", input) + if err != nil { + 
t.Errorf("Authorizations.GetOrCreateForApp returned error: %v", err) + } + + want := &Authorization{ID: Int(1)} + if !reflect.DeepEqual(got, want) { + t.Errorf("Authorization.GetOrCreateForApp returned %+v, want %+v", got, want) + } +} + +func TestAuthorizationsService_Edit(t *testing.T) { + setup() + defer teardown() + + input := &AuthorizationUpdateRequest{ + Note: String("test"), + } + + mux.HandleFunc("/authorizations/1", func(w http.ResponseWriter, r *http.Request) { + v := new(AuthorizationUpdateRequest) + json.NewDecoder(r.Body).Decode(v) + + testMethod(t, r, "PATCH") + if !reflect.DeepEqual(v, input) { + t.Errorf("Request body = %+v, want %+v", v, input) + } + + fmt.Fprint(w, `{"ID":1}`) + }) + + got, _, err := client.Authorizations.Edit(context.Background(), 1, input) + if err != nil { + t.Errorf("Authorizations.Edit returned error: %v", err) + } + + want := &Authorization{ID: Int(1)} + if !reflect.DeepEqual(got, want) { + t.Errorf("Authorization.Update returned %+v, want %+v", got, want) + } +} + +func TestAuthorizationsService_Delete(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/authorizations/1", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "DELETE") + w.WriteHeader(http.StatusNoContent) + }) + + _, err := client.Authorizations.Delete(context.Background(), 1) + if err != nil { + t.Errorf("Authorizations.Delete returned error: %v", err) + } +} + +func TestAuthorizationsService_Check(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/applications/id/tokens/t", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + fmt.Fprint(w, `{"id":1}`) + }) + + got, _, err := client.Authorizations.Check(context.Background(), "id", "t") + if err != nil { + t.Errorf("Authorizations.Check returned error: %v", err) + } + + want := &Authorization{ID: Int(1)} + if !reflect.DeepEqual(got, want) { + t.Errorf("Authorizations.Check returned auth %+v, want %+v", got, want) + } +} + +func TestAuthorizationsService_Reset(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/applications/id/tokens/t", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "POST") + fmt.Fprint(w, `{"ID":1}`) + }) + + got, _, err := client.Authorizations.Reset(context.Background(), "id", "t") + if err != nil { + t.Errorf("Authorizations.Reset returned error: %v", err) + } + + want := &Authorization{ID: Int(1)} + if !reflect.DeepEqual(got, want) { + t.Errorf("Authorizations.Reset returned auth %+v, want %+v", got, want) + } +} + +func TestAuthorizationsService_Revoke(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/applications/id/tokens/t", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "DELETE") + w.WriteHeader(http.StatusNoContent) + }) + + _, err := client.Authorizations.Revoke(context.Background(), "id", "t") + if err != nil { + t.Errorf("Authorizations.Revoke returned error: %v", err) + } +} + +func TestListGrants(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/applications/grants", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + fmt.Fprint(w, `[{"id": 1}]`) + }) + + got, _, err := client.Authorizations.ListGrants(context.Background(), nil) + if err != nil { + t.Errorf("OAuthAuthorizations.ListGrants returned error: %v", err) + } + + want := []*Grant{{ID: Int(1)}} + if !reflect.DeepEqual(got, want) { + t.Errorf("OAuthAuthorizations.ListGrants = %+v, want %+v", got, want) + } +} + +func TestListGrants_withOptions(t *testing.T) { + setup() + 
defer teardown() + + mux.HandleFunc("/applications/grants", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testFormValues(t, r, values{ + "page": "2", + }) + fmt.Fprint(w, `[{"id": 1}]`) + }) + + _, _, err := client.Authorizations.ListGrants(context.Background(), &ListOptions{Page: 2}) + if err != nil { + t.Errorf("OAuthAuthorizations.ListGrants returned error: %v", err) + } +} + +func TestGetGrant(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/applications/grants/1", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + fmt.Fprint(w, `{"id": 1}`) + }) + + got, _, err := client.Authorizations.GetGrant(context.Background(), 1) + if err != nil { + t.Errorf("OAuthAuthorizations.GetGrant returned error: %v", err) + } + + want := &Grant{ID: Int(1)} + if !reflect.DeepEqual(got, want) { + t.Errorf("OAuthAuthorizations.GetGrant = %+v, want %+v", got, want) + } +} + +func TestDeleteGrant(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/applications/grants/1", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "DELETE") + }) + + _, err := client.Authorizations.DeleteGrant(context.Background(), 1) + if err != nil { + t.Errorf("OAuthAuthorizations.DeleteGrant returned error: %v", err) + } +} + +func TestAuthorizationsService_CreateImpersonation(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/admin/users/u/authorizations", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "POST") + fmt.Fprint(w, `{"id":1}`) + }) + + req := &AuthorizationRequest{Scopes: []Scope{ScopePublicRepo}} + got, _, err := client.Authorizations.CreateImpersonation(context.Background(), "u", req) + if err != nil { + t.Errorf("Authorizations.CreateImpersonation returned error: %+v", err) + } + + want := &Authorization{ID: Int(1)} + if !reflect.DeepEqual(got, want) { + t.Errorf("Authorizations.CreateImpersonation returned %+v, want %+v", *got.ID, *want.ID) + } +} + +func TestAuthorizationsService_DeleteImpersonation(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/admin/users/u/authorizations", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "DELETE") + }) + + _, err := client.Authorizations.DeleteImpersonation(context.Background(), "u") + if err != nil { + t.Errorf("Authorizations.DeleteImpersonation returned error: %+v", err) + } +} diff --git a/vendor/github.com/google/go-github/github/examples_test.go b/vendor/github.com/google/go-github/github/examples_test.go new file mode 100644 index 000000000000..f09d6505ac76 --- /dev/null +++ b/vendor/github.com/google/go-github/github/examples_test.go @@ -0,0 +1,76 @@ +// Copyright 2016 The go-github AUTHORS. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
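+// NOTE (editorial): ExampleUsersService_ListAll below paginates with the
+// Since cursor, which is specific to that endpoint. For ordinary list
+// endpoints the usual go-github idiom is to follow Response.NextPage -- a
+// sketch under that assumption, not part of the upstream examples:
+//
+//	opt := &github.RepositoryListOptions{ListOptions: github.ListOptions{PerPage: 50}}
+//	var allRepos []*github.Repository
+//	for {
+//		repos, resp, err := client.Repositories.List(ctx, "octocat", opt)
+//		if err != nil {
+//			log.Fatal(err)
+//		}
+//		allRepos = append(allRepos, repos...)
+//		if resp.NextPage == 0 {
+//			break // last page reached
+//		}
+//		opt.Page = resp.NextPage
+//	}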
+ +package github_test + +import ( + "context" + "fmt" + "log" + + "github.com/google/go-github/github" +) + +func ExampleClient_Markdown() { + client := github.NewClient(nil) + + input := "# heading #\n\nLink to issue #1" + opt := &github.MarkdownOptions{Mode: "gfm", Context: "google/go-github"} + + output, _, err := client.Markdown(context.Background(), input, opt) + if err != nil { + fmt.Println(err) + } + + fmt.Println(output) +} + +func ExampleRepositoriesService_GetReadme() { + client := github.NewClient(nil) + + readme, _, err := client.Repositories.GetReadme(context.Background(), "google", "go-github", nil) + if err != nil { + fmt.Println(err) + return + } + + content, err := readme.GetContent() + if err != nil { + fmt.Println(err) + return + } + + fmt.Printf("google/go-github README:\n%v\n", content) +} + +func ExampleRepositoriesService_List() { + client := github.NewClient(nil) + + user := "willnorris" + opt := &github.RepositoryListOptions{Type: "owner", Sort: "updated", Direction: "desc"} + + repos, _, err := client.Repositories.List(context.Background(), user, opt) + if err != nil { + fmt.Println(err) + } + + fmt.Printf("Recently updated repositories by %q: %v", user, github.Stringify(repos)) +} + +func ExampleUsersService_ListAll() { + client := github.NewClient(nil) + opts := &github.UserListOptions{} + for { + users, _, err := client.Users.ListAll(context.Background(), opts) + if err != nil { + log.Fatalf("error listing users: %v", err) + } + if len(users) == 0 { + break + } + opts.Since = *users[len(users)-1].ID + // Process users... + } +} diff --git a/vendor/github.com/google/go-github/github/gists_comments_test.go b/vendor/github.com/google/go-github/github/gists_comments_test.go new file mode 100644 index 000000000000..4b6841363ea5 --- /dev/null +++ b/vendor/github.com/google/go-github/github/gists_comments_test.go @@ -0,0 +1,154 @@ +// Copyright 2013 The go-github AUTHORS. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package github + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "reflect" + "testing" +) + +func TestGistsService_ListComments(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/gists/1/comments", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testFormValues(t, r, values{"page": "2"}) + fmt.Fprint(w, `[{"id": 1}]`) + }) + + opt := &ListOptions{Page: 2} + comments, _, err := client.Gists.ListComments(context.Background(), "1", opt) + if err != nil { + t.Errorf("Gists.Comments returned error: %v", err) + } + + want := []*GistComment{{ID: Int(1)}} + if !reflect.DeepEqual(comments, want) { + t.Errorf("Gists.ListComments returned %+v, want %+v", comments, want) + } +} + +func TestGistsService_ListComments_invalidID(t *testing.T) { + _, _, err := client.Gists.ListComments(context.Background(), "%", nil) + testURLParseError(t, err) +} + +func TestGistsService_GetComment(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/gists/1/comments/2", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + fmt.Fprint(w, `{"id": 1}`) + }) + + comment, _, err := client.Gists.GetComment(context.Background(), "1", 2) + if err != nil { + t.Errorf("Gists.GetComment returned error: %v", err) + } + + want := &GistComment{ID: Int(1)} + if !reflect.DeepEqual(comment, want) { + t.Errorf("Gists.GetComment returned %+v, want %+v", comment, want) + } +} + +func TestGistsService_GetComment_invalidID(t *testing.T) { + _, _, err := client.Gists.GetComment(context.Background(), "%", 1) + testURLParseError(t, err) +} + +func TestGistsService_CreateComment(t *testing.T) { + setup() + defer teardown() + + input := &GistComment{ID: Int(1), Body: String("b")} + + mux.HandleFunc("/gists/1/comments", func(w http.ResponseWriter, r *http.Request) { + v := new(GistComment) + json.NewDecoder(r.Body).Decode(v) + + testMethod(t, r, "POST") + if !reflect.DeepEqual(v, input) { + t.Errorf("Request body = %+v, want %+v", v, input) + } + + fmt.Fprint(w, `{"id":1}`) + }) + + comment, _, err := client.Gists.CreateComment(context.Background(), "1", input) + if err != nil { + t.Errorf("Gists.CreateComment returned error: %v", err) + } + + want := &GistComment{ID: Int(1)} + if !reflect.DeepEqual(comment, want) { + t.Errorf("Gists.CreateComment returned %+v, want %+v", comment, want) + } +} + +func TestGistsService_CreateComment_invalidID(t *testing.T) { + _, _, err := client.Gists.CreateComment(context.Background(), "%", nil) + testURLParseError(t, err) +} + +func TestGistsService_EditComment(t *testing.T) { + setup() + defer teardown() + + input := &GistComment{ID: Int(1), Body: String("b")} + + mux.HandleFunc("/gists/1/comments/2", func(w http.ResponseWriter, r *http.Request) { + v := new(GistComment) + json.NewDecoder(r.Body).Decode(v) + + testMethod(t, r, "PATCH") + if !reflect.DeepEqual(v, input) { + t.Errorf("Request body = %+v, want %+v", v, input) + } + + fmt.Fprint(w, `{"id":1}`) + }) + + comment, _, err := client.Gists.EditComment(context.Background(), "1", 2, input) + if err != nil { + t.Errorf("Gists.EditComment returned error: %v", err) + } + + want := &GistComment{ID: Int(1)} + if !reflect.DeepEqual(comment, want) { + t.Errorf("Gists.EditComment returned %+v, want %+v", comment, want) + } +} + +func TestGistsService_EditComment_invalidID(t *testing.T) { + _, _, err := client.Gists.EditComment(context.Background(), "%", 1, nil) + testURLParseError(t, err) +} + +func TestGistsService_DeleteComment(t *testing.T) { + setup() + defer teardown() 
+ + mux.HandleFunc("/gists/1/comments/2", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "DELETE") + }) + + _, err := client.Gists.DeleteComment(context.Background(), "1", 2) + if err != nil { + t.Errorf("Gists.Delete returned error: %v", err) + } +} + +func TestGistsService_DeleteComment_invalidID(t *testing.T) { + _, err := client.Gists.DeleteComment(context.Background(), "%", 1) + testURLParseError(t, err) +} diff --git a/vendor/github.com/google/go-github/github/gists_test.go b/vendor/github.com/google/go-github/github/gists_test.go new file mode 100644 index 000000000000..6065f277d8ad --- /dev/null +++ b/vendor/github.com/google/go-github/github/gists_test.go @@ -0,0 +1,507 @@ +// Copyright 2013 The go-github AUTHORS. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package github + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "reflect" + "testing" + "time" +) + +func TestGistsService_List_specifiedUser(t *testing.T) { + setup() + defer teardown() + + since := "2013-01-01T00:00:00Z" + + mux.HandleFunc("/users/u/gists", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testFormValues(t, r, values{ + "since": since, + }) + fmt.Fprint(w, `[{"id": "1"}]`) + }) + + opt := &GistListOptions{Since: time.Date(2013, time.January, 1, 0, 0, 0, 0, time.UTC)} + gists, _, err := client.Gists.List(context.Background(), "u", opt) + if err != nil { + t.Errorf("Gists.List returned error: %v", err) + } + + want := []*Gist{{ID: String("1")}} + if !reflect.DeepEqual(gists, want) { + t.Errorf("Gists.List returned %+v, want %+v", gists, want) + } +} + +func TestGistsService_List_authenticatedUser(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/gists", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + fmt.Fprint(w, `[{"id": "1"}]`) + }) + + gists, _, err := client.Gists.List(context.Background(), "", nil) + if err != nil { + t.Errorf("Gists.List returned error: %v", err) + } + + want := []*Gist{{ID: String("1")}} + if !reflect.DeepEqual(gists, want) { + t.Errorf("Gists.List returned %+v, want %+v", gists, want) + } +} + +func TestGistsService_List_invalidUser(t *testing.T) { + _, _, err := client.Gists.List(context.Background(), "%", nil) + testURLParseError(t, err) +} + +func TestGistsService_ListAll(t *testing.T) { + setup() + defer teardown() + + since := "2013-01-01T00:00:00Z" + + mux.HandleFunc("/gists/public", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testFormValues(t, r, values{ + "since": since, + }) + fmt.Fprint(w, `[{"id": "1"}]`) + }) + + opt := &GistListOptions{Since: time.Date(2013, time.January, 1, 0, 0, 0, 0, time.UTC)} + gists, _, err := client.Gists.ListAll(context.Background(), opt) + if err != nil { + t.Errorf("Gists.ListAll returned error: %v", err) + } + + want := []*Gist{{ID: String("1")}} + if !reflect.DeepEqual(gists, want) { + t.Errorf("Gists.ListAll returned %+v, want %+v", gists, want) + } +} + +func TestGistsService_ListStarred(t *testing.T) { + setup() + defer teardown() + + since := "2013-01-01T00:00:00Z" + + mux.HandleFunc("/gists/starred", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testFormValues(t, r, values{ + "since": since, + }) + fmt.Fprint(w, `[{"id": "1"}]`) + }) + + opt := &GistListOptions{Since: time.Date(2013, time.January, 1, 0, 0, 0, 0, time.UTC)} + gists, _, err := client.Gists.ListStarred(context.Background(), 
opt)
+	if err != nil {
+		t.Errorf("Gists.ListStarred returned error: %v", err)
+	}
+
+	want := []*Gist{{ID: String("1")}}
+	if !reflect.DeepEqual(gists, want) {
+		t.Errorf("Gists.ListStarred returned %+v, want %+v", gists, want)
+	}
+}
+
+func TestGistsService_Get(t *testing.T) {
+	setup()
+	defer teardown()
+
+	mux.HandleFunc("/gists/1", func(w http.ResponseWriter, r *http.Request) {
+		testMethod(t, r, "GET")
+		fmt.Fprint(w, `{"id": "1"}`)
+	})
+
+	gist, _, err := client.Gists.Get(context.Background(), "1")
+	if err != nil {
+		t.Errorf("Gists.Get returned error: %v", err)
+	}
+
+	want := &Gist{ID: String("1")}
+	if !reflect.DeepEqual(gist, want) {
+		t.Errorf("Gists.Get returned %+v, want %+v", gist, want)
+	}
+}
+
+func TestGistsService_Get_invalidID(t *testing.T) {
+	_, _, err := client.Gists.Get(context.Background(), "%")
+	testURLParseError(t, err)
+}
+
+func TestGistsService_GetRevision(t *testing.T) {
+	setup()
+	defer teardown()
+
+	mux.HandleFunc("/gists/1/s", func(w http.ResponseWriter, r *http.Request) {
+		testMethod(t, r, "GET")
+		fmt.Fprint(w, `{"id": "1"}`)
+	})
+
+	gist, _, err := client.Gists.GetRevision(context.Background(), "1", "s")
+	if err != nil {
+		t.Errorf("Gists.GetRevision returned error: %v", err)
+	}
+
+	want := &Gist{ID: String("1")}
+	if !reflect.DeepEqual(gist, want) {
+		t.Errorf("Gists.GetRevision returned %+v, want %+v", gist, want)
+	}
+}
+
+func TestGistsService_GetRevision_invalidID(t *testing.T) {
+	_, _, err := client.Gists.GetRevision(context.Background(), "%", "%")
+	testURLParseError(t, err)
+}
+
+func TestGistsService_Create(t *testing.T) {
+	setup()
+	defer teardown()
+
+	input := &Gist{
+		Description: String("Gist description"),
+		Public:      Bool(false),
+		Files: map[GistFilename]GistFile{
+			"test.txt": {Content: String("Gist file content")},
+		},
+	}
+
+	mux.HandleFunc("/gists", func(w http.ResponseWriter, r *http.Request) {
+		v := new(Gist)
+		json.NewDecoder(r.Body).Decode(v)
+
+		testMethod(t, r, "POST")
+		if !reflect.DeepEqual(v, input) {
+			t.Errorf("Request body = %+v, want %+v", v, input)
+		}
+
+		fmt.Fprint(w,
+			`
+			{
+				"id": "1",
+				"description": "Gist description",
+				"public": false,
+				"files": {
+					"test.txt": {
+						"filename": "test.txt"
+					}
+				}
+			}`)
+	})
+
+	gist, _, err := client.Gists.Create(context.Background(), input)
+	if err != nil {
+		t.Errorf("Gists.Create returned error: %v", err)
+	}
+
+	want := &Gist{
+		ID:          String("1"),
+		Description: String("Gist description"),
+		Public:      Bool(false),
+		Files: map[GistFilename]GistFile{
+			"test.txt": {Filename: String("test.txt")},
+		},
+	}
+	if !reflect.DeepEqual(gist, want) {
+		t.Errorf("Gists.Create returned %+v, want %+v", gist, want)
+	}
+}
+
+func TestGistsService_Edit(t *testing.T) {
+	setup()
+	defer teardown()
+
+	input := &Gist{
+		Description: String("New description"),
+		Files: map[GistFilename]GistFile{
+			"new.txt": {Content: String("new file content")},
+		},
+	}
+
+	mux.HandleFunc("/gists/1", func(w http.ResponseWriter, r *http.Request) {
+		v := new(Gist)
+		json.NewDecoder(r.Body).Decode(v)
+
+		testMethod(t, r, "PATCH")
+		if !reflect.DeepEqual(v, input) {
+			t.Errorf("Request body = %+v, want %+v", v, input)
+		}
+
+		fmt.Fprint(w,
+			`
+			{
+				"id": "1",
+				"description": "new description",
+				"public": false,
+				"files": {
+					"test.txt": {
+						"filename": "test.txt"
+					},
+					"new.txt": {
+						"filename": "new.txt"
+					}
+				}
+			}`)
+	})
+
+	gist, _, err := client.Gists.Edit(context.Background(), "1", input)
+	if err != nil {
+		t.Errorf("Gists.Edit returned error: %v", err)
+	}
+
+	want := &Gist{
+		ID:          String("1"),
Description: String("new description"), + Public: Bool(false), + Files: map[GistFilename]GistFile{ + "test.txt": {Filename: String("test.txt")}, + "new.txt": {Filename: String("new.txt")}, + }, + } + if !reflect.DeepEqual(gist, want) { + t.Errorf("Gists.Edit returned %+v, want %+v", gist, want) + } +} + +func TestGistsService_Edit_invalidID(t *testing.T) { + _, _, err := client.Gists.Edit(context.Background(), "%", nil) + testURLParseError(t, err) +} + +func TestGistsService_ListCommits(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/gists/1/commits", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testFormValues(t, r, nil) + fmt.Fprint(w, ` + [ + { + "url": "https://api.github.com/gists/1/1", + "version": "1", + "user": { + "id": 1 + }, + "change_status": { + "deletions": 0, + "additions": 180, + "total": 180 + }, + "committed_at": "2010-01-01T00:00:00Z" + } + ] + `) + }) + + gistCommits, _, err := client.Gists.ListCommits(context.Background(), "1", nil) + if err != nil { + t.Errorf("Gists.ListCommits returned error: %v", err) + } + + want := []*GistCommit{{ + URL: String("https://api.github.com/gists/1/1"), + Version: String("1"), + User: &User{ID: Int(1)}, + CommittedAt: &Timestamp{time.Date(2010, 1, 1, 00, 00, 00, 0, time.UTC)}, + ChangeStatus: &CommitStats{ + Additions: Int(180), + Deletions: Int(0), + Total: Int(180), + }}} + + if !reflect.DeepEqual(gistCommits, want) { + t.Errorf("Gists.ListCommits returned %+v, want %+v", gistCommits, want) + } +} + +func TestGistsService_ListCommits_withOptions(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/gists/1/commits", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testFormValues(t, r, values{ + "page": "2", + }) + fmt.Fprint(w, `[]`) + }) + + _, _, err := client.Gists.ListCommits(context.Background(), "1", &ListOptions{Page: 2}) + if err != nil { + t.Errorf("Gists.ListCommits returned error: %v", err) + } +} + +func TestGistsService_Delete(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/gists/1", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "DELETE") + }) + + _, err := client.Gists.Delete(context.Background(), "1") + if err != nil { + t.Errorf("Gists.Delete returned error: %v", err) + } +} + +func TestGistsService_Delete_invalidID(t *testing.T) { + _, err := client.Gists.Delete(context.Background(), "%") + testURLParseError(t, err) +} + +func TestGistsService_Star(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/gists/1/star", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "PUT") + }) + + _, err := client.Gists.Star(context.Background(), "1") + if err != nil { + t.Errorf("Gists.Star returned error: %v", err) + } +} + +func TestGistsService_Star_invalidID(t *testing.T) { + _, err := client.Gists.Star(context.Background(), "%") + testURLParseError(t, err) +} + +func TestGistsService_Unstar(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/gists/1/star", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "DELETE") + }) + + _, err := client.Gists.Unstar(context.Background(), "1") + if err != nil { + t.Errorf("Gists.Unstar returned error: %v", err) + } +} + +func TestGistsService_Unstar_invalidID(t *testing.T) { + _, err := client.Gists.Unstar(context.Background(), "%") + testURLParseError(t, err) +} + +func TestGistsService_IsStarred_hasStar(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/gists/1/star", func(w http.ResponseWriter, r 
*http.Request) {
+		testMethod(t, r, "GET")
+		w.WriteHeader(http.StatusNoContent)
+	})
+
+	star, _, err := client.Gists.IsStarred(context.Background(), "1")
+	if err != nil {
+		t.Errorf("Gists.IsStarred returned error: %v", err)
+	}
+	if want := true; star != want {
+		t.Errorf("Gists.IsStarred returned %+v, want %+v", star, want)
+	}
+}
+
+func TestGistsService_IsStarred_noStar(t *testing.T) {
+	setup()
+	defer teardown()
+
+	mux.HandleFunc("/gists/1/star", func(w http.ResponseWriter, r *http.Request) {
+		testMethod(t, r, "GET")
+		w.WriteHeader(http.StatusNotFound)
+	})
+
+	star, _, err := client.Gists.IsStarred(context.Background(), "1")
+	if err != nil {
+		t.Errorf("Gists.IsStarred returned error: %v", err)
+	}
+	if want := false; star != want {
+		t.Errorf("Gists.IsStarred returned %+v, want %+v", star, want)
+	}
+}
+
+func TestGistsService_IsStarred_invalidID(t *testing.T) {
+	_, _, err := client.Gists.IsStarred(context.Background(), "%")
+	testURLParseError(t, err)
+}
+
+func TestGistsService_Fork(t *testing.T) {
+	setup()
+	defer teardown()
+
+	mux.HandleFunc("/gists/1/forks", func(w http.ResponseWriter, r *http.Request) {
+		testMethod(t, r, "POST")
+		fmt.Fprint(w, `{"id": "2"}`)
+	})
+
+	gist, _, err := client.Gists.Fork(context.Background(), "1")
+	if err != nil {
+		t.Errorf("Gists.Fork returned error: %v", err)
+	}
+
+	want := &Gist{ID: String("2")}
+	if !reflect.DeepEqual(gist, want) {
+		t.Errorf("Gists.Fork returned %+v, want %+v", gist, want)
+	}
+}
+
+func TestGistsService_ListForks(t *testing.T) {
+	setup()
+	defer teardown()
+
+	mux.HandleFunc("/gists/1/forks", func(w http.ResponseWriter, r *http.Request) {
+		testMethod(t, r, "GET")
+		testFormValues(t, r, nil)
+		fmt.Fprint(w, `
+			[
+				{"url": "https://api.github.com/gists/1",
+				 "user": {"id": 1},
+				 "id": "1",
+				 "created_at": "2010-01-01T00:00:00Z",
+				 "updated_at": "2013-01-01T00:00:00Z"
+				}
+			]
+		`)
+	})
+
+	gistForks, _, err := client.Gists.ListForks(context.Background(), "1")
+	if err != nil {
+		t.Errorf("Gists.ListForks returned error: %v", err)
+	}
+
+	want := []*GistFork{{
+		URL:       String("https://api.github.com/gists/1"),
+		ID:        String("1"),
+		User:      &User{ID: Int(1)},
+		CreatedAt: &Timestamp{time.Date(2010, 1, 1, 00, 00, 00, 0, time.UTC)},
+		UpdatedAt: &Timestamp{time.Date(2013, 1, 1, 00, 00, 00, 0, time.UTC)}}}
+
+	if !reflect.DeepEqual(gistForks, want) {
+		t.Errorf("Gists.ListForks returned %+v, want %+v", gistForks, want)
+	}
+}
+
+func TestGistsService_Fork_invalidID(t *testing.T) {
+	_, _, err := client.Gists.Fork(context.Background(), "%")
+	testURLParseError(t, err)
+}
diff --git a/vendor/github.com/google/go-github/github/git_blobs_test.go b/vendor/github.com/google/go-github/github/git_blobs_test.go
new file mode 100644
index 000000000000..00861bb9a641
--- /dev/null
+++ b/vendor/github.com/google/go-github/github/git_blobs_test.go
@@ -0,0 +1,98 @@
+// Copyright 2014 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"reflect"
+	"testing"
+)
+
+func TestGitService_GetBlob(t *testing.T) {
+	setup()
+	defer teardown()
+
+	mux.HandleFunc("/repos/o/r/git/blobs/s", func(w http.ResponseWriter, r *http.Request) {
+		if m := "GET"; m != r.Method {
+			t.Errorf("Request method = %v, want %v", r.Method, m)
+		}
+		fmt.Fprint(w, `{
+			"sha": "s",
+			"content": "blob content"
+		}`)
+	})
+
+	blob, _, err := client.Git.GetBlob(context.Background(), "o", "r", "s")
+	if err != nil {
+		t.Errorf("Git.GetBlob returned error: %v", err)
+	}
+
+	want := Blob{
+		SHA:     String("s"),
+		Content: String("blob content"),
+	}
+
+	if !reflect.DeepEqual(*blob, want) {
+		t.Errorf("Git.GetBlob returned %+v, want %+v", *blob, want)
+	}
+}
+
+func TestGitService_GetBlob_invalidOwner(t *testing.T) {
+	_, _, err := client.Git.GetBlob(context.Background(), "%", "%", "%")
+	testURLParseError(t, err)
+}
+
+func TestGitService_CreateBlob(t *testing.T) {
+	setup()
+	defer teardown()
+
+	input := &Blob{
+		SHA:      String("s"),
+		Content:  String("blob content"),
+		Encoding: String("utf-8"),
+		Size:     Int(12),
+	}
+
+	mux.HandleFunc("/repos/o/r/git/blobs", func(w http.ResponseWriter, r *http.Request) {
+		v := new(Blob)
+		json.NewDecoder(r.Body).Decode(v)
+
+		if m := "POST"; m != r.Method {
+			t.Errorf("Request method = %v, want %v", r.Method, m)
+		}
+
+		want := input
+		if !reflect.DeepEqual(v, want) {
+			t.Errorf("Git.CreateBlob request body: %+v, want %+v", v, want)
+		}
+
+		fmt.Fprint(w, `{
+			"sha": "s",
+			"content": "blob content",
+			"encoding": "utf-8",
+			"size": 12
+		}`)
+	})
+
+	blob, _, err := client.Git.CreateBlob(context.Background(), "o", "r", input)
+	if err != nil {
+		t.Errorf("Git.CreateBlob returned error: %v", err)
+	}
+
+	want := input
+
+	if !reflect.DeepEqual(*blob, *want) {
+		t.Errorf("Git.CreateBlob returned %+v, want %+v", *blob, *want)
+	}
+}
+
+func TestGitService_CreateBlob_invalidOwner(t *testing.T) {
+	_, _, err := client.Git.CreateBlob(context.Background(), "%", "%", &Blob{})
+	testURLParseError(t, err)
+}
diff --git a/vendor/github.com/google/go-github/github/git_commits_test.go b/vendor/github.com/google/go-github/github/git_commits_test.go
new file mode 100644
index 000000000000..cd392329ea2d
--- /dev/null
+++ b/vendor/github.com/google/go-github/github/git_commits_test.go
@@ -0,0 +1,84 @@
+// Copyright 2013 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
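+// NOTE (editorial): TestGitService_CreateCommit below asserts against an
+// unexported wire type rather than the public Commit struct, because the
+// create-commit endpoint takes the tree as a bare SHA and parents as SHA
+// strings. Roughly the shape the test implies (a sketch, not necessarily
+// the full upstream definition):
+//
+//	type createCommit struct {
+//		Message *string  `json:"message,omitempty"`
+//		Tree    *string  `json:"tree,omitempty"`
+//		Parents []string `json:"parents,omitempty"`
+//	}
+//
+// i.e. Commit{Tree: &Tree{SHA: s}} is flattened to createCommit{Tree: s}
+// before being POSTed.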
+ +package github + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "reflect" + "testing" +) + +func TestGitService_GetCommit(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/git/commits/s", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testHeader(t, r, "Accept", mediaTypeGitSigningPreview) + fmt.Fprint(w, `{"sha":"s","message":"m","author":{"name":"n"}}`) + }) + + commit, _, err := client.Git.GetCommit(context.Background(), "o", "r", "s") + if err != nil { + t.Errorf("Git.GetCommit returned error: %v", err) + } + + want := &Commit{SHA: String("s"), Message: String("m"), Author: &CommitAuthor{Name: String("n")}} + if !reflect.DeepEqual(commit, want) { + t.Errorf("Git.GetCommit returned %+v, want %+v", commit, want) + } +} + +func TestGitService_GetCommit_invalidOwner(t *testing.T) { + _, _, err := client.Git.GetCommit(context.Background(), "%", "%", "%") + testURLParseError(t, err) +} + +func TestGitService_CreateCommit(t *testing.T) { + setup() + defer teardown() + + input := &Commit{ + Message: String("m"), + Tree: &Tree{SHA: String("t")}, + Parents: []Commit{{SHA: String("p")}}, + } + + mux.HandleFunc("/repos/o/r/git/commits", func(w http.ResponseWriter, r *http.Request) { + v := new(createCommit) + json.NewDecoder(r.Body).Decode(v) + + testMethod(t, r, "POST") + + want := &createCommit{ + Message: input.Message, + Tree: String("t"), + Parents: []string{"p"}, + } + if !reflect.DeepEqual(v, want) { + t.Errorf("Request body = %+v, want %+v", v, want) + } + fmt.Fprint(w, `{"sha":"s"}`) + }) + + commit, _, err := client.Git.CreateCommit(context.Background(), "o", "r", input) + if err != nil { + t.Errorf("Git.CreateCommit returned error: %v", err) + } + + want := &Commit{SHA: String("s")} + if !reflect.DeepEqual(commit, want) { + t.Errorf("Git.CreateCommit returned %+v, want %+v", commit, want) + } +} + +func TestGitService_CreateCommit_invalidOwner(t *testing.T) { + _, _, err := client.Git.CreateCommit(context.Background(), "%", "%", &Commit{}) + testURLParseError(t, err) +} diff --git a/vendor/github.com/google/go-github/github/git_refs_test.go b/vendor/github.com/google/go-github/github/git_refs_test.go new file mode 100644 index 000000000000..62025bf44460 --- /dev/null +++ b/vendor/github.com/google/go-github/github/git_refs_test.go @@ -0,0 +1,429 @@ +// Copyright 2013 The go-github AUTHORS. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
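+// NOTE (editorial): the GitHub refs endpoint is ambiguous -- a GET for a
+// partial ref name returns a JSON object on an exact match but a JSON array
+// of all matching refs otherwise. The tests below pin both client behaviors:
+// GetRef must fail on a multi-ref (array) response, while GetRefs accepts
+// either shape and always yields a slice. A caller-side sketch (hypothetical
+// helper, not upstream code):
+//
+//	func resolveBranch(ctx context.Context, c *Client, owner, repo, branch string) (*Reference, error) {
+//		ref, _, err := c.Git.GetRef(ctx, owner, repo, "refs/heads/"+branch)
+//		if err != nil {
+//			// includes the "no exact match found for this ref" case
+//			return nil, err
+//		}
+//		return ref, nil
+//	}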
+ +package github + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "reflect" + "testing" +) + +func TestGitService_GetRef_singleRef(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/git/refs/heads/b", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + fmt.Fprint(w, ` + { + "ref": "refs/heads/b", + "url": "https://api.github.com/repos/o/r/git/refs/heads/b", + "object": { + "type": "commit", + "sha": "aa218f56b14c9653891f9e74264a383fa43fefbd", + "url": "https://api.github.com/repos/o/r/git/commits/aa218f56b14c9653891f9e74264a383fa43fefbd" + } + }`) + }) + + ref, _, err := client.Git.GetRef(context.Background(), "o", "r", "refs/heads/b") + if err != nil { + t.Fatalf("Git.GetRef returned error: %v", err) + } + + want := &Reference{ + Ref: String("refs/heads/b"), + URL: String("https://api.github.com/repos/o/r/git/refs/heads/b"), + Object: &GitObject{ + Type: String("commit"), + SHA: String("aa218f56b14c9653891f9e74264a383fa43fefbd"), + URL: String("https://api.github.com/repos/o/r/git/commits/aa218f56b14c9653891f9e74264a383fa43fefbd"), + }, + } + if !reflect.DeepEqual(ref, want) { + t.Errorf("Git.GetRef returned %+v, want %+v", ref, want) + } + + // without 'refs/' prefix + if _, _, err := client.Git.GetRef(context.Background(), "o", "r", "heads/b"); err != nil { + t.Errorf("Git.GetRef returned error: %v", err) + } +} + +func TestGitService_GetRef_multipleRefs(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/git/refs/heads/b", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + fmt.Fprint(w, ` + [ + { + "ref": "refs/heads/booger", + "url": "https://api.github.com/repos/o/r/git/refs/heads/booger", + "object": { + "type": "commit", + "sha": "aa218f56b14c9653891f9e74264a383fa43fefbd", + "url": "https://api.github.com/repos/o/r/git/commits/aa218f56b14c9653891f9e74264a383fa43fefbd" + } + }, + { + "ref": "refs/heads/bandsaw", + "url": "https://api.github.com/repos/o/r/git/refs/heads/bandsaw", + "object": { + "type": "commit", + "sha": "612077ae6dffb4d2fbd8ce0cccaa58893b07b5ac", + "url": "https://api.github.com/repos/o/r/git/commits/612077ae6dffb4d2fbd8ce0cccaa58893b07b5ac" + } + } + ] + `) + }) + + _, _, err := client.Git.GetRef(context.Background(), "o", "r", "refs/heads/b") + want := "no exact match found for this ref" + if err.Error() != want { + t.Errorf("Git.GetRef returned %+v, want %+v", err, want) + } + +} + +func TestGitService_GetRefs_singleRef(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/git/refs/heads/b", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + fmt.Fprint(w, ` + { + "ref": "refs/heads/b", + "url": "https://api.github.com/repos/o/r/git/refs/heads/b", + "object": { + "type": "commit", + "sha": "aa218f56b14c9653891f9e74264a383fa43fefbd", + "url": "https://api.github.com/repos/o/r/git/commits/aa218f56b14c9653891f9e74264a383fa43fefbd" + } + }`) + }) + + refs, _, err := client.Git.GetRefs(context.Background(), "o", "r", "refs/heads/b") + if err != nil { + t.Fatalf("Git.GetRefs returned error: %v", err) + } + + ref := refs[0] + want := &Reference{ + Ref: String("refs/heads/b"), + URL: String("https://api.github.com/repos/o/r/git/refs/heads/b"), + Object: &GitObject{ + Type: String("commit"), + SHA: String("aa218f56b14c9653891f9e74264a383fa43fefbd"), + URL: String("https://api.github.com/repos/o/r/git/commits/aa218f56b14c9653891f9e74264a383fa43fefbd"), + }, + } + if !reflect.DeepEqual(ref, want) { + 
t.Errorf("Git.GetRefs returned %+v, want %+v", ref, want) + } + + // without 'refs/' prefix + if _, _, err := client.Git.GetRefs(context.Background(), "o", "r", "heads/b"); err != nil { + t.Errorf("Git.GetRefs returned error: %v", err) + } +} + +func TestGitService_GetRefs_multipleRefs(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/git/refs/heads/b", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + fmt.Fprint(w, ` + [ + { + "ref": "refs/heads/booger", + "url": "https://api.github.com/repos/o/r/git/refs/heads/booger", + "object": { + "type": "commit", + "sha": "aa218f56b14c9653891f9e74264a383fa43fefbd", + "url": "https://api.github.com/repos/o/r/git/commits/aa218f56b14c9653891f9e74264a383fa43fefbd" + } + }, + { + "ref": "refs/heads/bandsaw", + "url": "https://api.github.com/repos/o/r/git/refs/heads/bandsaw", + "object": { + "type": "commit", + "sha": "612077ae6dffb4d2fbd8ce0cccaa58893b07b5ac", + "url": "https://api.github.com/repos/o/r/git/commits/612077ae6dffb4d2fbd8ce0cccaa58893b07b5ac" + } + } + ] + `) + }) + + refs, _, err := client.Git.GetRefs(context.Background(), "o", "r", "refs/heads/b") + if err != nil { + t.Errorf("Git.GetRefs returned error: %v", err) + } + + want := &Reference{ + Ref: String("refs/heads/booger"), + URL: String("https://api.github.com/repos/o/r/git/refs/heads/booger"), + Object: &GitObject{ + Type: String("commit"), + SHA: String("aa218f56b14c9653891f9e74264a383fa43fefbd"), + URL: String("https://api.github.com/repos/o/r/git/commits/aa218f56b14c9653891f9e74264a383fa43fefbd"), + }, + } + if !reflect.DeepEqual(refs[0], want) { + t.Errorf("Git.GetRefs returned %+v, want %+v", refs[0], want) + } +} + +// TestGitService_GetRefs_noRefs tests for behaviour resulting from an unexpected GH response. This should never actually happen. 
+func TestGitService_GetRefs_noRefs(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/git/refs/heads/b", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + fmt.Fprint(w, "[]") + }) + + _, _, err := client.Git.GetRefs(context.Background(), "o", "r", "refs/heads/b") + want := "unexpected response from GitHub API: an array of refs with length 0" + if err.Error() != want { + t.Errorf("Git.GetRefs returned %+v, want %+v", err, want) + } + +} + +func TestGitService_ListRefs(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/git/refs", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + fmt.Fprint(w, ` + [ + { + "ref": "refs/heads/branchA", + "url": "https://api.github.com/repos/o/r/git/refs/heads/branchA", + "object": { + "type": "commit", + "sha": "aa218f56b14c9653891f9e74264a383fa43fefbd", + "url": "https://api.github.com/repos/o/r/git/commits/aa218f56b14c9653891f9e74264a383fa43fefbd" + } + }, + { + "ref": "refs/heads/branchB", + "url": "https://api.github.com/repos/o/r/git/refs/heads/branchB", + "object": { + "type": "commit", + "sha": "aa218f56b14c9653891f9e74264a383fa43fefbd", + "url": "https://api.github.com/repos/o/r/git/commits/aa218f56b14c9653891f9e74264a383fa43fefbd" + } + } + ]`) + }) + + refs, _, err := client.Git.ListRefs(context.Background(), "o", "r", nil) + if err != nil { + t.Errorf("Git.ListRefs returned error: %v", err) + } + + want := []*Reference{ + { + Ref: String("refs/heads/branchA"), + URL: String("https://api.github.com/repos/o/r/git/refs/heads/branchA"), + Object: &GitObject{ + Type: String("commit"), + SHA: String("aa218f56b14c9653891f9e74264a383fa43fefbd"), + URL: String("https://api.github.com/repos/o/r/git/commits/aa218f56b14c9653891f9e74264a383fa43fefbd"), + }, + }, + { + Ref: String("refs/heads/branchB"), + URL: String("https://api.github.com/repos/o/r/git/refs/heads/branchB"), + Object: &GitObject{ + Type: String("commit"), + SHA: String("aa218f56b14c9653891f9e74264a383fa43fefbd"), + URL: String("https://api.github.com/repos/o/r/git/commits/aa218f56b14c9653891f9e74264a383fa43fefbd"), + }, + }, + } + if !reflect.DeepEqual(refs, want) { + t.Errorf("Git.ListRefs returned %+v, want %+v", refs, want) + } +} + +func TestGitService_ListRefs_options(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/git/refs/t", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testFormValues(t, r, values{"page": "2"}) + fmt.Fprint(w, `[{"ref": "r"}]`) + }) + + opt := &ReferenceListOptions{Type: "t", ListOptions: ListOptions{Page: 2}} + refs, _, err := client.Git.ListRefs(context.Background(), "o", "r", opt) + if err != nil { + t.Errorf("Git.ListRefs returned error: %v", err) + } + + want := []*Reference{{Ref: String("r")}} + if !reflect.DeepEqual(refs, want) { + t.Errorf("Git.ListRefs returned %+v, want %+v", refs, want) + } +} + +func TestGitService_CreateRef(t *testing.T) { + setup() + defer teardown() + + args := &createRefRequest{ + Ref: String("refs/heads/b"), + SHA: String("aa218f56b14c9653891f9e74264a383fa43fefbd"), + } + + mux.HandleFunc("/repos/o/r/git/refs", func(w http.ResponseWriter, r *http.Request) { + v := new(createRefRequest) + json.NewDecoder(r.Body).Decode(v) + + testMethod(t, r, "POST") + if !reflect.DeepEqual(v, args) { + t.Errorf("Request body = %+v, want %+v", v, args) + } + fmt.Fprint(w, ` + { + "ref": "refs/heads/b", + "url": "https://api.github.com/repos/o/r/git/refs/heads/b", + "object": { + "type": "commit", + 
"sha": "aa218f56b14c9653891f9e74264a383fa43fefbd", + "url": "https://api.github.com/repos/o/r/git/commits/aa218f56b14c9653891f9e74264a383fa43fefbd" + } + }`) + }) + + ref, _, err := client.Git.CreateRef(context.Background(), "o", "r", &Reference{ + Ref: String("refs/heads/b"), + Object: &GitObject{ + SHA: String("aa218f56b14c9653891f9e74264a383fa43fefbd"), + }, + }) + if err != nil { + t.Errorf("Git.CreateRef returned error: %v", err) + } + + want := &Reference{ + Ref: String("refs/heads/b"), + URL: String("https://api.github.com/repos/o/r/git/refs/heads/b"), + Object: &GitObject{ + Type: String("commit"), + SHA: String("aa218f56b14c9653891f9e74264a383fa43fefbd"), + URL: String("https://api.github.com/repos/o/r/git/commits/aa218f56b14c9653891f9e74264a383fa43fefbd"), + }, + } + if !reflect.DeepEqual(ref, want) { + t.Errorf("Git.CreateRef returned %+v, want %+v", ref, want) + } + + // without 'refs/' prefix + _, _, err = client.Git.CreateRef(context.Background(), "o", "r", &Reference{ + Ref: String("heads/b"), + Object: &GitObject{ + SHA: String("aa218f56b14c9653891f9e74264a383fa43fefbd"), + }, + }) + if err != nil { + t.Errorf("Git.CreateRef returned error: %v", err) + } +} + +func TestGitService_UpdateRef(t *testing.T) { + setup() + defer teardown() + + args := &updateRefRequest{ + SHA: String("aa218f56b14c9653891f9e74264a383fa43fefbd"), + Force: Bool(true), + } + + mux.HandleFunc("/repos/o/r/git/refs/heads/b", func(w http.ResponseWriter, r *http.Request) { + v := new(updateRefRequest) + json.NewDecoder(r.Body).Decode(v) + + testMethod(t, r, "PATCH") + if !reflect.DeepEqual(v, args) { + t.Errorf("Request body = %+v, want %+v", v, args) + } + fmt.Fprint(w, ` + { + "ref": "refs/heads/b", + "url": "https://api.github.com/repos/o/r/git/refs/heads/b", + "object": { + "type": "commit", + "sha": "aa218f56b14c9653891f9e74264a383fa43fefbd", + "url": "https://api.github.com/repos/o/r/git/commits/aa218f56b14c9653891f9e74264a383fa43fefbd" + } + }`) + }) + + ref, _, err := client.Git.UpdateRef(context.Background(), "o", "r", &Reference{ + Ref: String("refs/heads/b"), + Object: &GitObject{SHA: String("aa218f56b14c9653891f9e74264a383fa43fefbd")}, + }, true) + if err != nil { + t.Errorf("Git.UpdateRef returned error: %v", err) + } + + want := &Reference{ + Ref: String("refs/heads/b"), + URL: String("https://api.github.com/repos/o/r/git/refs/heads/b"), + Object: &GitObject{ + Type: String("commit"), + SHA: String("aa218f56b14c9653891f9e74264a383fa43fefbd"), + URL: String("https://api.github.com/repos/o/r/git/commits/aa218f56b14c9653891f9e74264a383fa43fefbd"), + }, + } + if !reflect.DeepEqual(ref, want) { + t.Errorf("Git.UpdateRef returned %+v, want %+v", ref, want) + } + + // without 'refs/' prefix + _, _, err = client.Git.UpdateRef(context.Background(), "o", "r", &Reference{ + Ref: String("heads/b"), + Object: &GitObject{SHA: String("aa218f56b14c9653891f9e74264a383fa43fefbd")}, + }, true) + if err != nil { + t.Errorf("Git.UpdateRef returned error: %v", err) + } +} + +func TestGitService_DeleteRef(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/git/refs/heads/b", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "DELETE") + }) + + _, err := client.Git.DeleteRef(context.Background(), "o", "r", "refs/heads/b") + if err != nil { + t.Errorf("Git.DeleteRef returned error: %v", err) + } + + // without 'refs/' prefix + if _, err := client.Git.DeleteRef(context.Background(), "o", "r", "heads/b"); err != nil { + t.Errorf("Git.DeleteRef returned error: %v", err) + } +} 
diff --git a/vendor/github.com/google/go-github/github/git_tags_test.go b/vendor/github.com/google/go-github/github/git_tags_test.go
new file mode 100644
index 000000000000..7031fe8a4d3e
--- /dev/null
+++ b/vendor/github.com/google/go-github/github/git_tags_test.go
@@ -0,0 +1,69 @@
+// Copyright 2013 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"reflect"
+	"testing"
+)
+
+func TestGitService_GetTag(t *testing.T) {
+	setup()
+	defer teardown()
+
+	mux.HandleFunc("/repos/o/r/git/tags/s", func(w http.ResponseWriter, r *http.Request) {
+		testMethod(t, r, "GET")
+		testHeader(t, r, "Accept", mediaTypeGitSigningPreview)
+
+		fmt.Fprint(w, `{"tag": "t"}`)
+	})
+
+	tag, _, err := client.Git.GetTag(context.Background(), "o", "r", "s")
+	if err != nil {
+		t.Errorf("Git.GetTag returned error: %v", err)
+	}
+
+	want := &Tag{Tag: String("t")}
+	if !reflect.DeepEqual(tag, want) {
+		t.Errorf("Git.GetTag returned %+v, want %+v", tag, want)
+	}
+}
+
+func TestGitService_CreateTag(t *testing.T) {
+	setup()
+	defer teardown()
+
+	input := &createTagRequest{Tag: String("t"), Object: String("s")}
+
+	mux.HandleFunc("/repos/o/r/git/tags", func(w http.ResponseWriter, r *http.Request) {
+		v := new(createTagRequest)
+		json.NewDecoder(r.Body).Decode(v)
+
+		testMethod(t, r, "POST")
+		if !reflect.DeepEqual(v, input) {
+			t.Errorf("Request body = %+v, want %+v", v, input)
+		}
+
+		fmt.Fprint(w, `{"tag": "t"}`)
+	})
+
+	tag, _, err := client.Git.CreateTag(context.Background(), "o", "r", &Tag{
+		Tag:    input.Tag,
+		Object: &GitObject{SHA: input.Object},
+	})
+	if err != nil {
+		t.Errorf("Git.CreateTag returned error: %v", err)
+	}
+
+	want := &Tag{Tag: String("t")}
+	if !reflect.DeepEqual(tag, want) {
+		t.Errorf("Git.CreateTag returned %+v, want %+v", tag, want)
+	}
+}
diff --git a/vendor/github.com/google/go-github/github/git_trees_test.go b/vendor/github.com/google/go-github/github/git_trees_test.go
new file mode 100644
index 000000000000..29728eee3819
--- /dev/null
+++ b/vendor/github.com/google/go-github/github/git_trees_test.go
@@ -0,0 +1,191 @@
+// Copyright 2013 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+ +package github + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "reflect" + "testing" +) + +func TestGitService_GetTree(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/git/trees/s", func(w http.ResponseWriter, r *http.Request) { + if m := "GET"; m != r.Method { + t.Errorf("Request method = %v, want %v", r.Method, m) + } + fmt.Fprint(w, `{ + "sha": "s", + "tree": [ { "type": "blob" } ] + }`) + }) + + tree, _, err := client.Git.GetTree(context.Background(), "o", "r", "s", true) + if err != nil { + t.Errorf("Git.GetTree returned error: %v", err) + } + + want := Tree{ + SHA: String("s"), + Entries: []TreeEntry{ + { + Type: String("blob"), + }, + }, + } + if !reflect.DeepEqual(*tree, want) { + t.Errorf("Tree.Get returned %+v, want %+v", *tree, want) + } +} + +func TestGitService_GetTree_invalidOwner(t *testing.T) { + _, _, err := client.Git.GetTree(context.Background(), "%", "%", "%", false) + testURLParseError(t, err) +} + +func TestGitService_CreateTree(t *testing.T) { + setup() + defer teardown() + + input := []TreeEntry{ + { + Path: String("file.rb"), + Mode: String("100644"), + Type: String("blob"), + SHA: String("7c258a9869f33c1e1e1f74fbb32f07c86cb5a75b"), + }, + } + + mux.HandleFunc("/repos/o/r/git/trees", func(w http.ResponseWriter, r *http.Request) { + v := new(createTree) + json.NewDecoder(r.Body).Decode(v) + + if m := "POST"; m != r.Method { + t.Errorf("Request method = %v, want %v", r.Method, m) + } + + want := &createTree{ + BaseTree: "b", + Entries: input, + } + if !reflect.DeepEqual(v, want) { + t.Errorf("Git.CreateTree request body: %+v, want %+v", v, want) + } + + fmt.Fprint(w, `{ + "sha": "cd8274d15fa3ae2ab983129fb037999f264ba9a7", + "tree": [ + { + "path": "file.rb", + "mode": "100644", + "type": "blob", + "size": 132, + "sha": "7c258a9869f33c1e1e1f74fbb32f07c86cb5a75b" + } + ] + }`) + }) + + tree, _, err := client.Git.CreateTree(context.Background(), "o", "r", "b", input) + if err != nil { + t.Errorf("Git.CreateTree returned error: %v", err) + } + + want := Tree{ + String("cd8274d15fa3ae2ab983129fb037999f264ba9a7"), + []TreeEntry{ + { + Path: String("file.rb"), + Mode: String("100644"), + Type: String("blob"), + Size: Int(132), + SHA: String("7c258a9869f33c1e1e1f74fbb32f07c86cb5a75b"), + }, + }, + } + + if !reflect.DeepEqual(*tree, want) { + t.Errorf("Git.CreateTree returned %+v, want %+v", *tree, want) + } +} + +func TestGitService_CreateTree_Content(t *testing.T) { + setup() + defer teardown() + + input := []TreeEntry{ + { + Path: String("content.md"), + Mode: String("100644"), + Content: String("file content"), + }, + } + + mux.HandleFunc("/repos/o/r/git/trees", func(w http.ResponseWriter, r *http.Request) { + v := new(createTree) + json.NewDecoder(r.Body).Decode(v) + + if m := "POST"; m != r.Method { + t.Errorf("Request method = %v, want %v", r.Method, m) + } + + want := &createTree{ + BaseTree: "b", + Entries: input, + } + if !reflect.DeepEqual(v, want) { + t.Errorf("Git.CreateTree request body: %+v, want %+v", v, want) + } + + fmt.Fprint(w, `{ + "sha": "5c6780ad2c68743383b740fd1dab6f6a33202b11", + "url": "https://api.github.com/repos/o/r/git/trees/5c6780ad2c68743383b740fd1dab6f6a33202b11", + "tree": [ + { + "mode": "100644", + "type": "blob", + "sha": "aad8feacf6f8063150476a7b2bd9770f2794c08b", + "path": "content.md", + "size": 12, + "url": "https://api.github.com/repos/o/r/git/blobs/aad8feacf6f8063150476a7b2bd9770f2794c08b" + } + ] + }`) + }) + + tree, _, err := client.Git.CreateTree(context.Background(), "o", "r", "b", 
input) + if err != nil { + t.Errorf("Git.CreateTree returned error: %v", err) + } + + want := Tree{ + String("5c6780ad2c68743383b740fd1dab6f6a33202b11"), + []TreeEntry{ + { + Path: String("content.md"), + Mode: String("100644"), + Type: String("blob"), + Size: Int(12), + SHA: String("aad8feacf6f8063150476a7b2bd9770f2794c08b"), + URL: String("https://api.github.com/repos/o/r/git/blobs/aad8feacf6f8063150476a7b2bd9770f2794c08b"), + }, + }, + } + + if !reflect.DeepEqual(*tree, want) { + t.Errorf("Git.CreateTree returned %+v, want %+v", *tree, want) + } +} + +func TestGitService_CreateTree_invalidOwner(t *testing.T) { + _, _, err := client.Git.CreateTree(context.Background(), "%", "%", "", nil) + testURLParseError(t, err) +} diff --git a/vendor/github.com/google/go-github/github/github_test.go b/vendor/github.com/google/go-github/github/github_test.go new file mode 100644 index 000000000000..ed8ab1a2b8b1 --- /dev/null +++ b/vendor/github.com/google/go-github/github/github_test.go @@ -0,0 +1,913 @@ +// Copyright 2013 The go-github AUTHORS. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package github + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "net/url" + "os" + "path" + "reflect" + "strings" + "testing" + "time" +) + +var ( + // mux is the HTTP request multiplexer used with the test server. + mux *http.ServeMux + + // client is the GitHub client being tested. + client *Client + + // server is a test HTTP server used to provide mock API responses. + server *httptest.Server +) + +// setup sets up a test HTTP server along with a github.Client that is +// configured to talk to that test server. Tests should register handlers on +// mux which provide mock responses for the API method being tested. +func setup() { + // test server + mux = http.NewServeMux() + server = httptest.NewServer(mux) + + // github client configured to use test server + client = NewClient(nil) + url, _ := url.Parse(server.URL) + client.BaseURL = url + client.UploadURL = url +} + +// teardown closes the test HTTP server. +func teardown() { + server.Close() +} + +// openTestFile creates a new file with the given name and content for testing. +// In order to ensure the exact file name, this function will create a new temp +// directory, and create the file in that directory. It is the caller's +// responsibility to remove the directory and its contents when no longer needed. 
+func openTestFile(name, content string) (file *os.File, dir string, err error) { + dir, err = ioutil.TempDir("", "go-github") + if err != nil { + return nil, dir, err + } + + file, err = os.OpenFile(path.Join(dir, name), os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600) + if err != nil { + return nil, dir, err + } + + fmt.Fprint(file, content) + + // close and re-open the file to keep file.Stat() happy + file.Close() + file, err = os.Open(file.Name()) + if err != nil { + return nil, dir, err + } + + return file, dir, err +} + +func testMethod(t *testing.T, r *http.Request, want string) { + if got := r.Method; got != want { + t.Errorf("Request method: %v, want %v", got, want) + } +} + +type values map[string]string + +func testFormValues(t *testing.T, r *http.Request, values values) { + want := url.Values{} + for k, v := range values { + want.Set(k, v) + } + + r.ParseForm() + if got := r.Form; !reflect.DeepEqual(got, want) { + t.Errorf("Request parameters: %v, want %v", got, want) + } +} + +func testHeader(t *testing.T, r *http.Request, header string, want string) { + if got := r.Header.Get(header); got != want { + t.Errorf("Header.Get(%q) returned %q, want %q", header, got, want) + } +} + +func testURLParseError(t *testing.T, err error) { + if err == nil { + t.Errorf("Expected error to be returned") + } + if err, ok := err.(*url.Error); !ok || err.Op != "parse" { + t.Errorf("Expected URL parse error, got %+v", err) + } +} + +func testBody(t *testing.T, r *http.Request, want string) { + b, err := ioutil.ReadAll(r.Body) + if err != nil { + t.Errorf("Error reading request body: %v", err) + } + if got := string(b); got != want { + t.Errorf("request Body is %s, want %s", got, want) + } +} + +// Helper function to test that a value is marshalled to JSON as expected. +func testJSONMarshal(t *testing.T, v interface{}, want string) { + j, err := json.Marshal(v) + if err != nil { + t.Errorf("Unable to marshal JSON for %v", v) + } + + w := new(bytes.Buffer) + err = json.Compact(w, []byte(want)) + if err != nil { + t.Errorf("String is not valid json: %s", want) + } + + if w.String() != string(j) { + t.Errorf("json.Marshal(%q) returned %s, want %s", v, j, w) + } + + // now go the other direction and make sure things unmarshal as expected + u := reflect.ValueOf(v).Interface() + if err := json.Unmarshal([]byte(want), u); err != nil { + t.Errorf("Unable to unmarshal JSON for %v", want) + } + + if !reflect.DeepEqual(v, u) { + t.Errorf("json.Unmarshal(%q) returned %s, want %s", want, u, v) + } +} + +func TestNewClient(t *testing.T) { + c := NewClient(nil) + + if got, want := c.BaseURL.String(), defaultBaseURL; got != want { + t.Errorf("NewClient BaseURL is %v, want %v", got, want) + } + if got, want := c.UserAgent, userAgent; got != want { + t.Errorf("NewClient UserAgent is %v, want %v", got, want) + } +} + +// Ensure that length of Client.rateLimits is the same as number of fields in RateLimits struct. 
+func TestClient_rateLimits(t *testing.T) { + if got, want := len(Client{}.rateLimits), reflect.TypeOf(RateLimits{}).NumField(); got != want { + t.Errorf("len(Client{}.rateLimits) is %v, want %v", got, want) + } +} + +func TestNewRequest(t *testing.T) { + c := NewClient(nil) + + inURL, outURL := "/foo", defaultBaseURL+"foo" + inBody, outBody := &User{Login: String("l")}, `{"login":"l"}`+"\n" + req, _ := c.NewRequest("GET", inURL, inBody) + + // test that relative URL was expanded + if got, want := req.URL.String(), outURL; got != want { + t.Errorf("NewRequest(%q) URL is %v, want %v", inURL, got, want) + } + + // test that body was JSON encoded + body, _ := ioutil.ReadAll(req.Body) + if got, want := string(body), outBody; got != want { + t.Errorf("NewRequest(%q) Body is %v, want %v", inBody, got, want) + } + + // test that default user-agent is attached to the request + if got, want := req.Header.Get("User-Agent"), c.UserAgent; got != want { + t.Errorf("NewRequest() User-Agent is %v, want %v", got, want) + } +} + +func TestNewRequest_invalidJSON(t *testing.T) { + c := NewClient(nil) + + type T struct { + A map[interface{}]interface{} + } + _, err := c.NewRequest("GET", "/", &T{}) + + if err == nil { + t.Error("Expected error to be returned.") + } + if err, ok := err.(*json.UnsupportedTypeError); !ok { + t.Errorf("Expected a JSON error; got %#v.", err) + } +} + +func TestNewRequest_badURL(t *testing.T) { + c := NewClient(nil) + _, err := c.NewRequest("GET", ":", nil) + testURLParseError(t, err) +} + +// ensure that no User-Agent header is set if the client's UserAgent is empty. +// This caused a problem with Google's internal http client. +func TestNewRequest_emptyUserAgent(t *testing.T) { + c := NewClient(nil) + c.UserAgent = "" + req, err := c.NewRequest("GET", "/", nil) + if err != nil { + t.Fatalf("NewRequest returned unexpected error: %v", err) + } + if _, ok := req.Header["User-Agent"]; ok { + t.Fatal("constructed request contains unexpected User-Agent header") + } +} + +// If a nil body is passed to github.NewRequest, make sure that nil is also +// passed to http.NewRequest. In most cases, passing an io.Reader that returns +// no content is fine, since there is no difference between an HTTP request +// body that is an empty string versus one that is not set at all. However in +// certain cases, intermediate systems may treat these differently resulting in +// subtle errors. 
+func TestNewRequest_emptyBody(t *testing.T) {
+	c := NewClient(nil)
+	req, err := c.NewRequest("GET", "/", nil)
+	if err != nil {
+		t.Fatalf("NewRequest returned unexpected error: %v", err)
+	}
+	if req.Body != nil {
+		t.Fatalf("constructed request contains a non-nil Body")
+	}
+}
+
+func TestResponse_populatePageValues(t *testing.T) {
+	r := http.Response{
+		Header: http.Header{
+			"Link": {`<https://api.github.com/?page=1>; rel="first",` +
+				` <https://api.github.com/?page=2>; rel="prev",` +
+				` <https://api.github.com/?page=4>; rel="next",` +
+				` <https://api.github.com/?page=5>; rel="last"`,
+			},
+		},
+	}
+
+	response := newResponse(&r)
+	if got, want := response.FirstPage, 1; got != want {
+		t.Errorf("response.FirstPage: %v, want %v", got, want)
+	}
+	if got, want := response.PrevPage, 2; want != got {
+		t.Errorf("response.PrevPage: %v, want %v", got, want)
+	}
+	if got, want := response.NextPage, 4; want != got {
+		t.Errorf("response.NextPage: %v, want %v", got, want)
+	}
+	if got, want := response.LastPage, 5; want != got {
+		t.Errorf("response.LastPage: %v, want %v", got, want)
+	}
+}
+
+func TestResponse_populatePageValues_invalid(t *testing.T) {
+	r := http.Response{
+		Header: http.Header{
+			"Link": {`<https://api.github.com/?page=1>,` +
+				`<https://api.github.com/?page=abc>; rel="first",` +
+				`https://api.github.com/?page=2; rel="prev",` +
+				`<https://api.github.com/>; rel="next",` +
+				`<https://api.github.com/?page=>; rel="last"`,
+			},
+		},
+	}
+
+	response := newResponse(&r)
+	if got, want := response.FirstPage, 0; got != want {
+		t.Errorf("response.FirstPage: %v, want %v", got, want)
+	}
+	if got, want := response.PrevPage, 0; got != want {
+		t.Errorf("response.PrevPage: %v, want %v", got, want)
+	}
+	if got, want := response.NextPage, 0; got != want {
+		t.Errorf("response.NextPage: %v, want %v", got, want)
+	}
+	if got, want := response.LastPage, 0; got != want {
+		t.Errorf("response.LastPage: %v, want %v", got, want)
+	}
+
+	// more invalid URLs
+	r = http.Response{
+		Header: http.Header{
+			"Link": {`<https://api.github.com/%?page=2>; rel="first"`},
+		},
+	}
+
+	response = newResponse(&r)
+	if got, want := response.FirstPage, 0; got != want {
+		t.Errorf("response.FirstPage: %v, want %v", got, want)
+	}
+}
+
+func TestDo(t *testing.T) {
+	setup()
+	defer teardown()
+
+	type foo struct {
+		A string
+	}
+
+	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
+		if m := "GET"; m != r.Method {
+			t.Errorf("Request method = %v, want %v", r.Method, m)
+		}
+		fmt.Fprint(w, `{"A":"a"}`)
+	})
+
+	req, _ := client.NewRequest("GET", "/", nil)
+	body := new(foo)
+	client.Do(context.Background(), req, body)
+
+	want := &foo{"a"}
+	if !reflect.DeepEqual(body, want) {
+		t.Errorf("Response body = %v, want %v", body, want)
+	}
+}
+
+func TestDo_httpError(t *testing.T) {
+	setup()
+	defer teardown()
+
+	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
+		http.Error(w, "Bad Request", 400)
+	})
+
+	req, _ := client.NewRequest("GET", "/", nil)
+	_, err := client.Do(context.Background(), req, nil)
+
+	if err == nil {
+		t.Error("Expected HTTP 400 error.")
+	}
+}
+
+// Test handling of an error caused by the internal http client's Do()
+// function. A redirect loop is pretty unlikely to occur within the GitHub
+// API, but does allow us to exercise the right code path.
+func TestDo_redirectLoop(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + http.Redirect(w, r, "/", http.StatusFound) + }) + + req, _ := client.NewRequest("GET", "/", nil) + _, err := client.Do(context.Background(), req, nil) + + if err == nil { + t.Error("Expected error to be returned.") + } + if err, ok := err.(*url.Error); !ok { + t.Errorf("Expected a URL error; got %#v.", err) + } +} + +// Test that an error caused by the internal http client's Do() function +// does not leak the client secret. +func TestDo_sanitizeURL(t *testing.T) { + tp := &UnauthenticatedRateLimitedTransport{ + ClientID: "id", + ClientSecret: "secret", + } + unauthedClient := NewClient(tp.Client()) + unauthedClient.BaseURL = &url.URL{Scheme: "http", Host: "127.0.0.1:0", Path: "/"} // Use port 0 on purpose to trigger a dial TCP error, expect to get "dial tcp 127.0.0.1:0: connect: can't assign requested address". + req, err := unauthedClient.NewRequest("GET", "/", nil) + if err != nil { + t.Fatalf("NewRequest returned unexpected error: %v", err) + } + _, err = unauthedClient.Do(context.Background(), req, nil) + if err == nil { + t.Fatal("Expected error to be returned.") + } + if strings.Contains(err.Error(), "client_secret=secret") { + t.Errorf("Do error contains secret, should be redacted:\n%q", err) + } +} + +func TestDo_rateLimit(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set(headerRateLimit, "60") + w.Header().Set(headerRateRemaining, "59") + w.Header().Set(headerRateReset, "1372700873") + }) + + req, _ := client.NewRequest("GET", "/", nil) + resp, err := client.Do(context.Background(), req, nil) + if err != nil { + t.Errorf("Do returned unexpected error: %v", err) + } + if got, want := resp.Rate.Limit, 60; got != want { + t.Errorf("Client rate limit = %v, want %v", got, want) + } + if got, want := resp.Rate.Remaining, 59; got != want { + t.Errorf("Client rate remaining = %v, want %v", got, want) + } + reset := time.Date(2013, 7, 1, 17, 47, 53, 0, time.UTC) + if resp.Rate.Reset.UTC() != reset { + t.Errorf("Client rate reset = %v, want %v", resp.Rate.Reset, reset) + } +} + +// ensure rate limit is still parsed, even for error responses +func TestDo_rateLimit_errorResponse(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set(headerRateLimit, "60") + w.Header().Set(headerRateRemaining, "59") + w.Header().Set(headerRateReset, "1372700873") + http.Error(w, "Bad Request", 400) + }) + + req, _ := client.NewRequest("GET", "/", nil) + resp, err := client.Do(context.Background(), req, nil) + if err == nil { + t.Error("Expected error to be returned.") + } + if _, ok := err.(*RateLimitError); ok { + t.Errorf("Did not expect a *RateLimitError error; got %#v.", err) + } + if got, want := resp.Rate.Limit, 60; got != want { + t.Errorf("Client rate limit = %v, want %v", got, want) + } + if got, want := resp.Rate.Remaining, 59; got != want { + t.Errorf("Client rate remaining = %v, want %v", got, want) + } + reset := time.Date(2013, 7, 1, 17, 47, 53, 0, time.UTC) + if resp.Rate.Reset.UTC() != reset { + t.Errorf("Client rate reset = %v, want %v", resp.Rate.Reset, reset) + } +} + +// Ensure *RateLimitError is returned when API rate limit is exceeded. 
+func TestDo_rateLimit_rateLimitError(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set(headerRateLimit, "60") + w.Header().Set(headerRateRemaining, "0") + w.Header().Set(headerRateReset, "1372700873") + w.Header().Set("Content-Type", "application/json; charset=utf-8") + w.WriteHeader(http.StatusForbidden) + fmt.Fprintln(w, `{ + "message": "API rate limit exceeded for xxx.xxx.xxx.xxx. (But here's the good news: Authenticated requests get a higher rate limit. Check out the documentation for more details.)", + "documentation_url": "https://developer.github.com/v3/#rate-limiting" +}`) + }) + + req, _ := client.NewRequest("GET", "/", nil) + _, err := client.Do(context.Background(), req, nil) + + if err == nil { + t.Error("Expected error to be returned.") + } + rateLimitErr, ok := err.(*RateLimitError) + if !ok { + t.Fatalf("Expected a *RateLimitError error; got %#v.", err) + } + if got, want := rateLimitErr.Rate.Limit, 60; got != want { + t.Errorf("rateLimitErr rate limit = %v, want %v", got, want) + } + if got, want := rateLimitErr.Rate.Remaining, 0; got != want { + t.Errorf("rateLimitErr rate remaining = %v, want %v", got, want) + } + reset := time.Date(2013, 7, 1, 17, 47, 53, 0, time.UTC) + if rateLimitErr.Rate.Reset.UTC() != reset { + t.Errorf("rateLimitErr rate reset = %v, want %v", rateLimitErr.Rate.Reset.UTC(), reset) + } +} + +// Ensure a network call is not made when it's known that API rate limit is still exceeded. +func TestDo_rateLimit_noNetworkCall(t *testing.T) { + setup() + defer teardown() + + reset := time.Now().UTC().Add(time.Minute).Round(time.Second) // Rate reset is a minute from now, with 1 second precision. + + mux.HandleFunc("/first", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set(headerRateLimit, "60") + w.Header().Set(headerRateRemaining, "0") + w.Header().Set(headerRateReset, fmt.Sprint(reset.Unix())) + w.Header().Set("Content-Type", "application/json; charset=utf-8") + w.WriteHeader(http.StatusForbidden) + fmt.Fprintln(w, `{ + "message": "API rate limit exceeded for xxx.xxx.xxx.xxx. (But here's the good news: Authenticated requests get a higher rate limit. Check out the documentation for more details.)", + "documentation_url": "https://developer.github.com/v3/#rate-limiting" +}`) + }) + + madeNetworkCall := false + mux.HandleFunc("/second", func(w http.ResponseWriter, r *http.Request) { + madeNetworkCall = true + }) + + // First request is made, and it makes the client aware of rate reset time being in the future. + req, _ := client.NewRequest("GET", "/first", nil) + client.Do(context.Background(), req, nil) + + // Second request should not cause a network call to be made, since client can predict a rate limit error. 
+ req, _ = client.NewRequest("GET", "/second", nil) + _, err := client.Do(context.Background(), req, nil) + + if madeNetworkCall { + t.Fatal("Network call was made, even though rate limit is known to still be exceeded.") + } + + if err == nil { + t.Error("Expected error to be returned.") + } + rateLimitErr, ok := err.(*RateLimitError) + if !ok { + t.Fatalf("Expected a *RateLimitError error; got %#v.", err) + } + if got, want := rateLimitErr.Rate.Limit, 60; got != want { + t.Errorf("rateLimitErr rate limit = %v, want %v", got, want) + } + if got, want := rateLimitErr.Rate.Remaining, 0; got != want { + t.Errorf("rateLimitErr rate remaining = %v, want %v", got, want) + } + if rateLimitErr.Rate.Reset.UTC() != reset { + t.Errorf("rateLimitErr rate reset = %v, want %v", rateLimitErr.Rate.Reset.UTC(), reset) + } +} + +// Ensure *AbuseRateLimitError is returned when the response indicates that +// the client has triggered an abuse detection mechanism. +func TestDo_rateLimit_abuseRateLimitError(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json; charset=utf-8") + w.WriteHeader(http.StatusForbidden) + // When the abuse rate limit error is of the "temporarily blocked from content creation" type, + // there is no "Retry-After" header. + fmt.Fprintln(w, `{ + "message": "You have triggered an abuse detection mechanism and have been temporarily blocked from content creation. Please retry your request again later.", + "documentation_url": "https://developer.github.com/v3#abuse-rate-limits" +}`) + }) + + req, _ := client.NewRequest("GET", "/", nil) + _, err := client.Do(context.Background(), req, nil) + + if err == nil { + t.Error("Expected error to be returned.") + } + abuseRateLimitErr, ok := err.(*AbuseRateLimitError) + if !ok { + t.Fatalf("Expected a *AbuseRateLimitError error; got %#v.", err) + } + if got, want := abuseRateLimitErr.RetryAfter, (*time.Duration)(nil); got != want { + t.Errorf("abuseRateLimitErr RetryAfter = %v, want %v", got, want) + } +} + +// Ensure *AbuseRateLimitError.RetryAfter is parsed correctly. +func TestDo_rateLimit_abuseRateLimitError_retryAfter(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json; charset=utf-8") + w.Header().Set("Retry-After", "123") // Retry after value of 123 seconds. 
+ w.WriteHeader(http.StatusForbidden) + fmt.Fprintln(w, `{ + "message": "You have triggered an abuse detection mechanism ...", + "documentation_url": "https://developer.github.com/v3#abuse-rate-limits" +}`) + }) + + req, _ := client.NewRequest("GET", "/", nil) + _, err := client.Do(context.Background(), req, nil) + + if err == nil { + t.Error("Expected error to be returned.") + } + abuseRateLimitErr, ok := err.(*AbuseRateLimitError) + if !ok { + t.Fatalf("Expected a *AbuseRateLimitError error; got %#v.", err) + } + if abuseRateLimitErr.RetryAfter == nil { + t.Fatalf("abuseRateLimitErr RetryAfter is nil, expected not-nil") + } + if got, want := *abuseRateLimitErr.RetryAfter, 123*time.Second; got != want { + t.Errorf("abuseRateLimitErr RetryAfter = %v, want %v", got, want) + } +} + +func TestDo_noContent(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusNoContent) + }) + + var body json.RawMessage + + req, _ := client.NewRequest("GET", "/", nil) + _, err := client.Do(context.Background(), req, &body) + if err != nil { + t.Fatalf("Do returned unexpected error: %v", err) + } +} + +func TestSanitizeURL(t *testing.T) { + tests := []struct { + in, want string + }{ + {"/?a=b", "/?a=b"}, + {"/?a=b&client_secret=secret", "/?a=b&client_secret=REDACTED"}, + {"/?a=b&client_id=id&client_secret=secret", "/?a=b&client_id=id&client_secret=REDACTED"}, + } + + for _, tt := range tests { + inURL, _ := url.Parse(tt.in) + want, _ := url.Parse(tt.want) + + if got := sanitizeURL(inURL); !reflect.DeepEqual(got, want) { + t.Errorf("sanitizeURL(%v) returned %v, want %v", tt.in, got, want) + } + } +} + +func TestCheckResponse(t *testing.T) { + res := &http.Response{ + Request: &http.Request{}, + StatusCode: http.StatusBadRequest, + Body: ioutil.NopCloser(strings.NewReader(`{"message":"m", + "errors": [{"resource": "r", "field": "f", "code": "c"}], + "block": {"reason": "dmca", "created_at": "2016-03-17T15:39:46Z"}}`)), + } + err := CheckResponse(res).(*ErrorResponse) + + if err == nil { + t.Errorf("Expected error response.") + } + + want := &ErrorResponse{ + Response: res, + Message: "m", + Errors: []Error{{Resource: "r", Field: "f", Code: "c"}}, + Block: &struct { + Reason string `json:"reason,omitempty"` + CreatedAt *Timestamp `json:"created_at,omitempty"` + }{ + Reason: "dmca", + CreatedAt: &Timestamp{time.Date(2016, 3, 17, 15, 39, 46, 0, time.UTC)}, + }, + } + if !reflect.DeepEqual(err, want) { + t.Errorf("Error = %#v, want %#v", err, want) + } +} + +// ensure that we properly handle API errors that do not contain a response body +func TestCheckResponse_noBody(t *testing.T) { + res := &http.Response{ + Request: &http.Request{}, + StatusCode: http.StatusBadRequest, + Body: ioutil.NopCloser(strings.NewReader("")), + } + err := CheckResponse(res).(*ErrorResponse) + + if err == nil { + t.Errorf("Expected error response.") + } + + want := &ErrorResponse{ + Response: res, + } + if !reflect.DeepEqual(err, want) { + t.Errorf("Error = %#v, want %#v", err, want) + } +} + +func TestParseBooleanResponse_true(t *testing.T) { + result, err := parseBoolResponse(nil) + if err != nil { + t.Errorf("parseBoolResponse returned error: %+v", err) + } + + if want := true; result != want { + t.Errorf("parseBoolResponse returned %+v, want: %+v", result, want) + } +} + +func TestParseBooleanResponse_false(t *testing.T) { + v := &ErrorResponse{Response: &http.Response{StatusCode: http.StatusNotFound}} + result, err := parseBoolResponse(v) + if err 
!= nil { + t.Errorf("parseBoolResponse returned error: %+v", err) + } + + if want := false; result != want { + t.Errorf("parseBoolResponse returned %+v, want: %+v", result, want) + } +} + +func TestParseBooleanResponse_error(t *testing.T) { + v := &ErrorResponse{Response: &http.Response{StatusCode: http.StatusBadRequest}} + result, err := parseBoolResponse(v) + + if err == nil { + t.Errorf("Expected error to be returned.") + } + + if want := false; result != want { + t.Errorf("parseBoolResponse returned %+v, want: %+v", result, want) + } +} + +func TestErrorResponse_Error(t *testing.T) { + res := &http.Response{Request: &http.Request{}} + err := ErrorResponse{Message: "m", Response: res} + if err.Error() == "" { + t.Errorf("Expected non-empty ErrorResponse.Error()") + } +} + +func TestError_Error(t *testing.T) { + err := Error{} + if err.Error() == "" { + t.Errorf("Expected non-empty Error.Error()") + } +} + +func TestRateLimits(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/rate_limit", func(w http.ResponseWriter, r *http.Request) { + if m := "GET"; m != r.Method { + t.Errorf("Request method = %v, want %v", r.Method, m) + } + fmt.Fprint(w, `{"resources":{ + "core": {"limit":2,"remaining":1,"reset":1372700873}, + "search": {"limit":3,"remaining":2,"reset":1372700874} + }}`) + }) + + rate, _, err := client.RateLimits(context.Background()) + if err != nil { + t.Errorf("RateLimits returned error: %v", err) + } + + want := &RateLimits{ + Core: &Rate{ + Limit: 2, + Remaining: 1, + Reset: Timestamp{time.Date(2013, 7, 1, 17, 47, 53, 0, time.UTC).Local()}, + }, + Search: &Rate{ + Limit: 3, + Remaining: 2, + Reset: Timestamp{time.Date(2013, 7, 1, 17, 47, 54, 0, time.UTC).Local()}, + }, + } + if !reflect.DeepEqual(rate, want) { + t.Errorf("RateLimits returned %+v, want %+v", rate, want) + } + + if got, want := client.rateLimits[coreCategory], *want.Core; got != want { + t.Errorf("client.rateLimits[coreCategory] is %+v, want %+v", got, want) + } + if got, want := client.rateLimits[searchCategory], *want.Search; got != want { + t.Errorf("client.rateLimits[searchCategory] is %+v, want %+v", got, want) + } +} + +func TestUnauthenticatedRateLimitedTransport(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + var v, want string + q := r.URL.Query() + if v, want = q.Get("client_id"), "id"; v != want { + t.Errorf("OAuth Client ID = %v, want %v", v, want) + } + if v, want = q.Get("client_secret"), "secret"; v != want { + t.Errorf("OAuth Client Secret = %v, want %v", v, want) + } + }) + + tp := &UnauthenticatedRateLimitedTransport{ + ClientID: "id", + ClientSecret: "secret", + } + unauthedClient := NewClient(tp.Client()) + unauthedClient.BaseURL = client.BaseURL + req, _ := unauthedClient.NewRequest("GET", "/", nil) + unauthedClient.Do(context.Background(), req, nil) +} + +func TestUnauthenticatedRateLimitedTransport_missingFields(t *testing.T) { + // missing ClientID + tp := &UnauthenticatedRateLimitedTransport{ + ClientSecret: "secret", + } + _, err := tp.RoundTrip(nil) + if err == nil { + t.Errorf("Expected error to be returned") + } + + // missing ClientSecret + tp = &UnauthenticatedRateLimitedTransport{ + ClientID: "id", + } + _, err = tp.RoundTrip(nil) + if err == nil { + t.Errorf("Expected error to be returned") + } +} + +func TestUnauthenticatedRateLimitedTransport_transport(t *testing.T) { + // default transport + tp := &UnauthenticatedRateLimitedTransport{ + ClientID: "id", + ClientSecret: "secret", + } + if 
tp.transport() != http.DefaultTransport { + t.Errorf("Expected http.DefaultTransport to be used.") + } + + // custom transport + tp = &UnauthenticatedRateLimitedTransport{ + ClientID: "id", + ClientSecret: "secret", + Transport: &http.Transport{}, + } + if tp.transport() == http.DefaultTransport { + t.Errorf("Expected custom transport to be used.") + } +} + +func TestBasicAuthTransport(t *testing.T) { + setup() + defer teardown() + + username, password, otp := "u", "p", "123456" + + mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + u, p, ok := r.BasicAuth() + if !ok { + t.Errorf("request does not contain basic auth credentials") + } + if u != username { + t.Errorf("request contained basic auth username %q, want %q", u, username) + } + if p != password { + t.Errorf("request contained basic auth password %q, want %q", p, password) + } + if got, want := r.Header.Get(headerOTP), otp; got != want { + t.Errorf("request contained OTP %q, want %q", got, want) + } + }) + + tp := &BasicAuthTransport{ + Username: username, + Password: password, + OTP: otp, + } + basicAuthClient := NewClient(tp.Client()) + basicAuthClient.BaseURL = client.BaseURL + req, _ := basicAuthClient.NewRequest("GET", "/", nil) + basicAuthClient.Do(context.Background(), req, nil) +} + +func TestBasicAuthTransport_transport(t *testing.T) { + // default transport + tp := &BasicAuthTransport{} + if tp.transport() != http.DefaultTransport { + t.Errorf("Expected http.DefaultTransport to be used.") + } + + // custom transport + tp = &BasicAuthTransport{ + Transport: &http.Transport{}, + } + if tp.transport() == http.DefaultTransport { + t.Errorf("Expected custom transport to be used.") + } +} diff --git a/vendor/github.com/google/go-github/github/gitignore_test.go b/vendor/github.com/google/go-github/github/gitignore_test.go new file mode 100644 index 000000000000..e5a62643b5df --- /dev/null +++ b/vendor/github.com/google/go-github/github/gitignore_test.go @@ -0,0 +1,59 @@ +// Copyright 2013 The go-github AUTHORS. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+
+package github
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+	"reflect"
+	"testing"
+)
+
+func TestGitignoresService_List(t *testing.T) {
+	setup()
+	defer teardown()
+
+	mux.HandleFunc("/gitignore/templates", func(w http.ResponseWriter, r *http.Request) {
+		testMethod(t, r, "GET")
+		fmt.Fprint(w, `["C", "Go"]`)
+	})
+
+	available, _, err := client.Gitignores.List(context.Background())
+	if err != nil {
+		t.Errorf("Gitignores.List returned error: %v", err)
+	}
+
+	want := []string{"C", "Go"}
+	if !reflect.DeepEqual(available, want) {
+		t.Errorf("Gitignores.List returned %+v, want %+v", available, want)
+	}
+}
+
+func TestGitignoresService_Get(t *testing.T) {
+	setup()
+	defer teardown()
+
+	mux.HandleFunc("/gitignore/templates/name", func(w http.ResponseWriter, r *http.Request) {
+		testMethod(t, r, "GET")
+		fmt.Fprint(w, `{"name":"Name","source":"template source"}`)
+	})
+
+	gitignore, _, err := client.Gitignores.Get(context.Background(), "name")
+	if err != nil {
+		t.Errorf("Gitignores.Get returned error: %v", err)
+	}
+
+	want := &Gitignore{Name: String("Name"), Source: String("template source")}
+	if !reflect.DeepEqual(gitignore, want) {
+		t.Errorf("Gitignores.Get returned %+v, want %+v", gitignore, want)
+	}
+}
+
+func TestGitignoresService_Get_invalidTemplate(t *testing.T) {
+	_, _, err := client.Gitignores.Get(context.Background(), "%")
+	testURLParseError(t, err)
+}
diff --git a/vendor/github.com/google/go-github/github/issues_assignees_test.go b/vendor/github.com/google/go-github/github/issues_assignees_test.go
new file mode 100644
index 000000000000..d88a7888a4e2
--- /dev/null
+++ b/vendor/github.com/google/go-github/github/issues_assignees_test.go
@@ -0,0 +1,158 @@
+// Copyright 2013 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+ +package github + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "reflect" + "testing" +) + +func TestIssuesService_ListAssignees(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/assignees", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testFormValues(t, r, values{"page": "2"}) + fmt.Fprint(w, `[{"id":1}]`) + }) + + opt := &ListOptions{Page: 2} + assignees, _, err := client.Issues.ListAssignees(context.Background(), "o", "r", opt) + if err != nil { + t.Errorf("Issues.ListAssignees returned error: %v", err) + } + + want := []*User{{ID: Int(1)}} + if !reflect.DeepEqual(assignees, want) { + t.Errorf("Issues.ListAssignees returned %+v, want %+v", assignees, want) + } +} + +func TestIssuesService_ListAssignees_invalidOwner(t *testing.T) { + _, _, err := client.Issues.ListAssignees(context.Background(), "%", "r", nil) + testURLParseError(t, err) +} + +func TestIssuesService_IsAssignee_true(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/assignees/u", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + }) + + assignee, _, err := client.Issues.IsAssignee(context.Background(), "o", "r", "u") + if err != nil { + t.Errorf("Issues.IsAssignee returned error: %v", err) + } + if want := true; assignee != want { + t.Errorf("Issues.IsAssignee returned %+v, want %+v", assignee, want) + } +} + +func TestIssuesService_IsAssignee_false(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/assignees/u", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + w.WriteHeader(http.StatusNotFound) + }) + + assignee, _, err := client.Issues.IsAssignee(context.Background(), "o", "r", "u") + if err != nil { + t.Errorf("Issues.IsAssignee returned error: %v", err) + } + if want := false; assignee != want { + t.Errorf("Issues.IsAssignee returned %+v, want %+v", assignee, want) + } +} + +func TestIssuesService_IsAssignee_error(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/assignees/u", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + http.Error(w, "BadRequest", http.StatusBadRequest) + }) + + assignee, _, err := client.Issues.IsAssignee(context.Background(), "o", "r", "u") + if err == nil { + t.Errorf("Expected HTTP 400 response") + } + if want := false; assignee != want { + t.Errorf("Issues.IsAssignee returned %+v, want %+v", assignee, want) + } +} + +func TestIssuesService_IsAssignee_invalidOwner(t *testing.T) { + _, _, err := client.Issues.IsAssignee(context.Background(), "%", "r", "u") + testURLParseError(t, err) +} + +func TestIssuesService_AddAssignees(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/issues/1/assignees", func(w http.ResponseWriter, r *http.Request) { + var assignees struct { + Assignees []string `json:"assignees,omitempty"` + } + json.NewDecoder(r.Body).Decode(&assignees) + + testMethod(t, r, "POST") + want := []string{"user1", "user2"} + if !reflect.DeepEqual(assignees.Assignees, want) { + t.Errorf("assignees = %+v, want %+v", assignees, want) + } + fmt.Fprint(w, `{"number":1,"assignees":[{"login":"user1"},{"login":"user2"}]}`) + }) + + got, _, err := client.Issues.AddAssignees(context.Background(), "o", "r", 1, []string{"user1", "user2"}) + if err != nil { + t.Errorf("Issues.AddAssignees returned error: %v", err) + } + + want := &Issue{Number: Int(1), Assignees: []*User{{Login: String("user1")}, {Login: String("user2")}}} + if !reflect.DeepEqual(got, 
want) { + t.Errorf("Issues.AddAssignees = %+v, want %+v", got, want) + } +} + +func TestIssuesService_RemoveAssignees(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/issues/1/assignees", func(w http.ResponseWriter, r *http.Request) { + var assignees struct { + Assignees []string `json:"assignees,omitempty"` + } + json.NewDecoder(r.Body).Decode(&assignees) + + testMethod(t, r, "DELETE") + want := []string{"user1", "user2"} + if !reflect.DeepEqual(assignees.Assignees, want) { + t.Errorf("assignees = %+v, want %+v", assignees, want) + } + fmt.Fprint(w, `{"number":1,"assignees":[]}`) + }) + + got, _, err := client.Issues.RemoveAssignees(context.Background(), "o", "r", 1, []string{"user1", "user2"}) + if err != nil { + t.Errorf("Issues.RemoveAssignees returned error: %v", err) + } + + want := &Issue{Number: Int(1), Assignees: []*User{}} + if !reflect.DeepEqual(got, want) { + t.Errorf("Issues.RemoveAssignees = %+v, want %+v", got, want) + } +} diff --git a/vendor/github.com/google/go-github/github/issues_comments_test.go b/vendor/github.com/google/go-github/github/issues_comments_test.go new file mode 100644 index 000000000000..78c486362f43 --- /dev/null +++ b/vendor/github.com/google/go-github/github/issues_comments_test.go @@ -0,0 +1,188 @@ +// Copyright 2013 The go-github AUTHORS. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package github + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "reflect" + "testing" + "time" +) + +func TestIssuesService_ListComments_allIssues(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/issues/comments", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testHeader(t, r, "Accept", mediaTypeReactionsPreview) + testFormValues(t, r, values{ + "sort": "updated", + "direction": "desc", + "since": "2002-02-10T15:30:00Z", + "page": "2", + }) + fmt.Fprint(w, `[{"id":1}]`) + }) + + opt := &IssueListCommentsOptions{ + Sort: "updated", + Direction: "desc", + Since: time.Date(2002, time.February, 10, 15, 30, 0, 0, time.UTC), + ListOptions: ListOptions{Page: 2}, + } + comments, _, err := client.Issues.ListComments(context.Background(), "o", "r", 0, opt) + if err != nil { + t.Errorf("Issues.ListComments returned error: %v", err) + } + + want := []*IssueComment{{ID: Int(1)}} + if !reflect.DeepEqual(comments, want) { + t.Errorf("Issues.ListComments returned %+v, want %+v", comments, want) + } +} + +func TestIssuesService_ListComments_specificIssue(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/issues/1/comments", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testHeader(t, r, "Accept", mediaTypeReactionsPreview) + fmt.Fprint(w, `[{"id":1}]`) + }) + + comments, _, err := client.Issues.ListComments(context.Background(), "o", "r", 1, nil) + if err != nil { + t.Errorf("Issues.ListComments returned error: %v", err) + } + + want := []*IssueComment{{ID: Int(1)}} + if !reflect.DeepEqual(comments, want) { + t.Errorf("Issues.ListComments returned %+v, want %+v", comments, want) + } +} + +func TestIssuesService_ListComments_invalidOwner(t *testing.T) { + _, _, err := client.Issues.ListComments(context.Background(), "%", "r", 1, nil) + testURLParseError(t, err) +} + +func TestIssuesService_GetComment(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/issues/comments/1", func(w http.ResponseWriter, r *http.Request) { + 
testMethod(t, r, "GET") + testHeader(t, r, "Accept", mediaTypeReactionsPreview) + fmt.Fprint(w, `{"id":1}`) + }) + + comment, _, err := client.Issues.GetComment(context.Background(), "o", "r", 1) + if err != nil { + t.Errorf("Issues.GetComment returned error: %v", err) + } + + want := &IssueComment{ID: Int(1)} + if !reflect.DeepEqual(comment, want) { + t.Errorf("Issues.GetComment returned %+v, want %+v", comment, want) + } +} + +func TestIssuesService_GetComment_invalidOrg(t *testing.T) { + _, _, err := client.Issues.GetComment(context.Background(), "%", "r", 1) + testURLParseError(t, err) +} + +func TestIssuesService_CreateComment(t *testing.T) { + setup() + defer teardown() + + input := &IssueComment{Body: String("b")} + + mux.HandleFunc("/repos/o/r/issues/1/comments", func(w http.ResponseWriter, r *http.Request) { + v := new(IssueComment) + json.NewDecoder(r.Body).Decode(v) + + testMethod(t, r, "POST") + if !reflect.DeepEqual(v, input) { + t.Errorf("Request body = %+v, want %+v", v, input) + } + + fmt.Fprint(w, `{"id":1}`) + }) + + comment, _, err := client.Issues.CreateComment(context.Background(), "o", "r", 1, input) + if err != nil { + t.Errorf("Issues.CreateComment returned error: %v", err) + } + + want := &IssueComment{ID: Int(1)} + if !reflect.DeepEqual(comment, want) { + t.Errorf("Issues.CreateComment returned %+v, want %+v", comment, want) + } +} + +func TestIssuesService_CreateComment_invalidOrg(t *testing.T) { + _, _, err := client.Issues.CreateComment(context.Background(), "%", "r", 1, nil) + testURLParseError(t, err) +} + +func TestIssuesService_EditComment(t *testing.T) { + setup() + defer teardown() + + input := &IssueComment{Body: String("b")} + + mux.HandleFunc("/repos/o/r/issues/comments/1", func(w http.ResponseWriter, r *http.Request) { + v := new(IssueComment) + json.NewDecoder(r.Body).Decode(v) + + testMethod(t, r, "PATCH") + if !reflect.DeepEqual(v, input) { + t.Errorf("Request body = %+v, want %+v", v, input) + } + + fmt.Fprint(w, `{"id":1}`) + }) + + comment, _, err := client.Issues.EditComment(context.Background(), "o", "r", 1, input) + if err != nil { + t.Errorf("Issues.EditComment returned error: %v", err) + } + + want := &IssueComment{ID: Int(1)} + if !reflect.DeepEqual(comment, want) { + t.Errorf("Issues.EditComment returned %+v, want %+v", comment, want) + } +} + +func TestIssuesService_EditComment_invalidOwner(t *testing.T) { + _, _, err := client.Issues.EditComment(context.Background(), "%", "r", 1, nil) + testURLParseError(t, err) +} + +func TestIssuesService_DeleteComment(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/issues/comments/1", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "DELETE") + }) + + _, err := client.Issues.DeleteComment(context.Background(), "o", "r", 1) + if err != nil { + t.Errorf("Issues.DeleteComments returned error: %v", err) + } +} + +func TestIssuesService_DeleteComment_invalidOwner(t *testing.T) { + _, err := client.Issues.DeleteComment(context.Background(), "%", "r", 1) + testURLParseError(t, err) +} diff --git a/vendor/github.com/google/go-github/github/issues_events_test.go b/vendor/github.com/google/go-github/github/issues_events_test.go new file mode 100644 index 000000000000..d85b6223eaa1 --- /dev/null +++ b/vendor/github.com/google/go-github/github/issues_events_test.go @@ -0,0 +1,84 @@ +// Copyright 2014 The go-github AUTHORS. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package github + +import ( + "context" + "fmt" + "net/http" + "reflect" + "testing" +) + +func TestIssuesService_ListIssueEvents(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/issues/1/events", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testFormValues(t, r, values{ + "page": "1", + "per_page": "2", + }) + fmt.Fprint(w, `[{"id":1}]`) + }) + + opt := &ListOptions{Page: 1, PerPage: 2} + events, _, err := client.Issues.ListIssueEvents(context.Background(), "o", "r", 1, opt) + if err != nil { + t.Errorf("Issues.ListIssueEvents returned error: %v", err) + } + + want := []*IssueEvent{{ID: Int(1)}} + if !reflect.DeepEqual(events, want) { + t.Errorf("Issues.ListIssueEvents returned %+v, want %+v", events, want) + } +} + +func TestIssuesService_ListRepositoryEvents(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/issues/events", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testFormValues(t, r, values{ + "page": "1", + "per_page": "2", + }) + fmt.Fprint(w, `[{"id":1}]`) + }) + + opt := &ListOptions{Page: 1, PerPage: 2} + events, _, err := client.Issues.ListRepositoryEvents(context.Background(), "o", "r", opt) + if err != nil { + t.Errorf("Issues.ListRepositoryEvents returned error: %v", err) + } + + want := []*IssueEvent{{ID: Int(1)}} + if !reflect.DeepEqual(events, want) { + t.Errorf("Issues.ListRepositoryEvents returned %+v, want %+v", events, want) + } +} + +func TestIssuesService_GetEvent(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/issues/events/1", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + fmt.Fprint(w, `{"id":1}`) + }) + + event, _, err := client.Issues.GetEvent(context.Background(), "o", "r", 1) + if err != nil { + t.Errorf("Issues.GetEvent returned error: %v", err) + } + + want := &IssueEvent{ID: Int(1)} + if !reflect.DeepEqual(event, want) { + t.Errorf("Issues.GetEvent returned %+v, want %+v", event, want) + } +} diff --git a/vendor/github.com/google/go-github/github/issues_labels_test.go b/vendor/github.com/google/go-github/github/issues_labels_test.go new file mode 100644 index 000000000000..5380427a1e61 --- /dev/null +++ b/vendor/github.com/google/go-github/github/issues_labels_test.go @@ -0,0 +1,317 @@ +// Copyright 2013 The go-github AUTHORS. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package github + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "reflect" + "testing" +) + +func TestIssuesService_ListLabels(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/labels", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testFormValues(t, r, values{"page": "2"}) + fmt.Fprint(w, `[{"name": "a"},{"name": "b"}]`) + }) + + opt := &ListOptions{Page: 2} + labels, _, err := client.Issues.ListLabels(context.Background(), "o", "r", opt) + if err != nil { + t.Errorf("Issues.ListLabels returned error: %v", err) + } + + want := []*Label{{Name: String("a")}, {Name: String("b")}} + if !reflect.DeepEqual(labels, want) { + t.Errorf("Issues.ListLabels returned %+v, want %+v", labels, want) + } +} + +func TestIssuesService_ListLabels_invalidOwner(t *testing.T) { + _, _, err := client.Issues.ListLabels(context.Background(), "%", "%", nil) + testURLParseError(t, err) +} + +func TestIssuesService_GetLabel(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/labels/n", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + fmt.Fprint(w, `{"url":"u", "name": "n", "color": "c"}`) + }) + + label, _, err := client.Issues.GetLabel(context.Background(), "o", "r", "n") + if err != nil { + t.Errorf("Issues.GetLabel returned error: %v", err) + } + + want := &Label{URL: String("u"), Name: String("n"), Color: String("c")} + if !reflect.DeepEqual(label, want) { + t.Errorf("Issues.GetLabel returned %+v, want %+v", label, want) + } +} + +func TestIssuesService_GetLabel_invalidOwner(t *testing.T) { + _, _, err := client.Issues.GetLabel(context.Background(), "%", "%", "%") + testURLParseError(t, err) +} + +func TestIssuesService_CreateLabel(t *testing.T) { + setup() + defer teardown() + + input := &Label{Name: String("n")} + + mux.HandleFunc("/repos/o/r/labels", func(w http.ResponseWriter, r *http.Request) { + v := new(Label) + json.NewDecoder(r.Body).Decode(v) + + testMethod(t, r, "POST") + if !reflect.DeepEqual(v, input) { + t.Errorf("Request body = %+v, want %+v", v, input) + } + + fmt.Fprint(w, `{"url":"u"}`) + }) + + label, _, err := client.Issues.CreateLabel(context.Background(), "o", "r", input) + if err != nil { + t.Errorf("Issues.CreateLabel returned error: %v", err) + } + + want := &Label{URL: String("u")} + if !reflect.DeepEqual(label, want) { + t.Errorf("Issues.CreateLabel returned %+v, want %+v", label, want) + } +} + +func TestIssuesService_CreateLabel_invalidOwner(t *testing.T) { + _, _, err := client.Issues.CreateLabel(context.Background(), "%", "%", nil) + testURLParseError(t, err) +} + +func TestIssuesService_EditLabel(t *testing.T) { + setup() + defer teardown() + + input := &Label{Name: String("z")} + + mux.HandleFunc("/repos/o/r/labels/n", func(w http.ResponseWriter, r *http.Request) { + v := new(Label) + json.NewDecoder(r.Body).Decode(v) + + testMethod(t, r, "PATCH") + if !reflect.DeepEqual(v, input) { + t.Errorf("Request body = %+v, want %+v", v, input) + } + + fmt.Fprint(w, `{"url":"u"}`) + }) + + label, _, err := client.Issues.EditLabel(context.Background(), "o", "r", "n", input) + if err != nil { + t.Errorf("Issues.EditLabel returned error: %v", err) + } + + want := &Label{URL: String("u")} + if !reflect.DeepEqual(label, want) { + t.Errorf("Issues.EditLabel returned %+v, want %+v", label, want) + } +} + +func TestIssuesService_EditLabel_invalidOwner(t *testing.T) { + _, _, err := client.Issues.EditLabel(context.Background(), "%", "%", "%", nil) + testURLParseError(t, err) +} + 
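Every file in this vendored suite, the label tests above included, repeats one fixture pattern: register a handler on the package-level test mux, assert the HTTP method, serve canned JSON, then DeepEqual the decoded result against a want value. Below is a self-contained rendering of that pattern using only the standard library; the /labels endpoint and the local label type are illustrative stand-ins, not part of the go-github package.

    package example

    import (
    	"encoding/json"
    	"fmt"
    	"net/http"
    	"net/http/httptest"
    	"reflect"
    	"testing"
    )

    // label is a hypothetical payload type, standing in for github.Label.
    type label struct {
    	Name string `json:"name"`
    }

    func TestListLabelsPattern(t *testing.T) {
    	// Stand up a throwaway server, mirroring the setup()/teardown() pair
    	// defined in github_test.go.
    	mux := http.NewServeMux()
    	server := httptest.NewServer(mux)
    	defer server.Close()

    	// Canned response for the endpoint under test.
    	mux.HandleFunc("/labels", func(w http.ResponseWriter, r *http.Request) {
    		if r.Method != "GET" {
    			t.Errorf("Request method = %v, want GET", r.Method)
    		}
    		fmt.Fprint(w, `[{"name":"a"},{"name":"b"}]`)
    	})

    	resp, err := http.Get(server.URL + "/labels")
    	if err != nil {
    		t.Fatalf("GET /labels returned error: %v", err)
    	}
    	defer resp.Body.Close()

    	var got []label
    	if err := json.NewDecoder(resp.Body).Decode(&got); err != nil {
    		t.Fatalf("decoding response: %v", err)
    	}

    	want := []label{{Name: "a"}, {Name: "b"}}
    	if !reflect.DeepEqual(got, want) {
    		t.Errorf("labels = %+v, want %+v", got, want)
    	}
    }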
+func TestIssuesService_DeleteLabel(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/labels/n", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "DELETE") + }) + + _, err := client.Issues.DeleteLabel(context.Background(), "o", "r", "n") + if err != nil { + t.Errorf("Issues.DeleteLabel returned error: %v", err) + } +} + +func TestIssuesService_DeleteLabel_invalidOwner(t *testing.T) { + _, err := client.Issues.DeleteLabel(context.Background(), "%", "%", "%") + testURLParseError(t, err) +} + +func TestIssuesService_ListLabelsByIssue(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/issues/1/labels", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testFormValues(t, r, values{"page": "2"}) + fmt.Fprint(w, `[{"name":"a","id":1},{"name":"b","id":2}]`) + }) + + opt := &ListOptions{Page: 2} + labels, _, err := client.Issues.ListLabelsByIssue(context.Background(), "o", "r", 1, opt) + if err != nil { + t.Errorf("Issues.ListLabelsByIssue returned error: %v", err) + } + + want := []*Label{ + {Name: String("a"), ID: Int(1)}, + {Name: String("b"), ID: Int(2)}, + } + if !reflect.DeepEqual(labels, want) { + t.Errorf("Issues.ListLabelsByIssue returned %+v, want %+v", labels, want) + } +} + +func TestIssuesService_ListLabelsByIssue_invalidOwner(t *testing.T) { + _, _, err := client.Issues.ListLabelsByIssue(context.Background(), "%", "%", 1, nil) + testURLParseError(t, err) +} + +func TestIssuesService_AddLabelsToIssue(t *testing.T) { + setup() + defer teardown() + + input := []string{"a", "b"} + + mux.HandleFunc("/repos/o/r/issues/1/labels", func(w http.ResponseWriter, r *http.Request) { + var v []string + json.NewDecoder(r.Body).Decode(&v) + + testMethod(t, r, "POST") + if !reflect.DeepEqual(v, input) { + t.Errorf("Request body = %+v, want %+v", v, input) + } + + fmt.Fprint(w, `[{"url":"u"}]`) + }) + + labels, _, err := client.Issues.AddLabelsToIssue(context.Background(), "o", "r", 1, input) + if err != nil { + t.Errorf("Issues.AddLabelsToIssue returned error: %v", err) + } + + want := []*Label{{URL: String("u")}} + if !reflect.DeepEqual(labels, want) { + t.Errorf("Issues.AddLabelsToIssue returned %+v, want %+v", labels, want) + } +} + +func TestIssuesService_AddLabelsToIssue_invalidOwner(t *testing.T) { + _, _, err := client.Issues.AddLabelsToIssue(context.Background(), "%", "%", 1, nil) + testURLParseError(t, err) +} + +func TestIssuesService_RemoveLabelForIssue(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/issues/1/labels/l", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "DELETE") + }) + + _, err := client.Issues.RemoveLabelForIssue(context.Background(), "o", "r", 1, "l") + if err != nil { + t.Errorf("Issues.RemoveLabelForIssue returned error: %v", err) + } +} + +func TestIssuesService_RemoveLabelForIssue_invalidOwner(t *testing.T) { + _, err := client.Issues.RemoveLabelForIssue(context.Background(), "%", "%", 1, "%") + testURLParseError(t, err) +} + +func TestIssuesService_ReplaceLabelsForIssue(t *testing.T) { + setup() + defer teardown() + + input := []string{"a", "b"} + + mux.HandleFunc("/repos/o/r/issues/1/labels", func(w http.ResponseWriter, r *http.Request) { + var v []string + json.NewDecoder(r.Body).Decode(&v) + + testMethod(t, r, "PUT") + if !reflect.DeepEqual(v, input) { + t.Errorf("Request body = %+v, want %+v", v, input) + } + + fmt.Fprint(w, `[{"url":"u"}]`) + }) + + labels, _, err := client.Issues.ReplaceLabelsForIssue(context.Background(), "o", 
"r", 1, input) + if err != nil { + t.Errorf("Issues.ReplaceLabelsForIssue returned error: %v", err) + } + + want := []*Label{{URL: String("u")}} + if !reflect.DeepEqual(labels, want) { + t.Errorf("Issues.ReplaceLabelsForIssue returned %+v, want %+v", labels, want) + } +} + +func TestIssuesService_ReplaceLabelsForIssue_invalidOwner(t *testing.T) { + _, _, err := client.Issues.ReplaceLabelsForIssue(context.Background(), "%", "%", 1, nil) + testURLParseError(t, err) +} + +func TestIssuesService_RemoveLabelsForIssue(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/issues/1/labels", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "DELETE") + }) + + _, err := client.Issues.RemoveLabelsForIssue(context.Background(), "o", "r", 1) + if err != nil { + t.Errorf("Issues.RemoveLabelsForIssue returned error: %v", err) + } +} + +func TestIssuesService_RemoveLabelsForIssue_invalidOwner(t *testing.T) { + _, err := client.Issues.RemoveLabelsForIssue(context.Background(), "%", "%", 1) + testURLParseError(t, err) +} + +func TestIssuesService_ListLabelsForMilestone(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/milestones/1/labels", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testFormValues(t, r, values{"page": "2"}) + fmt.Fprint(w, `[{"name": "a"},{"name": "b"}]`) + }) + + opt := &ListOptions{Page: 2} + labels, _, err := client.Issues.ListLabelsForMilestone(context.Background(), "o", "r", 1, opt) + if err != nil { + t.Errorf("Issues.ListLabelsForMilestone returned error: %v", err) + } + + want := []*Label{{Name: String("a")}, {Name: String("b")}} + if !reflect.DeepEqual(labels, want) { + t.Errorf("Issues.ListLabelsForMilestone returned %+v, want %+v", labels, want) + } +} + +func TestIssuesService_ListLabelsForMilestone_invalidOwner(t *testing.T) { + _, _, err := client.Issues.ListLabelsForMilestone(context.Background(), "%", "%", 1, nil) + testURLParseError(t, err) +} diff --git a/vendor/github.com/google/go-github/github/issues_milestones_test.go b/vendor/github.com/google/go-github/github/issues_milestones_test.go new file mode 100644 index 000000000000..befa1b48727c --- /dev/null +++ b/vendor/github.com/google/go-github/github/issues_milestones_test.go @@ -0,0 +1,159 @@ +// Copyright 2014 The go-github AUTHORS. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package github + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "reflect" + "testing" +) + +func TestIssuesService_ListMilestones(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/milestones", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testFormValues(t, r, values{ + "state": "closed", + "sort": "due_date", + "direction": "asc", + "page": "2", + }) + fmt.Fprint(w, `[{"number":1}]`) + }) + + opt := &MilestoneListOptions{"closed", "due_date", "asc", ListOptions{Page: 2}} + milestones, _, err := client.Issues.ListMilestones(context.Background(), "o", "r", opt) + if err != nil { + t.Errorf("IssuesService.ListMilestones returned error: %v", err) + } + + want := []*Milestone{{Number: Int(1)}} + if !reflect.DeepEqual(milestones, want) { + t.Errorf("IssuesService.ListMilestones returned %+v, want %+v", milestones, want) + } +} + +func TestIssuesService_ListMilestones_invalidOwner(t *testing.T) { + _, _, err := client.Issues.ListMilestones(context.Background(), "%", "r", nil) + testURLParseError(t, err) +} + +func TestIssuesService_GetMilestone(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/milestones/1", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + fmt.Fprint(w, `{"number":1}`) + }) + + milestone, _, err := client.Issues.GetMilestone(context.Background(), "o", "r", 1) + if err != nil { + t.Errorf("IssuesService.GetMilestone returned error: %v", err) + } + + want := &Milestone{Number: Int(1)} + if !reflect.DeepEqual(milestone, want) { + t.Errorf("IssuesService.GetMilestone returned %+v, want %+v", milestone, want) + } +} + +func TestIssuesService_GetMilestone_invalidOwner(t *testing.T) { + _, _, err := client.Issues.GetMilestone(context.Background(), "%", "r", 1) + testURLParseError(t, err) +} + +func TestIssuesService_CreateMilestone(t *testing.T) { + setup() + defer teardown() + + input := &Milestone{Title: String("t")} + + mux.HandleFunc("/repos/o/r/milestones", func(w http.ResponseWriter, r *http.Request) { + v := new(Milestone) + json.NewDecoder(r.Body).Decode(v) + + testMethod(t, r, "POST") + if !reflect.DeepEqual(v, input) { + t.Errorf("Request body = %+v, want %+v", v, input) + } + + fmt.Fprint(w, `{"number":1}`) + }) + + milestone, _, err := client.Issues.CreateMilestone(context.Background(), "o", "r", input) + if err != nil { + t.Errorf("IssuesService.CreateMilestone returned error: %v", err) + } + + want := &Milestone{Number: Int(1)} + if !reflect.DeepEqual(milestone, want) { + t.Errorf("IssuesService.CreateMilestone returned %+v, want %+v", milestone, want) + } +} + +func TestIssuesService_CreateMilestone_invalidOwner(t *testing.T) { + _, _, err := client.Issues.CreateMilestone(context.Background(), "%", "r", nil) + testURLParseError(t, err) +} + +func TestIssuesService_EditMilestone(t *testing.T) { + setup() + defer teardown() + + input := &Milestone{Title: String("t")} + + mux.HandleFunc("/repos/o/r/milestones/1", func(w http.ResponseWriter, r *http.Request) { + v := new(Milestone) + json.NewDecoder(r.Body).Decode(v) + + testMethod(t, r, "PATCH") + if !reflect.DeepEqual(v, input) { + t.Errorf("Request body = %+v, want %+v", v, input) + } + + fmt.Fprint(w, `{"number":1}`) + }) + + milestone, _, err := client.Issues.EditMilestone(context.Background(), "o", "r", 1, input) + if err != nil { + t.Errorf("IssuesService.EditMilestone returned error: %v", err) + } + + want := &Milestone{Number: Int(1)} + if !reflect.DeepEqual(milestone, want) { + 
t.Errorf("IssuesService.EditMilestone returned %+v, want %+v", milestone, want) + } +} + +func TestIssuesService_EditMilestone_invalidOwner(t *testing.T) { + _, _, err := client.Issues.EditMilestone(context.Background(), "%", "r", 1, nil) + testURLParseError(t, err) +} + +func TestIssuesService_DeleteMilestone(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/milestones/1", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "DELETE") + }) + + _, err := client.Issues.DeleteMilestone(context.Background(), "o", "r", 1) + if err != nil { + t.Errorf("IssuesService.DeleteMilestone returned error: %v", err) + } +} + +func TestIssuesService_DeleteMilestone_invalidOwner(t *testing.T) { + _, err := client.Issues.DeleteMilestone(context.Background(), "%", "r", 1) + testURLParseError(t, err) +} diff --git a/vendor/github.com/google/go-github/github/issues_test.go b/vendor/github.com/google/go-github/github/issues_test.go new file mode 100644 index 000000000000..dadee75573e3 --- /dev/null +++ b/vendor/github.com/google/go-github/github/issues_test.go @@ -0,0 +1,277 @@ +// Copyright 2013 The go-github AUTHORS. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package github + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "reflect" + "testing" + "time" +) + +func TestIssuesService_List_all(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/issues", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testHeader(t, r, "Accept", mediaTypeReactionsPreview) + testFormValues(t, r, values{ + "filter": "all", + "state": "closed", + "labels": "a,b", + "sort": "updated", + "direction": "asc", + "since": "2002-02-10T15:30:00Z", + "page": "1", + "per_page": "2", + }) + fmt.Fprint(w, `[{"number":1}]`) + }) + + opt := &IssueListOptions{ + "all", "closed", []string{"a", "b"}, "updated", "asc", + time.Date(2002, time.February, 10, 15, 30, 0, 0, time.UTC), + ListOptions{Page: 1, PerPage: 2}, + } + issues, _, err := client.Issues.List(context.Background(), true, opt) + if err != nil { + t.Errorf("Issues.List returned error: %v", err) + } + + want := []*Issue{{Number: Int(1)}} + if !reflect.DeepEqual(issues, want) { + t.Errorf("Issues.List returned %+v, want %+v", issues, want) + } +} + +func TestIssuesService_List_owned(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/user/issues", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testHeader(t, r, "Accept", mediaTypeReactionsPreview) + fmt.Fprint(w, `[{"number":1}]`) + }) + + issues, _, err := client.Issues.List(context.Background(), false, nil) + if err != nil { + t.Errorf("Issues.List returned error: %v", err) + } + + want := []*Issue{{Number: Int(1)}} + if !reflect.DeepEqual(issues, want) { + t.Errorf("Issues.List returned %+v, want %+v", issues, want) + } +} + +func TestIssuesService_ListByOrg(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/orgs/o/issues", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testHeader(t, r, "Accept", mediaTypeReactionsPreview) + fmt.Fprint(w, `[{"number":1}]`) + }) + + issues, _, err := client.Issues.ListByOrg(context.Background(), "o", nil) + if err != nil { + t.Errorf("Issues.ListByOrg returned error: %v", err) + } + + want := []*Issue{{Number: Int(1)}} + if !reflect.DeepEqual(issues, want) { + t.Errorf("Issues.ListByOrg returned %+v, want %+v", issues, want) + } +} + +func
TestIssuesService_ListByOrg_invalidOrg(t *testing.T) { + _, _, err := client.Issues.ListByOrg(context.Background(), "%", nil) + testURLParseError(t, err) +} + +func TestIssuesService_ListByRepo(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/issues", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testHeader(t, r, "Accept", mediaTypeReactionsPreview) + testFormValues(t, r, values{ + "milestone": "*", + "state": "closed", + "assignee": "a", + "creator": "c", + "mentioned": "m", + "labels": "a,b", + "sort": "updated", + "direction": "asc", + "since": "2002-02-10T15:30:00Z", + }) + fmt.Fprint(w, `[{"number":1}]`) + }) + + opt := &IssueListByRepoOptions{ + "*", "closed", "a", "c", "m", []string{"a", "b"}, "updated", "asc", + time.Date(2002, time.February, 10, 15, 30, 0, 0, time.UTC), + ListOptions{0, 0}, + } + issues, _, err := client.Issues.ListByRepo(context.Background(), "o", "r", opt) + if err != nil { + t.Errorf("Issues.ListByRepo returned error: %v", err) + } + + want := []*Issue{{Number: Int(1)}} + if !reflect.DeepEqual(issues, want) { + t.Errorf("Issues.ListByRepo returned %+v, want %+v", issues, want) + } +} + +func TestIssuesService_ListByRepo_invalidOwner(t *testing.T) { + _, _, err := client.Issues.ListByRepo(context.Background(), "%", "r", nil) + testURLParseError(t, err) +} + +func TestIssuesService_Get(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/issues/1", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testHeader(t, r, "Accept", mediaTypeReactionsPreview) + fmt.Fprint(w, `{"number":1, "labels": [{"url": "u", "name": "n", "color": "c"}]}`) + }) + + issue, _, err := client.Issues.Get(context.Background(), "o", "r", 1) + if err != nil { + t.Errorf("Issues.Get returned error: %v", err) + } + + want := &Issue{ + Number: Int(1), + Labels: []Label{{ + URL: String("u"), + Name: String("n"), + Color: String("c"), + }}, + } + if !reflect.DeepEqual(issue, want) { + t.Errorf("Issues.Get returned %+v, want %+v", issue, want) + } +} + +func TestIssuesService_Get_invalidOwner(t *testing.T) { + _, _, err := client.Issues.Get(context.Background(), "%", "r", 1) + testURLParseError(t, err) +} + +func TestIssuesService_Create(t *testing.T) { + setup() + defer teardown() + + input := &IssueRequest{ + Title: String("t"), + Body: String("b"), + Assignee: String("a"), + Labels: &[]string{"l1", "l2"}, + } + + mux.HandleFunc("/repos/o/r/issues", func(w http.ResponseWriter, r *http.Request) { + v := new(IssueRequest) + json.NewDecoder(r.Body).Decode(v) + + testMethod(t, r, "POST") + if !reflect.DeepEqual(v, input) { + t.Errorf("Request body = %+v, want %+v", v, input) + } + + fmt.Fprint(w, `{"number":1}`) + }) + + issue, _, err := client.Issues.Create(context.Background(), "o", "r", input) + if err != nil { + t.Errorf("Issues.Create returned error: %v", err) + } + + want := &Issue{Number: Int(1)} + if !reflect.DeepEqual(issue, want) { + t.Errorf("Issues.Create returned %+v, want %+v", issue, want) + } +} + +func TestIssuesService_Create_invalidOwner(t *testing.T) { + _, _, err := client.Issues.Create(context.Background(), "%", "r", nil) + testURLParseError(t, err) +} + +func TestIssuesService_Edit(t *testing.T) { + setup() + defer teardown() + + input := &IssueRequest{Title: String("t")} + + mux.HandleFunc("/repos/o/r/issues/1", func(w http.ResponseWriter, r *http.Request) { + v := new(IssueRequest) + json.NewDecoder(r.Body).Decode(v) + + testMethod(t, r, "PATCH") + if !reflect.DeepEqual(v, input) {
+ t.Errorf("Request body = %+v, want %+v", v, input) + } + + fmt.Fprint(w, `{"number":1}`) + }) + + issue, _, err := client.Issues.Edit(context.Background(), "o", "r", 1, input) + if err != nil { + t.Errorf("Issues.Edit returned error: %v", err) + } + + want := &Issue{Number: Int(1)} + if !reflect.DeepEqual(issue, want) { + t.Errorf("Issues.Edit returned %+v, want %+v", issue, want) + } +} + +func TestIssuesService_Edit_invalidOwner(t *testing.T) { + _, _, err := client.Issues.Edit(context.Background(), "%", "r", 1, nil) + testURLParseError(t, err) +} + +func TestIssuesService_Lock(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/issues/1/lock", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "PUT") + + w.WriteHeader(http.StatusNoContent) + }) + + if _, err := client.Issues.Lock(context.Background(), "o", "r", 1); err != nil { + t.Errorf("Issues.Lock returned error: %v", err) + } +} + +func TestIssuesService_Unlock(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/issues/1/lock", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "DELETE") + + w.WriteHeader(http.StatusNoContent) + }) + + if _, err := client.Issues.Unlock(context.Background(), "o", "r", 1); err != nil { + t.Errorf("Issues.Unlock returned error: %v", err) + } +} diff --git a/vendor/github.com/google/go-github/github/issues_timeline_test.go b/vendor/github.com/google/go-github/github/issues_timeline_test.go new file mode 100644 index 000000000000..2fd22109b06b --- /dev/null +++ b/vendor/github.com/google/go-github/github/issues_timeline_test.go @@ -0,0 +1,40 @@ +// Copyright 2016 The go-github AUTHORS. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package github + +import ( + "context" + "fmt" + "net/http" + "reflect" + "testing" +) + +func TestIssuesService_ListIssueTimeline(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/issues/1/timeline", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testHeader(t, r, "Accept", mediaTypeTimelinePreview) + testFormValues(t, r, values{ + "page": "1", + "per_page": "2", + }) + fmt.Fprint(w, `[{"id":1}]`) + }) + + opt := &ListOptions{Page: 1, PerPage: 2} + events, _, err := client.Issues.ListIssueTimeline(context.Background(), "o", "r", 1, opt) + if err != nil { + t.Errorf("Issues.ListIssueTimeline returned error: %v", err) + } + + want := []*Timeline{{ID: Int(1)}} + if !reflect.DeepEqual(events, want) { + t.Errorf("Issues.ListIssueTimeline = %+v, want %+v", events, want) + } +} diff --git a/vendor/github.com/google/go-github/github/licenses_test.go b/vendor/github.com/google/go-github/github/licenses_test.go new file mode 100644 index 000000000000..5c06e6598543 --- /dev/null +++ b/vendor/github.com/google/go-github/github/licenses_test.go @@ -0,0 +1,67 @@ +// Copyright 2013 The go-github AUTHORS. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package github + +import ( + "context" + "fmt" + "net/http" + "reflect" + "testing" +) + +func TestLicensesService_List(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/licenses", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testHeader(t, r, "Accept", mediaTypeLicensesPreview) + fmt.Fprint(w, `[{"key":"mit","name":"MIT","spdx_id":"MIT","url":"https://api.github.com/licenses/mit","featured":true}]`) + }) + + licenses, _, err := client.Licenses.List(context.Background()) + if err != nil { + t.Errorf("Licenses.List returned error: %v", err) + } + + want := []*License{{ + Key: String("mit"), + Name: String("MIT"), + SPDXID: String("MIT"), + URL: String("https://api.github.com/licenses/mit"), + Featured: Bool(true), + }} + if !reflect.DeepEqual(licenses, want) { + t.Errorf("Licenses.List returned %+v, want %+v", licenses, want) + } +} + +func TestLicensesService_Get(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/licenses/mit", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testHeader(t, r, "Accept", mediaTypeLicensesPreview) + fmt.Fprint(w, `{"key":"mit","name":"MIT"}`) + }) + + license, _, err := client.Licenses.Get(context.Background(), "mit") + if err != nil { + t.Errorf("Licenses.Get returned error: %v", err) + } + + want := &License{Key: String("mit"), Name: String("MIT")} + if !reflect.DeepEqual(license, want) { + t.Errorf("Licenses.Get returned %+v, want %+v", license, want) + } +} + +func TestLicensesService_Get_invalidTemplate(t *testing.T) { + _, _, err := client.Licenses.Get(context.Background(), "%") + testURLParseError(t, err) +} diff --git a/vendor/github.com/google/go-github/github/messages_test.go b/vendor/github.com/google/go-github/github/messages_test.go new file mode 100644 index 000000000000..5f348f2aa6f3 --- /dev/null +++ b/vendor/github.com/google/go-github/github/messages_test.go @@ -0,0 +1,252 @@ +// Copyright 2016 The go-github AUTHORS. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package github + +import ( + "bytes" + "encoding/json" + "net/http" + "reflect" + "testing" +) + +func TestValidatePayload(t *testing.T) { + const defaultBody = `{"yo":true}` // All tests below use the default request body and signature. 
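+ // defaultSignature is the hex HMAC digest of defaultBody keyed with secretKey, + // in GitHub's X-Hub-Signature format; a sketch of how such a value can be + // derived with crypto/hmac, crypto/sha1, and encoding/hex: + // + // mac := hmac.New(sha1.New, secretKey) + // mac.Write([]byte(defaultBody)) + // sig := "sha1=" + hex.EncodeToString(mac.Sum(nil))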
+ const defaultSignature = "sha1=126f2c800419c60137ce748d7672e77b65cf16d6" + secretKey := []byte("0123456789abcdef") + tests := []struct { + signature string + eventID string + event string + wantEventID string + wantEvent string + wantPayload string + }{ + // The following tests generate expected errors: + {}, // Missing signature + {signature: "yo"}, // Missing signature prefix + {signature: "sha1=yo"}, // Signature not hex string + {signature: "sha1=012345"}, // Invalid signature + // The following tests expect err=nil: + { + signature: defaultSignature, + eventID: "dead-beef", + event: "ping", + wantEventID: "dead-beef", + wantEvent: "ping", + wantPayload: defaultBody, + }, + { + signature: defaultSignature, + event: "ping", + wantEvent: "ping", + wantPayload: defaultBody, + }, + { + signature: "sha256=b1f8020f5b4cd42042f807dd939015c4a418bc1ff7f604dd55b0a19b5d953d9b", + event: "ping", + wantEvent: "ping", + wantPayload: defaultBody, + }, + { + signature: "sha512=8456767023c1195682e182a23b3f5d19150ecea598fde8cb85918f7281b16079471b1329f92b912c4d8bd7455cb159777db8f29608b20c7c87323ba65ae62e1f", + event: "ping", + wantEvent: "ping", + wantPayload: defaultBody, + }, + } + + for _, test := range tests { + buf := bytes.NewBufferString(defaultBody) + req, err := http.NewRequest("GET", "http://localhost/event", buf) + if err != nil { + t.Fatalf("NewRequest: %v", err) + } + if test.signature != "" { + req.Header.Set(signatureHeader, test.signature) + } + + got, err := ValidatePayload(req, secretKey) + if err != nil { + if test.wantPayload != "" { + t.Errorf("ValidatePayload(%#v): err = %v, want nil", test, err) + } + continue + } + if string(got) != test.wantPayload { + t.Errorf("ValidatePayload = %q, want %q", got, test.wantPayload) + } + } +} + +func TestParseWebHook(t *testing.T) { + tests := []struct { + payload interface{} + messageType string + }{ + { + payload: &CommitCommentEvent{}, + messageType: "commit_comment", + }, + { + payload: &CreateEvent{}, + messageType: "create", + }, + { + payload: &DeleteEvent{}, + messageType: "delete", + }, + { + payload: &DeploymentEvent{}, + messageType: "deployment", + }, + + { + payload: &DeploymentStatusEvent{}, + messageType: "deployment_status", + }, + { + payload: &ForkEvent{}, + messageType: "fork", + }, + { + payload: &GollumEvent{}, + messageType: "gollum", + }, + { + payload: &InstallationEvent{}, + messageType: "installation", + }, + { + payload: &InstallationRepositoriesEvent{}, + messageType: "installation_repositories", + }, + { + payload: &IssueCommentEvent{}, + messageType: "issue_comment", + }, + { + payload: &IssuesEvent{}, + messageType: "issues", + }, + { + payload: &LabelEvent{}, + messageType: "label", + }, + { + payload: &MemberEvent{}, + messageType: "member", + }, + { + payload: &MembershipEvent{}, + messageType: "membership", + }, + { + payload: &MilestoneEvent{}, + messageType: "milestone", + }, + { + payload: &OrganizationEvent{}, + messageType: "organization", + }, + { + payload: &OrgBlockEvent{}, + messageType: "org_block", + }, + { + payload: &PageBuildEvent{}, + messageType: "page_build", + }, + { + payload: &PingEvent{}, + messageType: "ping", + }, + { + payload: &ProjectEvent{}, + messageType: "project", + }, + { + payload: &ProjectCardEvent{}, + messageType: "project_card", + }, + { + payload: &ProjectColumnEvent{}, + messageType: "project_column", + }, + { + payload: &PublicEvent{}, + messageType: "public", + }, + { + payload: &PullRequestEvent{}, + messageType: "pull_request", + }, + { + payload: 
&PullRequestReviewEvent{}, + messageType: "pull_request_review", + }, + { + payload: &PullRequestReviewCommentEvent{}, + messageType: "pull_request_review_comment", + }, + { + payload: &PushEvent{}, + messageType: "push", + }, + { + payload: &ReleaseEvent{}, + messageType: "release", + }, + { + payload: &RepositoryEvent{}, + messageType: "repository", + }, + { + payload: &StatusEvent{}, + messageType: "status", + }, + { + payload: &TeamEvent{}, + messageType: "team", + }, + { + payload: &TeamAddEvent{}, + messageType: "team_add", + }, + { + payload: &WatchEvent{}, + messageType: "watch", + }, + } + + for _, test := range tests { + p, err := json.Marshal(test.payload) + if err != nil { + t.Fatalf("Marshal(%#v): %v", test.payload, err) + } + got, err := ParseWebHook(test.messageType, p) + if err != nil { + t.Fatalf("ParseWebHook: %v", err) + } + if want := test.payload; !reflect.DeepEqual(got, want) { + t.Errorf("ParseWebHook(%#v, %#v) = %#v, want %#v", test.messageType, p, got, want) + } + } +} + +func TestDeliveryID(t *testing.T) { + id := "8970a780-244e-11e7-91ca-da3aabcb9793" + req, err := http.NewRequest("POST", "http://localhost", nil) + if err != nil { + t.Fatalf("DeliveryID: %v", err) + } + req.Header.Set("X-Github-Delivery", id) + + got := DeliveryID(req) + if got != id { + t.Errorf("DeliveryID(%#v) = %q, want %q", req, got, id) + } +} diff --git a/vendor/github.com/google/go-github/github/migrations_source_import_test.go b/vendor/github.com/google/go-github/github/migrations_source_import_test.go new file mode 100644 index 000000000000..67b98fb42b90 --- /dev/null +++ b/vendor/github.com/google/go-github/github/migrations_source_import_test.go @@ -0,0 +1,226 @@ +// Copyright 2016 The go-github AUTHORS. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package github + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "reflect" + "testing" +) + +func TestMigrationService_StartImport(t *testing.T) { + setup() + defer teardown() + + input := &Import{ + VCS: String("git"), + VCSURL: String("url"), + VCSUsername: String("u"), + VCSPassword: String("p"), + } + + mux.HandleFunc("/repos/o/r/import", func(w http.ResponseWriter, r *http.Request) { + v := new(Import) + json.NewDecoder(r.Body).Decode(v) + + testMethod(t, r, "PUT") + testHeader(t, r, "Accept", mediaTypeImportPreview) + if !reflect.DeepEqual(v, input) { + t.Errorf("Request body = %+v, want %+v", v, input) + } + + w.WriteHeader(http.StatusCreated) + fmt.Fprint(w, `{"status":"importing"}`) + }) + + got, _, err := client.Migrations.StartImport(context.Background(), "o", "r", input) + if err != nil { + t.Errorf("StartImport returned error: %v", err) + } + want := &Import{Status: String("importing")} + if !reflect.DeepEqual(got, want) { + t.Errorf("StartImport = %+v, want %+v", got, want) + } +} + +func TestMigrationService_ImportProgress(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/import", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testHeader(t, r, "Accept", mediaTypeImportPreview) + fmt.Fprint(w, `{"status":"complete"}`) + }) + + got, _, err := client.Migrations.ImportProgress(context.Background(), "o", "r") + if err != nil { + t.Errorf("ImportProgress returned error: %v", err) + } + want := &Import{Status: String("complete")} + if !reflect.DeepEqual(got, want) { + t.Errorf("ImportProgress = %+v, want %+v", got, want) + } +} + +func TestMigrationService_UpdateImport(t *testing.T) { + setup() + defer teardown() + + input := &Import{ + VCS: String("git"), + VCSURL: String("url"), + VCSUsername: String("u"), + VCSPassword: String("p"), + } + + mux.HandleFunc("/repos/o/r/import", func(w http.ResponseWriter, r *http.Request) { + v := new(Import) + json.NewDecoder(r.Body).Decode(v) + + testMethod(t, r, "PATCH") + testHeader(t, r, "Accept", mediaTypeImportPreview) + if !reflect.DeepEqual(v, input) { + t.Errorf("Request body = %+v, want %+v", v, input) + } + + w.WriteHeader(http.StatusCreated) + fmt.Fprint(w, `{"status":"importing"}`) + }) + + got, _, err := client.Migrations.UpdateImport(context.Background(), "o", "r", input) + if err != nil { + t.Errorf("UpdateImport returned error: %v", err) + } + want := &Import{Status: String("importing")} + if !reflect.DeepEqual(got, want) { + t.Errorf("UpdateImport = %+v, want %+v", got, want) + } +} + +func TestMigrationService_CommitAuthors(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/import/authors", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testHeader(t, r, "Accept", mediaTypeImportPreview) + fmt.Fprint(w, `[{"id":1,"name":"a"},{"id":2,"name":"b"}]`) + }) + + got, _, err := client.Migrations.CommitAuthors(context.Background(), "o", "r") + if err != nil { + t.Errorf("CommitAuthors returned error: %v", err) + } + want := []*SourceImportAuthor{ + {ID: Int(1), Name: String("a")}, + {ID: Int(2), Name: String("b")}, + } + if !reflect.DeepEqual(got, want) { + t.Errorf("CommitAuthors = %+v, want %+v", got, want) + } +} + +func TestMigrationService_MapCommitAuthor(t *testing.T) { + setup() + defer teardown() + + input := &SourceImportAuthor{Name: String("n"), Email: String("e")} + + mux.HandleFunc("/repos/o/r/import/authors/1", func(w http.ResponseWriter, r *http.Request) { + v := new(SourceImportAuthor) + 
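// decode the PATCH request body into v so the handler can assert it matches input +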
json.NewDecoder(r.Body).Decode(v) + + testMethod(t, r, "PATCH") + testHeader(t, r, "Accept", mediaTypeImportPreview) + if !reflect.DeepEqual(v, input) { + t.Errorf("Request body = %+v, want %+v", v, input) + } + + fmt.Fprint(w, `{"id": 1}`) + }) + + got, _, err := client.Migrations.MapCommitAuthor(context.Background(), "o", "r", 1, input) + if err != nil { + t.Errorf("MapCommitAuthor returned error: %v", err) + } + want := &SourceImportAuthor{ID: Int(1)} + if !reflect.DeepEqual(got, want) { + t.Errorf("MapCommitAuthor = %+v, want %+v", got, want) + } +} + +func TestMigrationService_SetLFSPreference(t *testing.T) { + setup() + defer teardown() + + input := &Import{UseLFS: String("opt_in")} + + mux.HandleFunc("/repos/o/r/import/lfs", func(w http.ResponseWriter, r *http.Request) { + v := new(Import) + json.NewDecoder(r.Body).Decode(v) + + testMethod(t, r, "PATCH") + testHeader(t, r, "Accept", mediaTypeImportPreview) + if !reflect.DeepEqual(v, input) { + t.Errorf("Request body = %+v, want %+v", v, input) + } + + w.WriteHeader(http.StatusCreated) + fmt.Fprint(w, `{"status":"importing"}`) + }) + + got, _, err := client.Migrations.SetLFSPreference(context.Background(), "o", "r", input) + if err != nil { + t.Errorf("SetLFSPreference returned error: %v", err) + } + want := &Import{Status: String("importing")} + if !reflect.DeepEqual(got, want) { + t.Errorf("SetLFSPreference = %+v, want %+v", got, want) + } +} + +func TestMigrationService_LargeFiles(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/import/large_files", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testHeader(t, r, "Accept", mediaTypeImportPreview) + fmt.Fprint(w, `[{"oid":"a"},{"oid":"b"}]`) + }) + + got, _, err := client.Migrations.LargeFiles(context.Background(), "o", "r") + if err != nil { + t.Errorf("LargeFiles returned error: %v", err) + } + want := []*LargeFile{ + {OID: String("a")}, + {OID: String("b")}, + } + if !reflect.DeepEqual(got, want) { + t.Errorf("LargeFiles = %+v, want %+v", got, want) + } +} + +func TestMigrationService_CancelImport(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/import", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "DELETE") + testHeader(t, r, "Accept", mediaTypeImportPreview) + w.WriteHeader(http.StatusNoContent) + }) + + _, err := client.Migrations.CancelImport(context.Background(), "o", "r") + if err != nil { + t.Errorf("CancelImport returned error: %v", err) + } +} diff --git a/vendor/github.com/google/go-github/github/migrations_test.go b/vendor/github.com/google/go-github/github/migrations_test.go new file mode 100644 index 000000000000..7e0ac0f75069 --- /dev/null +++ b/vendor/github.com/google/go-github/github/migrations_test.go @@ -0,0 +1,178 @@ +// Copyright 2016 The go-github AUTHORS. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package github + +import ( + "context" + "fmt" + "net/http" + "reflect" + "strings" + "testing" +) + +func TestMigrationService_StartMigration(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/orgs/o/migrations", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "POST") + testHeader(t, r, "Accept", mediaTypeMigrationsPreview) + + w.WriteHeader(http.StatusCreated) + w.Write(migrationJSON) + }) + + opt := &MigrationOptions{ + LockRepositories: true, + ExcludeAttachments: false, + } + got, _, err := client.Migrations.StartMigration(context.Background(), "o", []string{"r"}, opt) + if err != nil { + t.Errorf("StartMigration returned error: %v", err) + } + if want := wantMigration; !reflect.DeepEqual(got, want) { + t.Errorf("StartMigration = %+v, want %+v", got, want) + } +} + +func TestMigrationService_ListMigrations(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/orgs/o/migrations", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testHeader(t, r, "Accept", mediaTypeMigrationsPreview) + + w.WriteHeader(http.StatusOK) + w.Write([]byte(fmt.Sprintf("[%s]", migrationJSON))) + }) + + got, _, err := client.Migrations.ListMigrations(context.Background(), "o") + if err != nil { + t.Errorf("ListMigrations returned error: %v", err) + } + if want := []*Migration{wantMigration}; !reflect.DeepEqual(got, want) { + t.Errorf("ListMigrations = %+v, want %+v", got, want) + } +} + +func TestMigrationService_MigrationStatus(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/orgs/o/migrations/1", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testHeader(t, r, "Accept", mediaTypeMigrationsPreview) + + w.WriteHeader(http.StatusOK) + w.Write(migrationJSON) + }) + + got, _, err := client.Migrations.MigrationStatus(context.Background(), "o", 1) + if err != nil { + t.Errorf("MigrationStatus returned error: %v", err) + } + if want := wantMigration; !reflect.DeepEqual(got, want) { + t.Errorf("MigrationStatus = %+v, want %+v", got, want) + } +} + +func TestMigrationService_MigrationArchiveURL(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/orgs/o/migrations/1/archive", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testHeader(t, r, "Accept", mediaTypeMigrationsPreview) + + http.Redirect(w, r, "/yo", http.StatusFound) + }) + mux.HandleFunc("/yo", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + + w.WriteHeader(http.StatusOK) + w.Write([]byte("0123456789abcdef")) + }) + + got, err := client.Migrations.MigrationArchiveURL(context.Background(), "o", 1) + if err != nil { + t.Errorf("MigrationArchiveURL returned error: %v", err) + } + if want := "/yo"; !strings.HasSuffix(got, want) { + t.Errorf("MigrationArchiveURL = %+v, want %+v", got, want) + } +} + +func TestMigrationService_DeleteMigration(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/orgs/o/migrations/1/archive", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "DELETE") + testHeader(t, r, "Accept", mediaTypeMigrationsPreview) + + w.WriteHeader(http.StatusNoContent) + }) + + if _, err := client.Migrations.DeleteMigration(context.Background(), "o", 1); err != nil { + t.Errorf("DeleteMigration returned error: %v", err) + } +} + +func TestMigrationService_UnlockRepo(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/orgs/o/migrations/1/repos/r/lock", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "DELETE") +
testHeader(t, r, "Accept", mediaTypeMigrationsPreview) + + w.WriteHeader(http.StatusNoContent) + }) + + if _, err := client.Migrations.UnlockRepo(context.Background(), "o", 1, "r"); err != nil { + t.Errorf("UnlockRepo returned error: %v", err) + } +} + +var migrationJSON = []byte(`{ + "id": 79, + "guid": "0b989ba4-242f-11e5-81e1-c7b6966d2516", + "state": "pending", + "lock_repositories": true, + "exclude_attachments": false, + "url": "https://api.github.com/orgs/octo-org/migrations/79", + "created_at": "2015-07-06T15:33:38-07:00", + "updated_at": "2015-07-06T15:33:38-07:00", + "repositories": [ + { + "id": 1296269, + "name": "Hello-World", + "full_name": "octocat/Hello-World", + "description": "This your first repo!" + } + ] +}`) + +var wantMigration = &Migration{ + ID: Int(79), + GUID: String("0b989ba4-242f-11e5-81e1-c7b6966d2516"), + State: String("pending"), + LockRepositories: Bool(true), + ExcludeAttachments: Bool(false), + URL: String("https://api.github.com/orgs/octo-org/migrations/79"), + CreatedAt: String("2015-07-06T15:33:38-07:00"), + UpdatedAt: String("2015-07-06T15:33:38-07:00"), + Repositories: []*Repository{ + { + ID: Int(1296269), + Name: String("Hello-World"), + FullName: String("octocat/Hello-World"), + Description: String("This your first repo!"), + }, + }, +} diff --git a/vendor/github.com/google/go-github/github/misc_test.go b/vendor/github.com/google/go-github/github/misc_test.go new file mode 100644 index 000000000000..f2b2df44b5e9 --- /dev/null +++ b/vendor/github.com/google/go-github/github/misc_test.go @@ -0,0 +1,232 @@ +// Copyright 2014 The go-github AUTHORS. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package github + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "reflect" + "testing" +) + +func TestMarkdown(t *testing.T) { + setup() + defer teardown() + + input := &markdownRequest{ + Text: String("# text #"), + Mode: String("gfm"), + Context: String("google/go-github"), + } + mux.HandleFunc("/markdown", func(w http.ResponseWriter, r *http.Request) { + v := new(markdownRequest) + json.NewDecoder(r.Body).Decode(v) + + testMethod(t, r, "POST") + if !reflect.DeepEqual(v, input) { + t.Errorf("Request body = %+v, want %+v", v, input) + } + fmt.Fprint(w, `
<p>text</p>
`) + }) + + md, _, err := client.Markdown(context.Background(), "# text #", &MarkdownOptions{ + Mode: "gfm", + Context: "google/go-github", + }) + if err != nil { + t.Errorf("Markdown returned error: %v", err) + } + + if want := "
<p>text</p>"; want != md { + t.Errorf("Markdown returned %+v, want %+v", md, want) + } +} + +func TestListEmojis(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/emojis", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + fmt.Fprint(w, `{"+1": "+1.png"}`) + }) + + emoji, _, err := client.ListEmojis(context.Background()) + if err != nil { + t.Errorf("ListEmojis returned error: %v", err) + } + + want := map[string]string{"+1": "+1.png"} + if !reflect.DeepEqual(want, emoji) { + t.Errorf("ListEmojis returned %+v, want %+v", emoji, want) + } +} + +func TestListCodesOfConduct(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/codes_of_conduct", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testHeader(t, r, "Accept", mediaTypeCodesOfConductPreview) + fmt.Fprint(w, `[{ + "key": "key", + "name": "name", + "url": "url"} + ]`) + }) + + cs, _, err := client.ListCodesOfConduct(context.Background()) + if err != nil { + t.Errorf("ListCodesOfConduct returned error: %v", err) + } + + want := []*CodeOfConduct{ + { + Key: String("key"), + Name: String("name"), + URL: String("url"), + }} + if !reflect.DeepEqual(want, cs) { + t.Errorf("ListCodesOfConduct returned %+v, want %+v", cs, want) + } +} + +func TestGetCodeOfConduct(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/codes_of_conduct/k", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testHeader(t, r, "Accept", mediaTypeCodesOfConductPreview) + fmt.Fprint(w, `{ + "key": "key", + "name": "name", + "url": "url", + "body": "body"}`, + ) + }) + + coc, _, err := client.GetCodeOfConduct(context.Background(), "k") + if err != nil { + t.Errorf("GetCodeOfConduct returned error: %v", err) + } + + want := &CodeOfConduct{ + Key: String("key"), + Name: String("name"), + URL: String("url"), + Body: String("body"), + } + if !reflect.DeepEqual(want, coc) { + t.Errorf("GetCodeOfConduct returned %+v, want %+v", coc, want) + } +} + +func TestAPIMeta(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/meta", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + fmt.Fprint(w, `{"hooks":["h"], "git":["g"], "pages":["p"], "verifiable_password_authentication": true}`) + }) + + meta, _, err := client.APIMeta(context.Background()) + if err != nil { + t.Errorf("APIMeta returned error: %v", err) + } + + want := &APIMeta{ + Hooks: []string{"h"}, + Git: []string{"g"}, + Pages: []string{"p"}, + VerifiablePasswordAuthentication: Bool(true), + } + if !reflect.DeepEqual(want, meta) { + t.Errorf("APIMeta returned %+v, want %+v", meta, want) + } +} + +func TestOctocat(t *testing.T) { + setup() + defer teardown() + + input := "input" + output := "sample text" + + mux.HandleFunc("/octocat", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testFormValues(t, r, values{"s": input}) + w.Header().Set("Content-Type", "application/octocat-stream") + fmt.Fprint(w, output) + }) + + got, _, err := client.Octocat(context.Background(), input) + if err != nil { + t.Errorf("Octocat returned error: %v", err) + } + + if want := output; got != want { + t.Errorf("Octocat returned %+v, want %+v", got, want) + } +} + +func TestZen(t *testing.T) { + setup() + defer teardown() + + output := "sample text" + + mux.HandleFunc("/zen", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + w.Header().Set("Content-Type", "text/plain;charset=utf-8") + fmt.Fprint(w, output) + }) + + got, _, err :=
client.Zen(context.Background()) + if err != nil { + t.Errorf("Zen returned error: %v", err) + } + + if want := output; got != want { + t.Errorf("Zen returned %+v, want %+v", got, want) + } +} + +func TestListServiceHooks(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/hooks", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + fmt.Fprint(w, `[{ + "name":"n", + "events":["e"], + "supported_events":["s"], + "schema":[ + ["a", "b"] + ] + }]`) + }) + + hooks, _, err := client.ListServiceHooks(context.Background()) + if err != nil { + t.Errorf("ListServiceHooks returned error: %v", err) + } + + want := []*ServiceHook{{ + Name: String("n"), + Events: []string{"e"}, + SupportedEvents: []string{"s"}, + Schema: [][]string{{"a", "b"}}, + }} + if !reflect.DeepEqual(hooks, want) { + t.Errorf("ListServiceHooks returned %+v, want %+v", hooks, want) + } +} diff --git a/vendor/github.com/google/go-github/github/orgs_hooks_test.go b/vendor/github.com/google/go-github/github/orgs_hooks_test.go new file mode 100644 index 000000000000..69e75f9f4ac1 --- /dev/null +++ b/vendor/github.com/google/go-github/github/orgs_hooks_test.go @@ -0,0 +1,135 @@ +// Copyright 2015 The go-github AUTHORS. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package github + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "reflect" + "testing" +) + +func TestOrganizationsService_ListHooks(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/orgs/o/hooks", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testFormValues(t, r, values{"page": "2"}) + fmt.Fprint(w, `[{"id":1}, {"id":2}]`) + }) + + opt := &ListOptions{Page: 2} + + hooks, _, err := client.Organizations.ListHooks(context.Background(), "o", opt) + if err != nil { + t.Errorf("Organizations.ListHooks returned error: %v", err) + } + + want := []*Hook{{ID: Int(1)}, {ID: Int(2)}} + if !reflect.DeepEqual(hooks, want) { + t.Errorf("Organizations.ListHooks returned %+v, want %+v", hooks, want) + } +} + +func TestOrganizationsService_ListHooks_invalidOrg(t *testing.T) { + _, _, err := client.Organizations.ListHooks(context.Background(), "%", nil) + testURLParseError(t, err) +} + +func TestOrganizationsService_GetHook(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/orgs/o/hooks/1", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + fmt.Fprint(w, `{"id":1}`) + }) + + hook, _, err := client.Organizations.GetHook(context.Background(), "o", 1) + if err != nil { + t.Errorf("Organizations.GetHook returned error: %v", err) + } + + want := &Hook{ID: Int(1)} + if !reflect.DeepEqual(hook, want) { + t.Errorf("Organizations.GetHook returned %+v, want %+v", hook, want) + } +} + +func TestOrganizationsService_GetHook_invalidOrg(t *testing.T) { + _, _, err := client.Organizations.GetHook(context.Background(), "%", 1) + testURLParseError(t, err) +} + +func TestOrganizationsService_EditHook(t *testing.T) { + setup() + defer teardown() + + input := &Hook{Name: String("t")} + + mux.HandleFunc("/orgs/o/hooks/1", func(w http.ResponseWriter, r *http.Request) { + v := new(Hook) + json.NewDecoder(r.Body).Decode(v) + + testMethod(t, r, "PATCH") + if !reflect.DeepEqual(v, input) { + t.Errorf("Request body = %+v, want %+v", v, input) + } + + fmt.Fprint(w, `{"id":1}`) + }) + + hook, _, err := client.Organizations.EditHook(context.Background(), "o", 1, input) + if err != nil { + 
t.Errorf("Organizations.EditHook returned error: %v", err) + } + + want := &Hook{ID: Int(1)} + if !reflect.DeepEqual(hook, want) { + t.Errorf("Organizations.EditHook returned %+v, want %+v", hook, want) + } +} + +func TestOrganizationsService_EditHook_invalidOrg(t *testing.T) { + _, _, err := client.Organizations.EditHook(context.Background(), "%", 1, nil) + testURLParseError(t, err) +} + +func TestOrganizationsService_PingHook(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/orgs/o/hooks/1/pings", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "POST") + }) + + _, err := client.Organizations.PingHook(context.Background(), "o", 1) + if err != nil { + t.Errorf("Organizations.PingHook returned error: %v", err) + } +} + +func TestOrganizationsService_DeleteHook(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/orgs/o/hooks/1", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "DELETE") + }) + + _, err := client.Organizations.DeleteHook(context.Background(), "o", 1) + if err != nil { + t.Errorf("Organizations.DeleteHook returned error: %v", err) + } +} + +func TestOrganizationsService_DeleteHook_invalidOrg(t *testing.T) { + _, err := client.Organizations.DeleteHook(context.Background(), "%", 1) + testURLParseError(t, err) +} diff --git a/vendor/github.com/google/go-github/github/orgs_members_test.go b/vendor/github.com/google/go-github/github/orgs_members_test.go new file mode 100644 index 000000000000..a17c51865df6 --- /dev/null +++ b/vendor/github.com/google/go-github/github/orgs_members_test.go @@ -0,0 +1,434 @@ +// Copyright 2013 The go-github AUTHORS. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package github + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "reflect" + "testing" + "time" +) + +func TestOrganizationsService_ListMembers(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/orgs/o/members", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testFormValues(t, r, values{ + "filter": "2fa_disabled", + "role": "admin", + "page": "2", + }) + fmt.Fprint(w, `[{"id":1}]`) + }) + + opt := &ListMembersOptions{ + PublicOnly: false, + Filter: "2fa_disabled", + Role: "admin", + ListOptions: ListOptions{Page: 2}, + } + members, _, err := client.Organizations.ListMembers(context.Background(), "o", opt) + if err != nil { + t.Errorf("Organizations.ListMembers returned error: %v", err) + } + + want := []*User{{ID: Int(1)}} + if !reflect.DeepEqual(members, want) { + t.Errorf("Organizations.ListMembers returned %+v, want %+v", members, want) + } +} + +func TestOrganizationsService_ListMembers_invalidOrg(t *testing.T) { + _, _, err := client.Organizations.ListMembers(context.Background(), "%", nil) + testURLParseError(t, err) +} + +func TestOrganizationsService_ListMembers_public(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/orgs/o/public_members", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + fmt.Fprint(w, `[{"id":1}]`) + }) + + opt := &ListMembersOptions{PublicOnly: true} + members, _, err := client.Organizations.ListMembers(context.Background(), "o", opt) + if err != nil { + t.Errorf("Organizations.ListMembers returned error: %v", err) + } + + want := []*User{{ID: Int(1)}} + if !reflect.DeepEqual(members, want) { + t.Errorf("Organizations.ListMembers returned %+v, want %+v", members, want) + } +} + +func TestOrganizationsService_IsMember(t 
*testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/orgs/o/members/u", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + w.WriteHeader(http.StatusNoContent) + }) + + member, _, err := client.Organizations.IsMember(context.Background(), "o", "u") + if err != nil { + t.Errorf("Organizations.IsMember returned error: %v", err) + } + if want := true; member != want { + t.Errorf("Organizations.IsMember returned %+v, want %+v", member, want) + } +} + +// ensure that a 404 response is interpreted as "false" and not an error +func TestOrganizationsService_IsMember_notMember(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/orgs/o/members/u", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + w.WriteHeader(http.StatusNotFound) + }) + + member, _, err := client.Organizations.IsMember(context.Background(), "o", "u") + if err != nil { + t.Errorf("Organizations.IsMember returned error: %+v", err) + } + if want := false; member != want { + t.Errorf("Organizations.IsMember returned %+v, want %+v", member, want) + } +} + +// ensure that a 400 response is interpreted as an actual error, and not simply +// as "false" like the above case of a 404 +func TestOrganizationsService_IsMember_error(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/orgs/o/members/u", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + http.Error(w, "BadRequest", http.StatusBadRequest) + }) + + member, _, err := client.Organizations.IsMember(context.Background(), "o", "u") + if err == nil { + t.Errorf("Expected HTTP 400 response") + } + if want := false; member != want { + t.Errorf("Organizations.IsMember returned %+v, want %+v", member, want) + } +} + +func TestOrganizationsService_IsMember_invalidOrg(t *testing.T) { + _, _, err := client.Organizations.IsMember(context.Background(), "%", "u") + testURLParseError(t, err) +} + +func TestOrganizationsService_IsPublicMember(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/orgs/o/public_members/u", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + w.WriteHeader(http.StatusNoContent) + }) + + member, _, err := client.Organizations.IsPublicMember(context.Background(), "o", "u") + if err != nil { + t.Errorf("Organizations.IsPublicMember returned error: %v", err) + } + if want := true; member != want { + t.Errorf("Organizations.IsPublicMember returned %+v, want %+v", member, want) + } +} + +// ensure that a 404 response is interpreted as "false" and not an error +func TestOrganizationsService_IsPublicMember_notMember(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/orgs/o/public_members/u", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + w.WriteHeader(http.StatusNotFound) + }) + + member, _, err := client.Organizations.IsPublicMember(context.Background(), "o", "u") + if err != nil { + t.Errorf("Organizations.IsPublicMember returned error: %v", err) + } + if want := false; member != want { + t.Errorf("Organizations.IsPublicMember returned %+v, want %+v", member, want) + } +} + +// ensure that a 400 response is interpreted as an actual error, and not simply +// as "false" like the above case of a 404 +func TestOrganizationsService_IsPublicMember_error(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/orgs/o/public_members/u", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + http.Error(w, "BadRequest", http.StatusBadRequest) + }) + + member, _, err := 
client.Organizations.IsPublicMember(context.Background(), "o", "u") + if err == nil { + t.Errorf("Expected HTTP 400 response") + } + if want := false; member != want { + t.Errorf("Organizations.IsPublicMember returned %+v, want %+v", member, want) + } +} + +func TestOrganizationsService_IsPublicMember_invalidOrg(t *testing.T) { + _, _, err := client.Organizations.IsPublicMember(context.Background(), "%", "u") + testURLParseError(t, err) +} + +func TestOrganizationsService_RemoveMember(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/orgs/o/members/u", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "DELETE") + }) + + _, err := client.Organizations.RemoveMember(context.Background(), "o", "u") + if err != nil { + t.Errorf("Organizations.RemoveMember returned error: %v", err) + } +} + +func TestOrganizationsService_RemoveMember_invalidOrg(t *testing.T) { + _, err := client.Organizations.RemoveMember(context.Background(), "%", "u") + testURLParseError(t, err) +} + +func TestOrganizationsService_ListOrgMemberships(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/user/memberships/orgs", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testFormValues(t, r, values{ + "state": "active", + "page": "2", + }) + fmt.Fprint(w, `[{"url":"u"}]`) + }) + + opt := &ListOrgMembershipsOptions{ + State: "active", + ListOptions: ListOptions{Page: 2}, + } + memberships, _, err := client.Organizations.ListOrgMemberships(context.Background(), opt) + if err != nil { + t.Errorf("Organizations.ListOrgMemberships returned error: %v", err) + } + + want := []*Membership{{URL: String("u")}} + if !reflect.DeepEqual(memberships, want) { + t.Errorf("Organizations.ListOrgMemberships returned %+v, want %+v", memberships, want) + } +} + +func TestOrganizationsService_GetOrgMembership_AuthenticatedUser(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/user/memberships/orgs/o", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + fmt.Fprint(w, `{"url":"u"}`) + }) + + membership, _, err := client.Organizations.GetOrgMembership(context.Background(), "", "o") + if err != nil { + t.Errorf("Organizations.GetOrgMembership returned error: %v", err) + } + + want := &Membership{URL: String("u")} + if !reflect.DeepEqual(membership, want) { + t.Errorf("Organizations.GetOrgMembership returned %+v, want %+v", membership, want) + } +} + +func TestOrganizationsService_GetOrgMembership_SpecifiedUser(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/orgs/o/memberships/u", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + fmt.Fprint(w, `{"url":"u"}`) + }) + + membership, _, err := client.Organizations.GetOrgMembership(context.Background(), "u", "o") + if err != nil { + t.Errorf("Organizations.GetOrgMembership returned error: %v", err) + } + + want := &Membership{URL: String("u")} + if !reflect.DeepEqual(membership, want) { + t.Errorf("Organizations.GetOrgMembership returned %+v, want %+v", membership, want) + } +} + +func TestOrganizationsService_EditOrgMembership_AuthenticatedUser(t *testing.T) { + setup() + defer teardown() + + input := &Membership{State: String("active")} + + mux.HandleFunc("/user/memberships/orgs/o", func(w http.ResponseWriter, r *http.Request) { + v := new(Membership) + json.NewDecoder(r.Body).Decode(v) + + testMethod(t, r, "PATCH") + if !reflect.DeepEqual(v, input) { + t.Errorf("Request body = %+v, want %+v", v, input) + } + + fmt.Fprint(w, `{"url":"u"}`) + }) + + 
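// An empty user argument addresses the authenticated user, so this call is + // routed to /user/memberships/orgs/o rather than /orgs/o/memberships/u. +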
membership, _, err := client.Organizations.EditOrgMembership(context.Background(), "", "o", input) + if err != nil { + t.Errorf("Organizations.EditOrgMembership returned error: %v", err) + } + + want := &Membership{URL: String("u")} + if !reflect.DeepEqual(membership, want) { + t.Errorf("Organizations.EditOrgMembership returned %+v, want %+v", membership, want) + } +} + +func TestOrganizationsService_EditOrgMembership_SpecifiedUser(t *testing.T) { + setup() + defer teardown() + + input := &Membership{State: String("active")} + + mux.HandleFunc("/orgs/o/memberships/u", func(w http.ResponseWriter, r *http.Request) { + v := new(Membership) + json.NewDecoder(r.Body).Decode(v) + + testMethod(t, r, "PUT") + if !reflect.DeepEqual(v, input) { + t.Errorf("Request body = %+v, want %+v", v, input) + } + + fmt.Fprint(w, `{"url":"u"}`) + }) + + membership, _, err := client.Organizations.EditOrgMembership(context.Background(), "u", "o", input) + if err != nil { + t.Errorf("Organizations.EditOrgMembership returned error: %v", err) + } + + want := &Membership{URL: String("u")} + if !reflect.DeepEqual(membership, want) { + t.Errorf("Organizations.EditOrgMembership returned %+v, want %+v", membership, want) + } +} + +func TestOrganizationsService_RemoveOrgMembership(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/orgs/o/memberships/u", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "DELETE") + w.WriteHeader(http.StatusNoContent) + }) + + _, err := client.Organizations.RemoveOrgMembership(context.Background(), "u", "o") + if err != nil { + t.Errorf("Organizations.RemoveOrgMembership returned error: %v", err) + } +} + +func TestOrganizationsService_ListPendingOrgInvitations(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/orgs/1/invitations", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testFormValues(t, r, values{"page": "1"}) + fmt.Fprint(w, `[ + { + "id": 1, + "login": "monalisa", + "email": "octocat@github.com", + "role": "direct_member", + "created_at": "2017-01-21T00:00:00Z", + "inviter": { + "login": "other_user", + "id": 1, + "avatar_url": "https://github.com/images/error/other_user_happy.gif", + "gravatar_id": "", + "url": "https://api.github.com/users/other_user", + "html_url": "https://github.com/other_user", + "followers_url": "https://api.github.com/users/other_user/followers", + "following_url": "https://api.github.com/users/other_user/following/other_user", + "gists_url": "https://api.github.com/users/other_user/gists/gist_id", + "starred_url": "https://api.github.com/users/other_user/starred/owner/repo", + "subscriptions_url": "https://api.github.com/users/other_user/subscriptions", + "organizations_url": "https://api.github.com/users/other_user/orgs", + "repos_url": "https://api.github.com/users/other_user/repos", + "events_url": "https://api.github.com/users/other_user/events/privacy", + "received_events_url": "https://api.github.com/users/other_user/received_events/privacy", + "type": "User", + "site_admin": false + } + } + ]`) + }) + + opt := &ListOptions{Page: 1} + invitations, _, err := client.Organizations.ListPendingOrgInvitations(context.Background(), 1, opt) + if err != nil { + t.Errorf("Organizations.ListPendingOrgInvitations returned error: %v", err) + } + + createdAt := time.Date(2017, 01, 21, 0, 0, 0, 0, time.UTC) + want := []*Invitation{ + { + ID: Int(1), + Login: String("monalisa"), + Email: String("octocat@github.com"), + Role: String("direct_member"), + CreatedAt: &createdAt, + Inviter: &User{ 
+ Login: String("other_user"), + ID: Int(1), + AvatarURL: String("https://github.com/images/error/other_user_happy.gif"), + GravatarID: String(""), + URL: String("https://api.github.com/users/other_user"), + HTMLURL: String("https://github.com/other_user"), + FollowersURL: String("https://api.github.com/users/other_user/followers"), + FollowingURL: String("https://api.github.com/users/other_user/following/other_user"), + GistsURL: String("https://api.github.com/users/other_user/gists/gist_id"), + StarredURL: String("https://api.github.com/users/other_user/starred/owner/repo"), + SubscriptionsURL: String("https://api.github.com/users/other_user/subscriptions"), + OrganizationsURL: String("https://api.github.com/users/other_user/orgs"), + ReposURL: String("https://api.github.com/users/other_user/repos"), + EventsURL: String("https://api.github.com/users/other_user/events/privacy"), + ReceivedEventsURL: String("https://api.github.com/users/other_user/received_events/privacy"), + Type: String("User"), + SiteAdmin: Bool(false), + }, + }} + + if !reflect.DeepEqual(invitations, want) { + t.Errorf("Organizations.ListPendingOrgInvitations returned %+v, want %+v", invitations, want) + } +} diff --git a/vendor/github.com/google/go-github/github/orgs_outside_collaborators_test.go b/vendor/github.com/google/go-github/github/orgs_outside_collaborators_test.go new file mode 100644 index 000000000000..536006119b09 --- /dev/null +++ b/vendor/github.com/google/go-github/github/orgs_outside_collaborators_test.go @@ -0,0 +1,47 @@ +// Copyright 2017 The go-github AUTHORS. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package github + +import ( + "context" + "fmt" + "net/http" + "reflect" + "testing" +) + +func TestOrganizationsService_ListOutsideCollaborators(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/orgs/o/outside_collaborators", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testFormValues(t, r, values{ + "filter": "2fa_disabled", + "page": "2", + }) + fmt.Fprint(w, `[{"id":1}]`) + }) + + opt := &ListOutsideCollaboratorsOptions{ + Filter: "2fa_disabled", + ListOptions: ListOptions{Page: 2}, + } + members, _, err := client.Organizations.ListOutsideCollaborators(context.Background(), "o", opt) + if err != nil { + t.Errorf("Organizations.ListOutsideCollaborators returned error: %v", err) + } + + want := []*User{{ID: Int(1)}} + if !reflect.DeepEqual(members, want) { + t.Errorf("Organizations.ListOutsideCollaborators returned %+v, want %+v", members, want) + } +} + +func TestOrganizationsService_ListOutsideCollaborators_invalidOrg(t *testing.T) { + _, _, err := client.Organizations.ListOutsideCollaborators(context.Background(), "%", nil) + testURLParseError(t, err) +} diff --git a/vendor/github.com/google/go-github/github/orgs_projects_test.go b/vendor/github.com/google/go-github/github/orgs_projects_test.go new file mode 100644 index 000000000000..533f691968d5 --- /dev/null +++ b/vendor/github.com/google/go-github/github/orgs_projects_test.go @@ -0,0 +1,68 @@ +// Copyright 2017 The go-github AUTHORS. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
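All of these table-style tests lean on a shared harness defined in github_test.go (not part of this diff): setup() points the package-level client at an httptest server backed by mux, and teardown() shuts the server down. A minimal sketch of that harness, assuming the real one has at least this shape (the exact field set and trailing-slash handling are assumptions):

package github

import (
	"net/http"
	"net/http/httptest"
	"net/url"
)

var (
	mux    *http.ServeMux   // fake GitHub API; handlers are registered per test
	server *httptest.Server // serves mux
	client *Client          // client under test, rewired to the fake server
)

func setup() {
	mux = http.NewServeMux()
	server = httptest.NewServer(mux)
	client = NewClient(nil)
	base, _ := url.Parse(server.URL + "/")
	client.BaseURL = base
}

func teardown() {
	server.Close()
}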
+ +package github + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "reflect" + "testing" +) + +func TestOrganizationsService_ListProjects(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/orgs/o/projects", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testHeader(t, r, "Accept", mediaTypeProjectsPreview) + testFormValues(t, r, values{"state": "open", "page": "2"}) + fmt.Fprint(w, `[{"id":1}]`) + }) + + opt := &ProjectListOptions{State: "open", ListOptions: ListOptions{Page: 2}} + projects, _, err := client.Organizations.ListProjects(context.Background(), "o", opt) + if err != nil { + t.Errorf("Organizations.ListProjects returned error: %v", err) + } + + want := []*Project{{ID: Int(1)}} + if !reflect.DeepEqual(projects, want) { + t.Errorf("Organizations.ListProjects returned %+v, want %+v", projects, want) + } +} + +func TestOrganizationsService_CreateProject(t *testing.T) { + setup() + defer teardown() + + input := &ProjectOptions{Name: "Project Name", Body: "Project body."} + + mux.HandleFunc("/orgs/o/projects", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "POST") + testHeader(t, r, "Accept", mediaTypeProjectsPreview) + + v := &ProjectOptions{} + json.NewDecoder(r.Body).Decode(v) + if !reflect.DeepEqual(v, input) { + t.Errorf("Request body = %+v, want %+v", v, input) + } + + fmt.Fprint(w, `{"id":1}`) + }) + + project, _, err := client.Organizations.CreateProject(context.Background(), "o", input) + if err != nil { + t.Errorf("Organizations.CreateProject returned error: %v", err) + } + + want := &Project{ID: Int(1)} + if !reflect.DeepEqual(project, want) { + t.Errorf("Organizations.CreateProject returned %+v, want %+v", project, want) + } +} diff --git a/vendor/github.com/google/go-github/github/orgs_teams_test.go b/vendor/github.com/google/go-github/github/orgs_teams_test.go new file mode 100644 index 000000000000..e9342fb9b794 --- /dev/null +++ b/vendor/github.com/google/go-github/github/orgs_teams_test.go @@ -0,0 +1,580 @@ +// Copyright 2013 The go-github AUTHORS. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
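Each handler asserts the request shape through small helpers such as testMethod and testFormValues, also defined in github_test.go. A sketch consistent with the call sites in these files (the exact error wording is an assumption):

package github

import (
	"net/http"
	"net/url"
	"reflect"
	"testing"
)

// values maps form keys to the single value expected for each.
type values map[string]string

func testMethod(t *testing.T, r *http.Request, want string) {
	if got := r.Method; got != want {
		t.Errorf("Request method: %v, want %v", got, want)
	}
}

func testFormValues(t *testing.T, r *http.Request, values values) {
	want := url.Values{}
	for k, v := range values {
		want.Set(k, v)
	}
	r.ParseForm() // parse error ignored; the comparison below catches a bad form
	if got := r.Form; !reflect.DeepEqual(got, want) {
		t.Errorf("Request parameters: %v, want %v", got, want)
	}
}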
+ +package github + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "reflect" + "testing" + "time" +) + +func TestOrganizationsService_ListTeams(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/orgs/o/teams", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testFormValues(t, r, values{"page": "2"}) + fmt.Fprint(w, `[{"id":1}]`) + }) + + opt := &ListOptions{Page: 2} + teams, _, err := client.Organizations.ListTeams(context.Background(), "o", opt) + if err != nil { + t.Errorf("Organizations.ListTeams returned error: %v", err) + } + + want := []*Team{{ID: Int(1)}} + if !reflect.DeepEqual(teams, want) { + t.Errorf("Organizations.ListTeams returned %+v, want %+v", teams, want) + } +} + +func TestOrganizationsService_ListTeams_invalidOrg(t *testing.T) { + _, _, err := client.Organizations.ListTeams(context.Background(), "%", nil) + testURLParseError(t, err) +} + +func TestOrganizationsService_GetTeam(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/teams/1", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + fmt.Fprint(w, `{"id":1, "name":"n", "description": "d", "url":"u", "slug": "s", "permission":"p", "ldap_dn":"cn=n,ou=groups,dc=example,dc=com"}`) + }) + + team, _, err := client.Organizations.GetTeam(context.Background(), 1) + if err != nil { + t.Errorf("Organizations.GetTeam returned error: %v", err) + } + + want := &Team{ID: Int(1), Name: String("n"), Description: String("d"), URL: String("u"), Slug: String("s"), Permission: String("p"), LDAPDN: String("cn=n,ou=groups,dc=example,dc=com")} + if !reflect.DeepEqual(team, want) { + t.Errorf("Organizations.GetTeam returned %+v, want %+v", team, want) + } +} + +func TestOrganizationsService_CreateTeam(t *testing.T) { + setup() + defer teardown() + + input := &Team{Name: String("n"), Privacy: String("closed")} + + mux.HandleFunc("/orgs/o/teams", func(w http.ResponseWriter, r *http.Request) { + v := new(Team) + json.NewDecoder(r.Body).Decode(v) + + testMethod(t, r, "POST") + if !reflect.DeepEqual(v, input) { + t.Errorf("Request body = %+v, want %+v", v, input) + } + + fmt.Fprint(w, `{"id":1}`) + }) + + team, _, err := client.Organizations.CreateTeam(context.Background(), "o", input) + if err != nil { + t.Errorf("Organizations.CreateTeam returned error: %v", err) + } + + want := &Team{ID: Int(1)} + if !reflect.DeepEqual(team, want) { + t.Errorf("Organizations.CreateTeam returned %+v, want %+v", team, want) + } +} + +func TestOrganizationsService_CreateTeam_invalidOrg(t *testing.T) { + _, _, err := client.Organizations.CreateTeam(context.Background(), "%", nil) + testURLParseError(t, err) +} + +func TestOrganizationsService_EditTeam(t *testing.T) { + setup() + defer teardown() + + input := &Team{Name: String("n"), Privacy: String("closed")} + + mux.HandleFunc("/teams/1", func(w http.ResponseWriter, r *http.Request) { + v := new(Team) + json.NewDecoder(r.Body).Decode(v) + + testMethod(t, r, "PATCH") + if !reflect.DeepEqual(v, input) { + t.Errorf("Request body = %+v, want %+v", v, input) + } + + fmt.Fprint(w, `{"id":1}`) + }) + + team, _, err := client.Organizations.EditTeam(context.Background(), 1, input) + if err != nil { + t.Errorf("Organizations.EditTeam returned error: %v", err) + } + + want := &Team{ID: Int(1)} + if !reflect.DeepEqual(team, want) { + t.Errorf("Organizations.EditTeam returned %+v, want %+v", team, want) + } +} + +func TestOrganizationsService_DeleteTeam(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/teams/1", 
func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "DELETE") + }) + + _, err := client.Organizations.DeleteTeam(context.Background(), 1) + if err != nil { + t.Errorf("Organizations.DeleteTeam returned error: %v", err) + } +} + +func TestOrganizationsService_ListTeamMembers(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/teams/1/members", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testFormValues(t, r, values{"role": "member", "page": "2"}) + fmt.Fprint(w, `[{"id":1}]`) + }) + + opt := &OrganizationListTeamMembersOptions{Role: "member", ListOptions: ListOptions{Page: 2}} + members, _, err := client.Organizations.ListTeamMembers(context.Background(), 1, opt) + if err != nil { + t.Errorf("Organizations.ListTeamMembers returned error: %v", err) + } + + want := []*User{{ID: Int(1)}} + if !reflect.DeepEqual(members, want) { + t.Errorf("Organizations.ListTeamMembers returned %+v, want %+v", members, want) + } +} + +func TestOrganizationsService_IsTeamMember_true(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/teams/1/members/u", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + }) + + member, _, err := client.Organizations.IsTeamMember(context.Background(), 1, "u") + if err != nil { + t.Errorf("Organizations.IsTeamMember returned error: %v", err) + } + if want := true; member != want { + t.Errorf("Organizations.IsTeamMember returned %+v, want %+v", member, want) + } +} + +// ensure that a 404 response is interpreted as "false" and not an error +func TestOrganizationsService_IsTeamMember_false(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/teams/1/members/u", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + w.WriteHeader(http.StatusNotFound) + }) + + member, _, err := client.Organizations.IsTeamMember(context.Background(), 1, "u") + if err != nil { + t.Errorf("Organizations.IsTeamMember returned error: %+v", err) + } + if want := false; member != want { + t.Errorf("Organizations.IsTeamMember returned %+v, want %+v", member, want) + } +} + +// ensure that a 400 response is interpreted as an actual error, and not simply +// as "false" like the above case of a 404 +func TestOrganizationsService_IsTeamMember_error(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/teams/1/members/u", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + http.Error(w, "BadRequest", http.StatusBadRequest) + }) + + member, _, err := client.Organizations.IsTeamMember(context.Background(), 1, "u") + if err == nil { + t.Errorf("Expected HTTP 400 response") + } + if want := false; member != want { + t.Errorf("Organizations.IsTeamMember returned %+v, want %+v", member, want) + } +} + +func TestOrganizationsService_IsTeamMember_invalidUser(t *testing.T) { + _, _, err := client.Organizations.IsTeamMember(context.Background(), 1, "%") + testURLParseError(t, err) +} + +func TestOrganizationsService_PublicizeMembership(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/orgs/o/public_members/u", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "PUT") + w.WriteHeader(http.StatusNoContent) + }) + + _, err := client.Organizations.PublicizeMembership(context.Background(), "o", "u") + if err != nil { + t.Errorf("Organizations.PublicizeMembership returned error: %v", err) + } +} + +func TestOrganizationsService_PublicizeMembership_invalidOrg(t *testing.T) { + _, err := 
client.Organizations.PublicizeMembership(context.Background(), "%", "u") + testURLParseError(t, err) +} + +func TestOrganizationsService_ConcealMembership(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/orgs/o/public_members/u", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "DELETE") + w.WriteHeader(http.StatusNoContent) + }) + + _, err := client.Organizations.ConcealMembership(context.Background(), "o", "u") + if err != nil { + t.Errorf("Organizations.ConcealMembership returned error: %v", err) + } +} + +func TestOrganizationsService_ConcealMembership_invalidOrg(t *testing.T) { + _, err := client.Organizations.ConcealMembership(context.Background(), "%", "u") + testURLParseError(t, err) +} + +func TestOrganizationsService_ListTeamRepos(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/teams/1/repos", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testFormValues(t, r, values{"page": "2"}) + fmt.Fprint(w, `[{"id":1}]`) + }) + + opt := &ListOptions{Page: 2} + members, _, err := client.Organizations.ListTeamRepos(context.Background(), 1, opt) + if err != nil { + t.Errorf("Organizations.ListTeamRepos returned error: %v", err) + } + + want := []*Repository{{ID: Int(1)}} + if !reflect.DeepEqual(members, want) { + t.Errorf("Organizations.ListTeamRepos returned %+v, want %+v", members, want) + } +} + +func TestOrganizationsService_IsTeamRepo_true(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/teams/1/repos/o/r", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testHeader(t, r, "Accept", mediaTypeOrgPermissionRepo) + fmt.Fprint(w, `{"id":1}`) + }) + + repo, _, err := client.Organizations.IsTeamRepo(context.Background(), 1, "o", "r") + if err != nil { + t.Errorf("Organizations.IsTeamRepo returned error: %v", err) + } + + want := &Repository{ID: Int(1)} + if !reflect.DeepEqual(repo, want) { + t.Errorf("Organizations.IsTeamRepo returned %+v, want %+v", repo, want) + } +} + +func TestOrganizationsService_IsTeamRepo_false(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/teams/1/repos/o/r", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + w.WriteHeader(http.StatusNotFound) + }) + + repo, resp, err := client.Organizations.IsTeamRepo(context.Background(), 1, "o", "r") + if err == nil { + t.Errorf("Expected HTTP 404 response") + } + if got, want := resp.Response.StatusCode, http.StatusNotFound; got != want { + t.Errorf("Organizations.IsTeamRepo returned status %d, want %d", got, want) + } + if repo != nil { + t.Errorf("Organizations.IsTeamRepo returned %+v, want nil", repo) + } +} + +func TestOrganizationsService_IsTeamRepo_error(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/teams/1/repos/o/r", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + http.Error(w, "BadRequest", http.StatusBadRequest) + }) + + repo, resp, err := client.Organizations.IsTeamRepo(context.Background(), 1, "o", "r") + if err == nil { + t.Errorf("Expected HTTP 400 response") + } + if got, want := resp.Response.StatusCode, http.StatusBadRequest; got != want { + t.Errorf("Organizations.IsTeamRepo returned status %d, want %d", got, want) + } + if repo != nil { + t.Errorf("Organizations.IsTeamRepo returned %+v, want nil", repo) + } +} + +func TestOrganizationsService_IsTeamRepo_invalidOwner(t *testing.T) { + _, _, err := client.Organizations.IsTeamRepo(context.Background(), 1, "%", "r") + testURLParseError(t, err) +} + +func 
TestOrganizationsService_AddTeamRepo(t *testing.T) { + setup() + defer teardown() + + opt := &OrganizationAddTeamRepoOptions{Permission: "admin"} + + mux.HandleFunc("/teams/1/repos/o/r", func(w http.ResponseWriter, r *http.Request) { + v := new(OrganizationAddTeamRepoOptions) + json.NewDecoder(r.Body).Decode(v) + + testMethod(t, r, "PUT") + if !reflect.DeepEqual(v, opt) { + t.Errorf("Request body = %+v, want %+v", v, opt) + } + + w.WriteHeader(http.StatusNoContent) + }) + + _, err := client.Organizations.AddTeamRepo(context.Background(), 1, "o", "r", opt) + if err != nil { + t.Errorf("Organizations.AddTeamRepo returned error: %v", err) + } +} + +func TestOrganizationsService_AddTeamRepo_noAccess(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/teams/1/repos/o/r", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "PUT") + w.WriteHeader(http.StatusUnprocessableEntity) + }) + + _, err := client.Organizations.AddTeamRepo(context.Background(), 1, "o", "r", nil) + if err == nil { + t.Errorf("Expected error to be returned") + } +} + +func TestOrganizationsService_AddTeamRepo_invalidOwner(t *testing.T) { + _, err := client.Organizations.AddTeamRepo(context.Background(), 1, "%", "r", nil) + testURLParseError(t, err) +} + +func TestOrganizationsService_RemoveTeamRepo(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/teams/1/repos/o/r", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "DELETE") + w.WriteHeader(http.StatusNoContent) + }) + + _, err := client.Organizations.RemoveTeamRepo(context.Background(), 1, "o", "r") + if err != nil { + t.Errorf("Organizations.RemoveTeamRepo returned error: %v", err) + } +} + +func TestOrganizationsService_RemoveTeamRepo_invalidOwner(t *testing.T) { + _, err := client.Organizations.RemoveTeamRepo(context.Background(), 1, "%", "r") + testURLParseError(t, err) +} + +func TestOrganizationsService_GetTeamMembership(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/teams/1/memberships/u", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + fmt.Fprint(w, `{"url":"u", "state":"active"}`) + }) + + membership, _, err := client.Organizations.GetTeamMembership(context.Background(), 1, "u") + if err != nil { + t.Errorf("Organizations.GetTeamMembership returned error: %v", err) + } + + want := &Membership{URL: String("u"), State: String("active")} + if !reflect.DeepEqual(membership, want) { + t.Errorf("Organizations.GetTeamMembership returned %+v, want %+v", membership, want) + } +} + +func TestOrganizationsService_AddTeamMembership(t *testing.T) { + setup() + defer teardown() + + opt := &OrganizationAddTeamMembershipOptions{Role: "maintainer"} + + mux.HandleFunc("/teams/1/memberships/u", func(w http.ResponseWriter, r *http.Request) { + v := new(OrganizationAddTeamMembershipOptions) + json.NewDecoder(r.Body).Decode(v) + + testMethod(t, r, "PUT") + if !reflect.DeepEqual(v, opt) { + t.Errorf("Request body = %+v, want %+v", v, opt) + } + + fmt.Fprint(w, `{"url":"u", "state":"pending"}`) + }) + + membership, _, err := client.Organizations.AddTeamMembership(context.Background(), 1, "u", opt) + if err != nil { + t.Errorf("Organizations.AddTeamMembership returned error: %v", err) + } + + want := &Membership{URL: String("u"), State: String("pending")} + if !reflect.DeepEqual(membership, want) { + t.Errorf("Organizations.AddTeamMembership returned %+v, want %+v", membership, want) + } +} + +func TestOrganizationsService_RemoveTeamMembership(t *testing.T) { + setup() + defer
teardown() + + mux.HandleFunc("/teams/1/memberships/u", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "DELETE") + w.WriteHeader(http.StatusNoContent) + }) + + _, err := client.Organizations.RemoveTeamMembership(context.Background(), 1, "u") + if err != nil { + t.Errorf("Organizations.RemoveTeamMembership returned error: %v", err) + } +} + +func TestOrganizationsService_ListUserTeams(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/user/teams", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testFormValues(t, r, values{"page": "1"}) + fmt.Fprint(w, `[{"id":1}]`) + }) + + opt := &ListOptions{Page: 1} + teams, _, err := client.Organizations.ListUserTeams(context.Background(), opt) + if err != nil { + t.Errorf("Organizations.ListUserTeams returned error: %v", err) + } + + want := []*Team{{ID: Int(1)}} + if !reflect.DeepEqual(teams, want) { + t.Errorf("Organizations.ListUserTeams returned %+v, want %+v", teams, want) + } +} + +func TestOrganizationsService_ListPendingTeamInvitations(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/teams/1/invitations", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testFormValues(t, r, values{"page": "1"}) + fmt.Fprint(w, `[ + { + "id": 1, + "login": "monalisa", + "email": "octocat@github.com", + "role": "direct_member", + "created_at": "2017-01-21T00:00:00Z", + "inviter": { + "login": "other_user", + "id": 1, + "avatar_url": "https://github.com/images/error/other_user_happy.gif", + "gravatar_id": "", + "url": "https://api.github.com/users/other_user", + "html_url": "https://github.com/other_user", + "followers_url": "https://api.github.com/users/other_user/followers", + "following_url": "https://api.github.com/users/other_user/following/other_user", + "gists_url": "https://api.github.com/users/other_user/gists/gist_id", + "starred_url": "https://api.github.com/users/other_user/starred/owner/repo", + "subscriptions_url": "https://api.github.com/users/other_user/subscriptions", + "organizations_url": "https://api.github.com/users/other_user/orgs", + "repos_url": "https://api.github.com/users/other_user/repos", + "events_url": "https://api.github.com/users/other_user/events/privacy", + "received_events_url": "https://api.github.com/users/other_user/received_events/privacy", + "type": "User", + "site_admin": false + } + } + ]`) + }) + + opt := &ListOptions{Page: 1} + invitations, _, err := client.Organizations.ListPendingTeamInvitations(context.Background(), 1, opt) + if err != nil { + t.Errorf("Organizations.ListPendingTeamInvitations returned error: %v", err) + } + + createdAt := time.Date(2017, 01, 21, 0, 0, 0, 0, time.UTC) + want := []*Invitation{ + { + ID: Int(1), + Login: String("monalisa"), + Email: String("octocat@github.com"), + Role: String("direct_member"), + CreatedAt: &createdAt, + Inviter: &User{ + Login: String("other_user"), + ID: Int(1), + AvatarURL: String("https://github.com/images/error/other_user_happy.gif"), + GravatarID: String(""), + URL: String("https://api.github.com/users/other_user"), + HTMLURL: String("https://github.com/other_user"), + FollowersURL: String("https://api.github.com/users/other_user/followers"), + FollowingURL: String("https://api.github.com/users/other_user/following/other_user"), + GistsURL: String("https://api.github.com/users/other_user/gists/gist_id"), + StarredURL: String("https://api.github.com/users/other_user/starred/owner/repo"), + SubscriptionsURL: 
String("https://api.github.com/users/other_user/subscriptions"), + OrganizationsURL: String("https://api.github.com/users/other_user/orgs"), + ReposURL: String("https://api.github.com/users/other_user/repos"), + EventsURL: String("https://api.github.com/users/other_user/events/privacy"), + ReceivedEventsURL: String("https://api.github.com/users/other_user/received_events/privacy"), + Type: String("User"), + SiteAdmin: Bool(false), + }, + }} + + if !reflect.DeepEqual(invitations, want) { + t.Errorf("Organizations.ListPendingTeamInvitations returned %+v, want %+v", invitations, want) + } +} diff --git a/vendor/github.com/google/go-github/github/orgs_test.go b/vendor/github.com/google/go-github/github/orgs_test.go new file mode 100644 index 000000000000..f80c9496a80a --- /dev/null +++ b/vendor/github.com/google/go-github/github/orgs_test.go @@ -0,0 +1,144 @@ +// Copyright 2013 The go-github AUTHORS. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package github + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "reflect" + "testing" +) + +func TestOrganizationsService_ListAll(t *testing.T) { + setup() + defer teardown() + + since := 1342004 + mux.HandleFunc("/organizations", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testFormValues(t, r, values{"since": "1342004"}) + fmt.Fprint(w, `[{"id":4314092}]`) + }) + + opt := &OrganizationsListOptions{Since: since} + orgs, _, err := client.Organizations.ListAll(context.Background(), opt) + if err != nil { + t.Errorf("Organizations.ListAll returned error: %v", err) + } + + want := []*Organization{{ID: Int(4314092)}} + if !reflect.DeepEqual(orgs, want) { + t.Errorf("Organizations.ListAll returned %+v, want %+v", orgs, want) + } +} + +func TestOrganizationsService_List_authenticatedUser(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/user/orgs", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + fmt.Fprint(w, `[{"id":1},{"id":2}]`) + }) + + orgs, _, err := client.Organizations.List(context.Background(), "", nil) + if err != nil { + t.Errorf("Organizations.List returned error: %v", err) + } + + want := []*Organization{{ID: Int(1)}, {ID: Int(2)}} + if !reflect.DeepEqual(orgs, want) { + t.Errorf("Organizations.List returned %+v, want %+v", orgs, want) + } +} + +func TestOrganizationsService_List_specifiedUser(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/users/u/orgs", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testFormValues(t, r, values{"page": "2"}) + fmt.Fprint(w, `[{"id":1},{"id":2}]`) + }) + + opt := &ListOptions{Page: 2} + orgs, _, err := client.Organizations.List(context.Background(), "u", opt) + if err != nil { + t.Errorf("Organizations.List returned error: %v", err) + } + + want := []*Organization{{ID: Int(1)}, {ID: Int(2)}} + if !reflect.DeepEqual(orgs, want) { + t.Errorf("Organizations.List returned %+v, want %+v", orgs, want) + } +} + +func TestOrganizationsService_List_invalidUser(t *testing.T) { + _, _, err := client.Organizations.List(context.Background(), "%", nil) + testURLParseError(t, err) +} + +func TestOrganizationsService_Get(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/orgs/o", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + fmt.Fprint(w, `{"id":1, "login":"l", "url":"u", "avatar_url": "a", "location":"l"}`) + }) + + org, _, err := 
client.Organizations.Get(context.Background(), "o") + if err != nil { + t.Errorf("Organizations.Get returned error: %v", err) + } + + want := &Organization{ID: Int(1), Login: String("l"), URL: String("u"), AvatarURL: String("a"), Location: String("l")} + if !reflect.DeepEqual(org, want) { + t.Errorf("Organizations.Get returned %+v, want %+v", org, want) + } +} + +func TestOrganizationsService_Get_invalidOrg(t *testing.T) { + _, _, err := client.Organizations.Get(context.Background(), "%") + testURLParseError(t, err) +} + +func TestOrganizationsService_Edit(t *testing.T) { + setup() + defer teardown() + + input := &Organization{Login: String("l")} + + mux.HandleFunc("/orgs/o", func(w http.ResponseWriter, r *http.Request) { + v := new(Organization) + json.NewDecoder(r.Body).Decode(v) + + testMethod(t, r, "PATCH") + if !reflect.DeepEqual(v, input) { + t.Errorf("Request body = %+v, want %+v", v, input) + } + + fmt.Fprint(w, `{"id":1}`) + }) + + org, _, err := client.Organizations.Edit(context.Background(), "o", input) + if err != nil { + t.Errorf("Organizations.Edit returned error: %v", err) + } + + want := &Organization{ID: Int(1)} + if !reflect.DeepEqual(org, want) { + t.Errorf("Organizations.Edit returned %+v, want %+v", org, want) + } +} + +func TestOrganizationsService_Edit_invalidOrg(t *testing.T) { + _, _, err := client.Organizations.Edit(context.Background(), "%", nil) + testURLParseError(t, err) +} diff --git a/vendor/github.com/google/go-github/github/orgs_users_blocking_test.go b/vendor/github.com/google/go-github/github/orgs_users_blocking_test.go new file mode 100644 index 000000000000..53be507ef215 --- /dev/null +++ b/vendor/github.com/google/go-github/github/orgs_users_blocking_test.go @@ -0,0 +1,90 @@ +// Copyright 2017 The go-github AUTHORS. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
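The IsMember, IsPublicMember, and IsTeamMember tests above, and the IsBlocked test below, all pin down the same contract: 204 means true, 404 means false without an error, and any other failure status surfaces as a real error. A sketch of how a status-only endpoint can be mapped onto a bool, assuming the error is the *ErrorResponse produced by the client's CheckResponse:

package github

import "net/http"

// parseBoolResponse maps a "status code only" API reply onto a bool:
// nil error (e.g. 204) => true; 404 => (false, nil); anything else,
// such as a 400, stays a genuine error.
func parseBoolResponse(err error) (bool, error) {
	if err == nil {
		return true, nil
	}
	if errResp, ok := err.(*ErrorResponse); ok && errResp.Response.StatusCode == http.StatusNotFound {
		return false, nil
	}
	return false, err
}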
+ +package github + +import ( + "context" + "fmt" + "net/http" + "reflect" + "testing" +) + +func TestOrganizationsService_ListBlockedUsers(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/orgs/o/blocks", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testHeader(t, r, "Accept", mediaTypeBlockUsersPreview) + testFormValues(t, r, values{"page": "2"}) + fmt.Fprint(w, `[{ + "login": "octocat" + }]`) + }) + + opt := &ListOptions{Page: 2} + blockedUsers, _, err := client.Organizations.ListBlockedUsers(context.Background(), "o", opt) + if err != nil { + t.Errorf("Organizations.ListBlockedUsers returned error: %v", err) + } + + want := []*User{{Login: String("octocat")}} + if !reflect.DeepEqual(blockedUsers, want) { + t.Errorf("Organizations.ListBlockedUsers returned %+v, want %+v", blockedUsers, want) + } +} + +func TestOrganizationsService_IsBlocked(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/orgs/o/blocks/u", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testHeader(t, r, "Accept", mediaTypeBlockUsersPreview) + w.WriteHeader(http.StatusNoContent) + }) + + isBlocked, _, err := client.Organizations.IsBlocked(context.Background(), "o", "u") + if err != nil { + t.Errorf("Organizations.IsBlocked returned error: %v", err) + } + if want := true; isBlocked != want { + t.Errorf("Organizations.IsBlocked returned %+v, want %+v", isBlocked, want) + } +} + +func TestOrganizationsService_BlockUser(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/orgs/o/blocks/u", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "PUT") + testHeader(t, r, "Accept", mediaTypeBlockUsersPreview) + w.WriteHeader(http.StatusNoContent) + }) + + _, err := client.Organizations.BlockUser(context.Background(), "o", "u") + if err != nil { + t.Errorf("Organizations.BlockUser returned error: %v", err) + } +} + +func TestOrganizationsService_UnblockUser(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/orgs/o/blocks/u", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "DELETE") + testHeader(t, r, "Accept", mediaTypeBlockUsersPreview) + w.WriteHeader(http.StatusNoContent) + }) + + _, err := client.Organizations.UnblockUser(context.Background(), "o", "u") + if err != nil { + t.Errorf("Organizations.UnblockUser returned error: %v", err) + } +} diff --git a/vendor/github.com/google/go-github/github/projects_test.go b/vendor/github.com/google/go-github/github/projects_test.go new file mode 100644 index 000000000000..1045ca9befa6 --- /dev/null +++ b/vendor/github.com/google/go-github/github/projects_test.go @@ -0,0 +1,371 @@ +// Copyright 2016 The go-github AUTHORS. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
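Several endpoints in these files are API previews, so the tests assert an opt-in Accept header via testHeader. A sketch of how a request opts in (NewRequest is the real client entry point; the helper name is hypothetical, and the preview constant's value is version-specific):

package github

import "net/http"

// newBlockedUsersRequest sets the preview media type on the Accept header,
// which is exactly what the testHeader assertions check for.
func newBlockedUsersRequest(client *Client) (*http.Request, error) {
	req, err := client.NewRequest("GET", "orgs/o/blocks", nil)
	if err != nil {
		return nil, err
	}
	// mediaTypeBlockUsersPreview is defined in github.go in this revision.
	req.Header.Set("Accept", mediaTypeBlockUsersPreview)
	return req, nil
}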
+ +package github + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "reflect" + "testing" +) + +func TestProjectsService_UpdateProject(t *testing.T) { + setup() + defer teardown() + + input := &ProjectOptions{Name: "Project Name", Body: "Project body."} + + mux.HandleFunc("/projects/1", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "PATCH") + testHeader(t, r, "Accept", mediaTypeProjectsPreview) + + v := &ProjectOptions{} + json.NewDecoder(r.Body).Decode(v) + if !reflect.DeepEqual(v, input) { + t.Errorf("Request body = %+v, want %+v", v, input) + } + + fmt.Fprint(w, `{"id":1}`) + }) + + project, _, err := client.Projects.UpdateProject(context.Background(), 1, input) + if err != nil { + t.Errorf("Projects.UpdateProject returned error: %v", err) + } + + want := &Project{ID: Int(1)} + if !reflect.DeepEqual(project, want) { + t.Errorf("Projects.UpdateProject returned %+v, want %+v", project, want) + } +} + +func TestProjectsService_GetProject(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/projects/1", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testHeader(t, r, "Accept", mediaTypeProjectsPreview) + fmt.Fprint(w, `{"id":1}`) + }) + + project, _, err := client.Projects.GetProject(context.Background(), 1) + if err != nil { + t.Errorf("Projects.GetProject returned error: %v", err) + } + + want := &Project{ID: Int(1)} + if !reflect.DeepEqual(project, want) { + t.Errorf("Projects.GetProject returned %+v, want %+v", project, want) + } +} + +func TestProjectsService_DeleteProject(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/projects/1", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "DELETE") + testHeader(t, r, "Accept", mediaTypeProjectsPreview) + }) + + _, err := client.Projects.DeleteProject(context.Background(), 1) + if err != nil { + t.Errorf("Projects.DeleteProject returned error: %v", err) + } +} + +func TestProjectsService_ListProjectColumns(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/projects/1/columns", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testHeader(t, r, "Accept", mediaTypeProjectsPreview) + testFormValues(t, r, values{"page": "2"}) + fmt.Fprint(w, `[{"id":1}]`) + }) + + opt := &ListOptions{Page: 2} + columns, _, err := client.Projects.ListProjectColumns(context.Background(), 1, opt) + if err != nil { + t.Errorf("Projects.ListProjectColumns returned error: %v", err) + } + + want := []*ProjectColumn{{ID: Int(1)}} + if !reflect.DeepEqual(columns, want) { + t.Errorf("Projects.ListProjectColumns returned %+v, want %+v", columns, want) + } +} + +func TestProjectsService_GetProjectColumn(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/projects/columns/1", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testHeader(t, r, "Accept", mediaTypeProjectsPreview) + fmt.Fprint(w, `{"id":1}`) + }) + + column, _, err := client.Projects.GetProjectColumn(context.Background(), 1) + if err != nil { + t.Errorf("Projects.GetProjectColumn returned error: %v", err) + } + + want := &ProjectColumn{ID: Int(1)} + if !reflect.DeepEqual(column, want) { + t.Errorf("Projects.GetProjectColumn returned %+v, want %+v", column, want) + } +} + +func TestProjectsService_CreateProjectColumn(t *testing.T) { + setup() + defer teardown() + + input := &ProjectColumnOptions{Name: "Column Name"} + + mux.HandleFunc("/projects/1/columns", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "POST") + 
testHeader(t, r, "Accept", mediaTypeProjectsPreview) + + v := &ProjectColumnOptions{} + json.NewDecoder(r.Body).Decode(v) + if !reflect.DeepEqual(v, input) { + t.Errorf("Request body = %+v, want %+v", v, input) + } + + fmt.Fprint(w, `{"id":1}`) + }) + + column, _, err := client.Projects.CreateProjectColumn(context.Background(), 1, input) + if err != nil { + t.Errorf("Projects.CreateProjectColumn returned error: %v", err) + } + + want := &ProjectColumn{ID: Int(1)} + if !reflect.DeepEqual(column, want) { + t.Errorf("Projects.CreateProjectColumn returned %+v, want %+v", column, want) + } +} + +func TestProjectsService_UpdateProjectColumn(t *testing.T) { + setup() + defer teardown() + + input := &ProjectColumnOptions{Name: "Column Name"} + + mux.HandleFunc("/projects/columns/1", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "PATCH") + testHeader(t, r, "Accept", mediaTypeProjectsPreview) + + v := &ProjectColumnOptions{} + json.NewDecoder(r.Body).Decode(v) + if !reflect.DeepEqual(v, input) { + t.Errorf("Request body = %+v, want %+v", v, input) + } + + fmt.Fprint(w, `{"id":1}`) + }) + + column, _, err := client.Projects.UpdateProjectColumn(context.Background(), 1, input) + if err != nil { + t.Errorf("Projects.UpdateProjectColumn returned error: %v", err) + } + + want := &ProjectColumn{ID: Int(1)} + if !reflect.DeepEqual(column, want) { + t.Errorf("Projects.UpdateProjectColumn returned %+v, want %+v", column, want) + } +} + +func TestProjectsService_DeleteProjectColumn(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/projects/columns/1", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "DELETE") + testHeader(t, r, "Accept", mediaTypeProjectsPreview) + }) + + _, err := client.Projects.DeleteProjectColumn(context.Background(), 1) + if err != nil { + t.Errorf("Projects.DeleteProjectColumn returned error: %v", err) + } +} + +func TestProjectsService_MoveProjectColumn(t *testing.T) { + setup() + defer teardown() + + input := &ProjectColumnMoveOptions{Position: "after:12345"} + + mux.HandleFunc("/projects/columns/1/moves", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "POST") + testHeader(t, r, "Accept", mediaTypeProjectsPreview) + + v := &ProjectColumnMoveOptions{} + json.NewDecoder(r.Body).Decode(v) + if !reflect.DeepEqual(v, input) { + t.Errorf("Request body = %+v, want %+v", v, input) + } + }) + + _, err := client.Projects.MoveProjectColumn(context.Background(), 1, input) + if err != nil { + t.Errorf("Projects.MoveProjectColumn returned error: %v", err) + } +} + +func TestProjectsService_ListProjectCards(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/projects/columns/1/cards", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testHeader(t, r, "Accept", mediaTypeProjectsPreview) + testFormValues(t, r, values{"page": "2"}) + fmt.Fprint(w, `[{"id":1}]`) + }) + + opt := &ListOptions{Page: 2} + cards, _, err := client.Projects.ListProjectCards(context.Background(), 1, opt) + if err != nil { + t.Errorf("Projects.ListProjectCards returned error: %v", err) + } + + want := []*ProjectCard{{ID: Int(1)}} + if !reflect.DeepEqual(cards, want) { + t.Errorf("Projects.ListProjectCards returned %+v, want %+v", cards, want) + } +} + +func TestProjectsService_GetProjectCard(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/projects/columns/cards/1", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testHeader(t, r, "Accept", mediaTypeProjectsPreview) + 
fmt.Fprint(w, `{"id":1}`) + }) + + card, _, err := client.Projects.GetProjectCard(context.Background(), 1) + if err != nil { + t.Errorf("Projects.GetProjectCard returned error: %v", err) + } + + want := &ProjectCard{ID: Int(1)} + if !reflect.DeepEqual(card, want) { + t.Errorf("Projects.GetProjectCard returned %+v, want %+v", card, want) + } +} + +func TestProjectsService_CreateProjectCard(t *testing.T) { + setup() + defer teardown() + + input := &ProjectCardOptions{ + ContentID: 12345, + ContentType: "Issue", + } + + mux.HandleFunc("/projects/columns/1/cards", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "POST") + testHeader(t, r, "Accept", mediaTypeProjectsPreview) + + v := &ProjectCardOptions{} + json.NewDecoder(r.Body).Decode(v) + if !reflect.DeepEqual(v, input) { + t.Errorf("Request body = %+v, want %+v", v, input) + } + + fmt.Fprint(w, `{"id":1}`) + }) + + card, _, err := client.Projects.CreateProjectCard(context.Background(), 1, input) + if err != nil { + t.Errorf("Projects.CreateProjectCard returned error: %v", err) + } + + want := &ProjectCard{ID: Int(1)} + if !reflect.DeepEqual(card, want) { + t.Errorf("Projects.CreateProjectCard returned %+v, want %+v", card, want) + } +} + +func TestProjectsService_UpdateProjectCard(t *testing.T) { + setup() + defer teardown() + + input := &ProjectCardOptions{ + ContentID: 12345, + ContentType: "Issue", + } + + mux.HandleFunc("/projects/columns/cards/1", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "PATCH") + testHeader(t, r, "Accept", mediaTypeProjectsPreview) + + v := &ProjectCardOptions{} + json.NewDecoder(r.Body).Decode(v) + if !reflect.DeepEqual(v, input) { + t.Errorf("Request body = %+v, want %+v", v, input) + } + + fmt.Fprint(w, `{"id":1}`) + }) + + card, _, err := client.Projects.UpdateProjectCard(context.Background(), 1, input) + if err != nil { + t.Errorf("Projects.UpdateProjectCard returned error: %v", err) + } + + want := &ProjectCard{ID: Int(1)} + if !reflect.DeepEqual(card, want) { + t.Errorf("Projects.UpdateProjectCard returned %+v, want %+v", card, want) + } +} + +func TestProjectsService_DeleteProjectCard(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/projects/columns/cards/1", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "DELETE") + testHeader(t, r, "Accept", mediaTypeProjectsPreview) + }) + + _, err := client.Projects.DeleteProjectCard(context.Background(), 1) + if err != nil { + t.Errorf("Projects.DeleteProjectCard returned error: %v", err) + } +} + +func TestProjectsService_MoveProjectCard(t *testing.T) { + setup() + defer teardown() + + input := &ProjectCardMoveOptions{Position: "after:12345"} + + mux.HandleFunc("/projects/columns/cards/1/moves", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "POST") + testHeader(t, r, "Accept", mediaTypeProjectsPreview) + + v := &ProjectCardMoveOptions{} + json.NewDecoder(r.Body).Decode(v) + if !reflect.DeepEqual(v, input) { + t.Errorf("Request body = %+v, want %+v", v, input) + } + }) + + _, err := client.Projects.MoveProjectCard(context.Background(), 1, input) + if err != nil { + t.Errorf("Projects.MoveProjectCard returned error: %v", err) + } +} diff --git a/vendor/github.com/google/go-github/github/pulls_comments_test.go b/vendor/github.com/google/go-github/github/pulls_comments_test.go new file mode 100644 index 000000000000..e35b04f7b7b8 --- /dev/null +++ b/vendor/github.com/google/go-github/github/pulls_comments_test.go @@ -0,0 +1,188 @@ +// Copyright 2013 The go-github AUTHORS. 
All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package github + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "reflect" + "testing" + "time" +) + +func TestPullRequestsService_ListComments_allPulls(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/pulls/comments", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testHeader(t, r, "Accept", mediaTypeReactionsPreview) + testFormValues(t, r, values{ + "sort": "updated", + "direction": "desc", + "since": "2002-02-10T15:30:00Z", + "page": "2", + }) + fmt.Fprint(w, `[{"id":1}]`) + }) + + opt := &PullRequestListCommentsOptions{ + Sort: "updated", + Direction: "desc", + Since: time.Date(2002, time.February, 10, 15, 30, 0, 0, time.UTC), + ListOptions: ListOptions{Page: 2}, + } + pulls, _, err := client.PullRequests.ListComments(context.Background(), "o", "r", 0, opt) + if err != nil { + t.Errorf("PullRequests.ListComments returned error: %v", err) + } + + want := []*PullRequestComment{{ID: Int(1)}} + if !reflect.DeepEqual(pulls, want) { + t.Errorf("PullRequests.ListComments returned %+v, want %+v", pulls, want) + } +} + +func TestPullRequestsService_ListComments_specificPull(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/pulls/1/comments", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testHeader(t, r, "Accept", mediaTypeReactionsPreview) + fmt.Fprint(w, `[{"id":1}]`) + }) + + pulls, _, err := client.PullRequests.ListComments(context.Background(), "o", "r", 1, nil) + if err != nil { + t.Errorf("PullRequests.ListComments returned error: %v", err) + } + + want := []*PullRequestComment{{ID: Int(1)}} + if !reflect.DeepEqual(pulls, want) { + t.Errorf("PullRequests.ListComments returned %+v, want %+v", pulls, want) + } +} + +func TestPullRequestsService_ListComments_invalidOwner(t *testing.T) { + _, _, err := client.PullRequests.ListComments(context.Background(), "%", "r", 1, nil) + testURLParseError(t, err) +} + +func TestPullRequestsService_GetComment(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/pulls/comments/1", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testHeader(t, r, "Accept", mediaTypeReactionsPreview) + fmt.Fprint(w, `{"id":1}`) + }) + + comment, _, err := client.PullRequests.GetComment(context.Background(), "o", "r", 1) + if err != nil { + t.Errorf("PullRequests.GetComment returned error: %v", err) + } + + want := &PullRequestComment{ID: Int(1)} + if !reflect.DeepEqual(comment, want) { + t.Errorf("PullRequests.GetComment returned %+v, want %+v", comment, want) + } +} + +func TestPullRequestsService_GetComment_invalidOwner(t *testing.T) { + _, _, err := client.PullRequests.GetComment(context.Background(), "%", "r", 1) + testURLParseError(t, err) +} + +func TestPullRequestsService_CreateComment(t *testing.T) { + setup() + defer teardown() + + input := &PullRequestComment{Body: String("b")} + + mux.HandleFunc("/repos/o/r/pulls/1/comments", func(w http.ResponseWriter, r *http.Request) { + v := new(PullRequestComment) + json.NewDecoder(r.Body).Decode(v) + + testMethod(t, r, "POST") + if !reflect.DeepEqual(v, input) { + t.Errorf("Request body = %+v, want %+v", v, input) + } + + fmt.Fprint(w, `{"id":1}`) + }) + + comment, _, err := client.PullRequests.CreateComment(context.Background(), "o", "r", 1, input) + if err != nil { + t.Errorf("PullRequests.CreateComment returned error: 
%v", err) + } + + want := &PullRequestComment{ID: Int(1)} + if !reflect.DeepEqual(comment, want) { + t.Errorf("PullRequests.CreateComment returned %+v, want %+v", comment, want) + } +} + +func TestPullRequestsService_CreateComment_invalidOwner(t *testing.T) { + _, _, err := client.PullRequests.CreateComment(context.Background(), "%", "r", 1, nil) + testURLParseError(t, err) +} + +func TestPullRequestsService_EditComment(t *testing.T) { + setup() + defer teardown() + + input := &PullRequestComment{Body: String("b")} + + mux.HandleFunc("/repos/o/r/pulls/comments/1", func(w http.ResponseWriter, r *http.Request) { + v := new(PullRequestComment) + json.NewDecoder(r.Body).Decode(v) + + testMethod(t, r, "PATCH") + if !reflect.DeepEqual(v, input) { + t.Errorf("Request body = %+v, want %+v", v, input) + } + + fmt.Fprint(w, `{"id":1}`) + }) + + comment, _, err := client.PullRequests.EditComment(context.Background(), "o", "r", 1, input) + if err != nil { + t.Errorf("PullRequests.EditComment returned error: %v", err) + } + + want := &PullRequestComment{ID: Int(1)} + if !reflect.DeepEqual(comment, want) { + t.Errorf("PullRequests.EditComment returned %+v, want %+v", comment, want) + } +} + +func TestPullRequestsService_EditComment_invalidOwner(t *testing.T) { + _, _, err := client.PullRequests.EditComment(context.Background(), "%", "r", 1, nil) + testURLParseError(t, err) +} + +func TestPullRequestsService_DeleteComment(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/pulls/comments/1", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "DELETE") + }) + + _, err := client.PullRequests.DeleteComment(context.Background(), "o", "r", 1) + if err != nil { + t.Errorf("PullRequests.DeleteComment returned error: %v", err) + } +} + +func TestPullRequestsService_DeleteComment_invalidOwner(t *testing.T) { + _, err := client.PullRequests.DeleteComment(context.Background(), "%", "r", 1) + testURLParseError(t, err) +} diff --git a/vendor/github.com/google/go-github/github/pulls_reviewers_test.go b/vendor/github.com/google/go-github/github/pulls_reviewers_test.go new file mode 100644 index 000000000000..a60f2e77b387 --- /dev/null +++ b/vendor/github.com/google/go-github/github/pulls_reviewers_test.go @@ -0,0 +1,93 @@ +// Copyright 2017 The go-github AUTHORS. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package github + +import ( + "context" + "fmt" + "net/http" + "reflect" + "testing" +) + +func TestRequestReviewers(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/pulls/1/requested_reviewers", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "POST") + testBody(t, r, `{"reviewers":["octocat","googlebot"]}`+"\n") + fmt.Fprint(w, `{"number":1}`) + }) + + // This returns a PR, unmarshalling of which is tested elsewhere + pull, _, err := client.PullRequests.RequestReviewers(context.Background(), "o", "r", 1, []string{"octocat", "googlebot"}) + if err != nil { + t.Errorf("PullRequests.RequestReviewers returned error: %v", err) + } + want := &PullRequest{Number: Int(1)} + if !reflect.DeepEqual(pull, want) { + t.Errorf("PullRequests.RequestReviewers returned %+v, want %+v", pull, want) + } +} + +func TestRemoveReviewers(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/pulls/1/requested_reviewers", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "DELETE") + testBody(t, r, `{"reviewers":["octocat","googlebot"]}`+"\n") + }) + + _, err := client.PullRequests.RemoveReviewers(context.Background(), "o", "r", 1, []string{"octocat", "googlebot"}) + if err != nil { + t.Errorf("PullRequests.RemoveReviewers returned error: %v", err) + } +} + +func TestListReviewers(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/pulls/1/requested_reviewers", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + fmt.Fprint(w, `[{"login":"octocat","id":1}]`) + }) + + reviewers, _, err := client.PullRequests.ListReviewers(context.Background(), "o", "r", 1, nil) + if err != nil { + t.Errorf("PullRequests.ListReviewers returned error: %v", err) + } + + want := []*User{ + { + Login: String("octocat"), + ID: Int(1), + }, + } + if !reflect.DeepEqual(reviewers, want) { + t.Errorf("PullRequests.ListReviewers returned %+v, want %+v", reviewers, want) + } +} + +func TestListReviewers_withOptions(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/pulls/1/requested_reviewers", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testFormValues(t, r, values{ + "page": "2", + }) + fmt.Fprint(w, `[]`) + }) + + _, _, err := client.PullRequests.ListReviewers(context.Background(), "o", "r", 1, &ListOptions{Page: 2}) + if err != nil { + t.Errorf("PullRequests.ListReviewers returned error: %v", err) + } +} diff --git a/vendor/github.com/google/go-github/github/pulls_reviews_test.go b/vendor/github.com/google/go-github/github/pulls_reviews_test.go new file mode 100644 index 000000000000..d72de4709b09 --- /dev/null +++ b/vendor/github.com/google/go-github/github/pulls_reviews_test.go @@ -0,0 +1,251 @@ +// Copyright 2016 The go-github AUTHORS. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
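From the caller's side, the reviewer round-trips exercised above look like this. A usage sketch assuming an oauth2 token source (the function and token handling are illustrative, not part of this diff); note that in this revision RequestReviewers takes reviewer logins as a plain []string, exactly as the test does:

package github

import (
	"context"
	"fmt"

	"golang.org/x/oauth2"
)

// requestReviews is a hypothetical caller of the API tested above.
func requestReviews(token string) error {
	ctx := context.Background()
	ts := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: token})
	client := NewClient(oauth2.NewClient(ctx, ts))
	// Ask two users for review on pull request #1 of o/r.
	pr, _, err := client.PullRequests.RequestReviewers(ctx, "o", "r", 1, []string{"octocat", "googlebot"})
	if err != nil {
		return err
	}
	fmt.Printf("requested reviews on PR #%d\n", *pr.Number) // Number is non-nil on success
	return nil
}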
+ +package github + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "reflect" + "testing" +) + +func TestPullRequestsService_ListReviews(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/pulls/1/reviews", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testFormValues(t, r, values{ + "page": "2", + }) + fmt.Fprint(w, `[{"id":1},{"id":2}]`) + }) + + opt := &ListOptions{Page: 2} + reviews, _, err := client.PullRequests.ListReviews(context.Background(), "o", "r", 1, opt) + if err != nil { + t.Errorf("PullRequests.ListReviews returned error: %v", err) + } + + want := []*PullRequestReview{ + {ID: Int(1)}, + {ID: Int(2)}, + } + if !reflect.DeepEqual(reviews, want) { + t.Errorf("PullRequests.ListReviews returned %+v, want %+v", reviews, want) + } +} + +func TestPullRequestsService_ListReviews_invalidOwner(t *testing.T) { + _, _, err := client.PullRequests.ListReviews(context.Background(), "%", "r", 1, nil) + testURLParseError(t, err) +} + +func TestPullRequestsService_GetReview(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/pulls/1/reviews/1", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + fmt.Fprint(w, `{"id":1}`) + }) + + review, _, err := client.PullRequests.GetReview(context.Background(), "o", "r", 1, 1) + if err != nil { + t.Errorf("PullRequests.GetReview returned error: %v", err) + } + + want := &PullRequestReview{ID: Int(1)} + if !reflect.DeepEqual(review, want) { + t.Errorf("PullRequests.GetReview returned %+v, want %+v", review, want) + } +} + +func TestPullRequestsService_GetReview_invalidOwner(t *testing.T) { + _, _, err := client.PullRequests.GetReview(context.Background(), "%", "r", 1, 1) + testURLParseError(t, err) +} + +func TestPullRequestsService_DeletePendingReview(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/pulls/1/reviews/1", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "DELETE") + fmt.Fprint(w, `{"id":1}`) + }) + + review, _, err := client.PullRequests.DeletePendingReview(context.Background(), "o", "r", 1, 1) + if err != nil { + t.Errorf("PullRequests.DeletePendingReview returned error: %v", err) + } + + want := &PullRequestReview{ID: Int(1)} + if !reflect.DeepEqual(review, want) { + t.Errorf("PullRequests.DeletePendingReview returned %+v, want %+v", review, want) + } +} + +func TestPullRequestsService_DeletePendingReview_invalidOwner(t *testing.T) { + _, _, err := client.PullRequests.DeletePendingReview(context.Background(), "%", "r", 1, 1) + testURLParseError(t, err) +} + +func TestPullRequestsService_ListReviewComments(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/pulls/1/reviews/1/comments", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + fmt.Fprint(w, `[{"id":1},{"id":2}]`) + }) + + comments, _, err := client.PullRequests.ListReviewComments(context.Background(), "o", "r", 1, 1, nil) + if err != nil { + t.Errorf("PullRequests.ListReviewComments returned error: %v", err) + } + + want := []*PullRequestComment{ + {ID: Int(1)}, + {ID: Int(2)}, + } + if !reflect.DeepEqual(comments, want) { + t.Errorf("PullRequests.ListReviewComments returned %+v, want %+v", comments, want) + } +} + +func TestPullRequestsService_ListReviewComments_withOptions(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/pulls/1/reviews/1/comments", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testFormValues(t, r, values{ 
+ "page": "2", + }) + fmt.Fprint(w, `[]`) + }) + + _, _, err := client.PullRequests.ListReviewComments(context.Background(), "o", "r", 1, 1, &ListOptions{Page: 2}) + if err != nil { + t.Errorf("PullRequests.ListReviewComments returned error: %v", err) + } +} + +func TestPullRequestsService_ListReviewComments_invalidOwner(t *testing.T) { + _, _, err := client.PullRequests.ListReviewComments(context.Background(), "%", "r", 1, 1, nil) + testURLParseError(t, err) +} + +func TestPullRequestsService_CreateReview(t *testing.T) { + setup() + defer teardown() + + input := &PullRequestReviewRequest{ + Body: String("b"), + Event: String("APPROVE"), + } + + mux.HandleFunc("/repos/o/r/pulls/1/reviews", func(w http.ResponseWriter, r *http.Request) { + v := new(PullRequestReviewRequest) + json.NewDecoder(r.Body).Decode(v) + + testMethod(t, r, "POST") + if !reflect.DeepEqual(v, input) { + t.Errorf("Request body = %+v, want %+v", v, input) + } + + fmt.Fprint(w, `{"id":1}`) + }) + + review, _, err := client.PullRequests.CreateReview(context.Background(), "o", "r", 1, input) + if err != nil { + t.Errorf("PullRequests.CreateReview returned error: %v", err) + } + + want := &PullRequestReview{ID: Int(1)} + if !reflect.DeepEqual(review, want) { + t.Errorf("PullRequests.CreateReview returned %+v, want %+v", review, want) + } +} + +func TestPullRequestsService_CreateReview_invalidOwner(t *testing.T) { + _, _, err := client.PullRequests.CreateReview(context.Background(), "%", "r", 1, &PullRequestReviewRequest{}) + testURLParseError(t, err) +} + +func TestPullRequestsService_SubmitReview(t *testing.T) { + setup() + defer teardown() + + input := &PullRequestReviewRequest{ + Body: String("b"), + Event: String("APPROVE"), + } + + mux.HandleFunc("/repos/o/r/pulls/1/reviews/1/events", func(w http.ResponseWriter, r *http.Request) { + v := new(PullRequestReviewRequest) + json.NewDecoder(r.Body).Decode(v) + + testMethod(t, r, "POST") + if !reflect.DeepEqual(v, input) { + t.Errorf("Request body = %+v, want %+v", v, input) + } + + fmt.Fprint(w, `{"id":1}`) + }) + + review, _, err := client.PullRequests.SubmitReview(context.Background(), "o", "r", 1, 1, input) + if err != nil { + t.Errorf("PullRequests.SubmitReview returned error: %v", err) + } + + want := &PullRequestReview{ID: Int(1)} + if !reflect.DeepEqual(review, want) { + t.Errorf("PullRequests.SubmitReview returned %+v, want %+v", review, want) + } +} + +func TestPullRequestsService_SubmitReview_invalidOwner(t *testing.T) { + _, _, err := client.PullRequests.SubmitReview(context.Background(), "%", "r", 1, 1, &PullRequestReviewRequest{}) + testURLParseError(t, err) +} + +func TestPullRequestsService_DismissReview(t *testing.T) { + setup() + defer teardown() + + input := &PullRequestReviewDismissalRequest{Message: String("m")} + + mux.HandleFunc("/repos/o/r/pulls/1/reviews/1/dismissals", func(w http.ResponseWriter, r *http.Request) { + v := new(PullRequestReviewDismissalRequest) + json.NewDecoder(r.Body).Decode(v) + + testMethod(t, r, "PUT") + if !reflect.DeepEqual(v, input) { + t.Errorf("Request body = %+v, want %+v", v, input) + } + + fmt.Fprint(w, `{"id":1}`) + }) + + review, _, err := client.PullRequests.DismissReview(context.Background(), "o", "r", 1, 1, input) + if err != nil { + t.Errorf("PullRequests.DismissReview returned error: %v", err) + } + + want := &PullRequestReview{ID: Int(1)} + if !reflect.DeepEqual(review, want) { + t.Errorf("PullRequests.DismissReview returned %+v, want %+v", review, want) + } +} + +func 
TestPullRequestsService_DismissReview_invalidOwner(t *testing.T) { + _, _, err := client.PullRequests.DismissReview(context.Background(), "%", "r", 1, 1, &PullRequestReviewDismissalRequest{}) + testURLParseError(t, err) +} diff --git a/vendor/github.com/google/go-github/github/pulls_test.go b/vendor/github.com/google/go-github/github/pulls_test.go new file mode 100644 index 000000000000..b45883141686 --- /dev/null +++ b/vendor/github.com/google/go-github/github/pulls_test.go @@ -0,0 +1,512 @@ +// Copyright 2013 The go-github AUTHORS. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package github + +import ( + "context" + "encoding/json" + "fmt" + "io" + "net/http" + "reflect" + "strings" + "testing" +) + +func TestPullRequestsService_List(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/pulls", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testFormValues(t, r, values{ + "state": "closed", + "head": "h", + "base": "b", + "sort": "created", + "direction": "desc", + "page": "2", + }) + fmt.Fprint(w, `[{"number":1}]`) + }) + + opt := &PullRequestListOptions{"closed", "h", "b", "created", "desc", ListOptions{Page: 2}} + pulls, _, err := client.PullRequests.List(context.Background(), "o", "r", opt) + if err != nil { + t.Errorf("PullRequests.List returned error: %v", err) + } + + want := []*PullRequest{{Number: Int(1)}} + if !reflect.DeepEqual(pulls, want) { + t.Errorf("PullRequests.List returned %+v, want %+v", pulls, want) + } +} + +func TestPullRequestsService_List_invalidOwner(t *testing.T) { + _, _, err := client.PullRequests.List(context.Background(), "%", "r", nil) + testURLParseError(t, err) +} + +func TestPullRequestsService_Get(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/pulls/1", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + fmt.Fprint(w, `{"number":1}`) + }) + + pull, _, err := client.PullRequests.Get(context.Background(), "o", "r", 1) + if err != nil { + t.Errorf("PullRequests.Get returned error: %v", err) + } + + want := &PullRequest{Number: Int(1)} + if !reflect.DeepEqual(pull, want) { + t.Errorf("PullRequests.Get returned %+v, want %+v", pull, want) + } +} + +func TestPullRequestsService_GetRawDiff(t *testing.T) { + setup() + defer teardown() + const rawStr = "@@diff content" + + mux.HandleFunc("/repos/o/r/pulls/1", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testHeader(t, r, "Accept", mediaTypeV3Diff) + fmt.Fprint(w, rawStr) + }) + + ret, _, err := client.PullRequests.GetRaw(context.Background(), "o", "r", 1, RawOptions{Diff}) + if err != nil { + t.Fatalf("PullRequests.GetRaw returned error: %v", err) + } + + if ret != rawStr { + t.Errorf("PullRequests.GetRaw returned %s want %s", ret, rawStr) + } +} + +func TestPullRequestsService_GetRawPatch(t *testing.T) { + setup() + defer teardown() + const rawStr = "@@patch content" + + mux.HandleFunc("/repos/o/r/pulls/1", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testHeader(t, r, "Accept", mediaTypeV3Patch) + fmt.Fprint(w, rawStr) + }) + + ret, _, err := client.PullRequests.GetRaw(context.Background(), "o", "r", 1, RawOptions{Patch}) + if err != nil { + t.Fatalf("PullRequests.GetRaw returned error: %v", err) + } + + if ret != rawStr { + t.Errorf("PullRequests.GetRaw returned %s want %s", ret, rawStr) + } +} + +func TestPullRequestsService_GetRawInvalid(t *testing.T) { + setup() 
+ defer teardown() + + _, _, err := client.PullRequests.GetRaw(context.Background(), "o", "r", 1, RawOptions{100}) + if err == nil { + t.Fatal("PullRequests.GetRaw should return error") + } + if !strings.Contains(err.Error(), "unsupported raw type") { + t.Error("PullRequests.GetRaw should return unsupported raw type error") + } +} + +func TestPullRequestsService_Get_headAndBase(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/pulls/1", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + fmt.Fprint(w, `{"number":1,"head":{"ref":"r2","repo":{"id":2}},"base":{"ref":"r1","repo":{"id":1}}}`) + }) + + pull, _, err := client.PullRequests.Get(context.Background(), "o", "r", 1) + if err != nil { + t.Errorf("PullRequests.Get returned error: %v", err) + } + + want := &PullRequest{ + Number: Int(1), + Head: &PullRequestBranch{ + Ref: String("r2"), + Repo: &Repository{ID: Int(2)}, + }, + Base: &PullRequestBranch{ + Ref: String("r1"), + Repo: &Repository{ID: Int(1)}, + }, + } + if !reflect.DeepEqual(pull, want) { + t.Errorf("PullRequests.Get returned %+v, want %+v", pull, want) + } +} + +func TestPullRequestsService_Get_urlFields(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/pulls/1", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + fmt.Fprint(w, `{"number":1, + "url": "https://api.github.com/repos/octocat/Hello-World/pulls/1347", + "html_url": "https://github.com/octocat/Hello-World/pull/1347", + "issue_url": "https://api.github.com/repos/octocat/Hello-World/issues/1347", + "statuses_url": "https://api.github.com/repos/octocat/Hello-World/statuses/6dcb09b5b57875f334f61aebed695e2e4193db5e", + "diff_url": "https://github.com/octocat/Hello-World/pull/1347.diff", + "patch_url": "https://github.com/octocat/Hello-World/pull/1347.patch", + "review_comments_url": "https://api.github.com/repos/octocat/Hello-World/pulls/1347/comments", + "review_comment_url": "https://api.github.com/repos/octocat/Hello-World/pulls/comments{/number}"}`) + }) + + pull, _, err := client.PullRequests.Get(context.Background(), "o", "r", 1) + if err != nil { + t.Errorf("PullRequests.Get returned error: %v", err) + } + + want := &PullRequest{ + Number: Int(1), + URL: String("https://api.github.com/repos/octocat/Hello-World/pulls/1347"), + HTMLURL: String("https://github.com/octocat/Hello-World/pull/1347"), + IssueURL: String("https://api.github.com/repos/octocat/Hello-World/issues/1347"), + StatusesURL: String("https://api.github.com/repos/octocat/Hello-World/statuses/6dcb09b5b57875f334f61aebed695e2e4193db5e"), + DiffURL: String("https://github.com/octocat/Hello-World/pull/1347.diff"), + PatchURL: String("https://github.com/octocat/Hello-World/pull/1347.patch"), + ReviewCommentsURL: String("https://api.github.com/repos/octocat/Hello-World/pulls/1347/comments"), + ReviewCommentURL: String("https://api.github.com/repos/octocat/Hello-World/pulls/comments{/number}"), + } + + if !reflect.DeepEqual(pull, want) { + t.Errorf("PullRequests.Get returned %+v, want %+v", pull, want) + } +} + +func TestPullRequestsService_Get_invalidOwner(t *testing.T) { + _, _, err := client.PullRequests.Get(context.Background(), "%", "r", 1) + testURLParseError(t, err) +} + +func TestPullRequestsService_Create(t *testing.T) { + setup() + defer teardown() + + input := &NewPullRequest{Title: String("t")} + + mux.HandleFunc("/repos/o/r/pulls", func(w http.ResponseWriter, r *http.Request) { + v := new(NewPullRequest) + json.NewDecoder(r.Body).Decode(v) + + 
testMethod(t, r, "POST") + if !reflect.DeepEqual(v, input) { + t.Errorf("Request body = %+v, want %+v", v, input) + } + + fmt.Fprint(w, `{"number":1}`) + }) + + pull, _, err := client.PullRequests.Create(context.Background(), "o", "r", input) + if err != nil { + t.Errorf("PullRequests.Create returned error: %v", err) + } + + want := &PullRequest{Number: Int(1)} + if !reflect.DeepEqual(pull, want) { + t.Errorf("PullRequests.Create returned %+v, want %+v", pull, want) + } +} + +func TestPullRequestsService_Create_invalidOwner(t *testing.T) { + _, _, err := client.PullRequests.Create(context.Background(), "%", "r", nil) + testURLParseError(t, err) +} + +func TestPullRequestsService_Edit(t *testing.T) { + setup() + defer teardown() + + tests := []struct { + input *PullRequest + sendResponse string + + wantUpdate string + want *PullRequest + }{ + { + input: &PullRequest{Title: String("t")}, + sendResponse: `{"number":1}`, + wantUpdate: `{"title":"t"}`, + want: &PullRequest{Number: Int(1)}, + }, + { + // base update + input: &PullRequest{Base: &PullRequestBranch{Ref: String("master")}}, + sendResponse: `{"number":1,"base":{"ref":"master"}}`, + wantUpdate: `{"base":"master"}`, + want: &PullRequest{ + Number: Int(1), + Base: &PullRequestBranch{Ref: String("master")}, + }, + }, + } + + for i, tt := range tests { + madeRequest := false + mux.HandleFunc(fmt.Sprintf("/repos/o/r/pulls/%v", i), func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "PATCH") + testBody(t, r, tt.wantUpdate+"\n") + io.WriteString(w, tt.sendResponse) + madeRequest = true + }) + + pull, _, err := client.PullRequests.Edit(context.Background(), "o", "r", i, tt.input) + if err != nil { + t.Errorf("%d: PullRequests.Edit returned error: %v", i, err) + } + + if !reflect.DeepEqual(pull, tt.want) { + t.Errorf("%d: PullRequests.Edit returned %+v, want %+v", i, pull, tt.want) + } + + if !madeRequest { + t.Errorf("%d: PullRequest.Edit did not make the expected request", i) + } + } +} + +func TestPullRequestsService_Edit_invalidOwner(t *testing.T) { + _, _, err := client.PullRequests.Edit(context.Background(), "%", "r", 1, &PullRequest{}) + testURLParseError(t, err) +} + +func TestPullRequestsService_ListCommits(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/pulls/1/commits", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testFormValues(t, r, values{"page": "2"}) + fmt.Fprint(w, ` + [ + { + "sha": "3", + "parents": [ + { + "sha": "2" + } + ] + }, + { + "sha": "2", + "parents": [ + { + "sha": "1" + } + ] + } + ]`) + }) + + opt := &ListOptions{Page: 2} + commits, _, err := client.PullRequests.ListCommits(context.Background(), "o", "r", 1, opt) + if err != nil { + t.Errorf("PullRequests.ListCommits returned error: %v", err) + } + + want := []*RepositoryCommit{ + { + SHA: String("3"), + Parents: []Commit{ + { + SHA: String("2"), + }, + }, + }, + { + SHA: String("2"), + Parents: []Commit{ + { + SHA: String("1"), + }, + }, + }, + } + if !reflect.DeepEqual(commits, want) { + t.Errorf("PullRequests.ListCommits returned %+v, want %+v", commits, want) + } +} + +func TestPullRequestsService_ListFiles(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/pulls/1/files", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testFormValues(t, r, values{"page": "2"}) + fmt.Fprint(w, ` + [ + { + "sha": "6dcb09b5b57875f334f61aebed695e2e4193db5e", + "filename": "file1.txt", + "status": "added", + "additions": 103, + "deletions": 21, + "changes": 
124, + "patch": "@@ -132,7 +132,7 @@ module Test @@ -1000,7 +1000,7 @@ module Test" + }, + { + "sha": "f61aebed695e2e4193db5e6dcb09b5b57875f334", + "filename": "file2.txt", + "status": "modified", + "additions": 5, + "deletions": 3, + "changes": 103, + "patch": "@@ -132,7 +132,7 @@ module Test @@ -1000,7 +1000,7 @@ module Test" + } + ]`) + }) + + opt := &ListOptions{Page: 2} + commitFiles, _, err := client.PullRequests.ListFiles(context.Background(), "o", "r", 1, opt) + if err != nil { + t.Errorf("PullRequests.ListFiles returned error: %v", err) + } + + want := []*CommitFile{ + { + SHA: String("6dcb09b5b57875f334f61aebed695e2e4193db5e"), + Filename: String("file1.txt"), + Additions: Int(103), + Deletions: Int(21), + Changes: Int(124), + Status: String("added"), + Patch: String("@@ -132,7 +132,7 @@ module Test @@ -1000,7 +1000,7 @@ module Test"), + }, + { + SHA: String("f61aebed695e2e4193db5e6dcb09b5b57875f334"), + Filename: String("file2.txt"), + Additions: Int(5), + Deletions: Int(3), + Changes: Int(103), + Status: String("modified"), + Patch: String("@@ -132,7 +132,7 @@ module Test @@ -1000,7 +1000,7 @@ module Test"), + }, + } + + if !reflect.DeepEqual(commitFiles, want) { + t.Errorf("PullRequests.ListFiles returned %+v, want %+v", commitFiles, want) + } +} + +func TestPullRequestsService_IsMerged(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/pulls/1/merge", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + w.WriteHeader(http.StatusNoContent) + }) + + isMerged, _, err := client.PullRequests.IsMerged(context.Background(), "o", "r", 1) + if err != nil { + t.Errorf("PullRequests.IsMerged returned error: %v", err) + } + + want := true + if !reflect.DeepEqual(isMerged, want) { + t.Errorf("PullRequests.IsMerged returned %+v, want %+v", isMerged, want) + } +} + +func TestPullRequestsService_Merge(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/pulls/1/merge", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "PUT") + testHeader(t, r, "Accept", mediaTypeSquashPreview) + fmt.Fprint(w, ` + { + "sha": "6dcb09b5b57875f334f61aebed695e2e4193db5e", + "merged": true, + "message": "Pull Request successfully merged" + }`) + }) + + options := &PullRequestOptions{MergeMethod: "rebase"} + merge, _, err := client.PullRequests.Merge(context.Background(), "o", "r", 1, "merging pull request", options) + if err != nil { + t.Errorf("PullRequests.Merge returned error: %v", err) + } + + want := &PullRequestMergeResult{ + SHA: String("6dcb09b5b57875f334f61aebed695e2e4193db5e"), + Merged: Bool(true), + Message: String("Pull Request successfully merged"), + } + if !reflect.DeepEqual(merge, want) { + t.Errorf("PullRequests.Merge returned %+v, want %+v", merge, want) + } +} + +// Test that different merge options produce expected PUT requests. See issue https://github.com/google/go-github/issues/500. 
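+// Each case below registers its own handler at /repos/o/r/pulls/<i>/merge and
+// uses testBody to assert the exact JSON the client serializes for the given
+// PullRequestOptions; the madeRequest flag catches cases that never issue a
+// request at all.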
+func TestPullRequestsService_Merge_options(t *testing.T) { + setup() + defer teardown() + + tests := []struct { + options *PullRequestOptions + wantBody string + }{ + { + options: nil, + wantBody: `{"commit_message":"merging pull request"}`, + }, + { + options: &PullRequestOptions{}, + wantBody: `{"commit_message":"merging pull request"}`, + }, + { + options: &PullRequestOptions{MergeMethod: "rebase"}, + wantBody: `{"commit_message":"merging pull request","merge_method":"rebase"}`, + }, + { + options: &PullRequestOptions{SHA: "6dcb09b5b57875f334f61aebed695e2e4193db5e"}, + wantBody: `{"commit_message":"merging pull request","sha":"6dcb09b5b57875f334f61aebed695e2e4193db5e"}`, + }, + { + options: &PullRequestOptions{ + CommitTitle: "Extra detail", + SHA: "6dcb09b5b57875f334f61aebed695e2e4193db5e", + MergeMethod: "squash", + }, + wantBody: `{"commit_message":"merging pull request","commit_title":"Extra detail","merge_method":"squash","sha":"6dcb09b5b57875f334f61aebed695e2e4193db5e"}`, + }, + } + + for i, test := range tests { + madeRequest := false + mux.HandleFunc(fmt.Sprintf("/repos/o/r/pulls/%d/merge", i), func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "PUT") + testHeader(t, r, "Accept", mediaTypeSquashPreview) + testBody(t, r, test.wantBody+"\n") + madeRequest = true + }) + _, _, _ = client.PullRequests.Merge(context.Background(), "o", "r", i, "merging pull request", test.options) + if !madeRequest { + t.Errorf("%d: PullRequests.Merge(%#v): expected request was not made", i, test.options) + } + } +} diff --git a/vendor/github.com/google/go-github/github/reactions_test.go b/vendor/github.com/google/go-github/github/reactions_test.go new file mode 100644 index 000000000000..2725298279ed --- /dev/null +++ b/vendor/github.com/google/go-github/github/reactions_test.go @@ -0,0 +1,201 @@ +// Copyright 2016 The go-github AUTHORS. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
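+
+// Usage sketch (illustrative only; "client" and "ctx" are assumed to be a
+// configured *Client and a context.Context). The tests below assert the wire
+// format of these Reactions preview calls against a local test server:
+//
+//	reaction, _, err := client.Reactions.CreateIssueReaction(ctx, "owner", "repo", 1, "+1")
+//	if err != nil {
+//		// handle error
+//	}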
+ +package github + +import ( + "context" + "net/http" + "reflect" + "testing" +) + +func TestReactionsService_ListCommentReactions(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/comments/1/reactions", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testHeader(t, r, "Accept", mediaTypeReactionsPreview) + + w.WriteHeader(http.StatusOK) + w.Write([]byte(`[{"id":1,"user":{"login":"l","id":2},"content":"+1"}]`)) + }) + + got, _, err := client.Reactions.ListCommentReactions(context.Background(), "o", "r", 1, nil) + if err != nil { + t.Errorf("ListCommentReactions returned error: %v", err) + } + if want := []*Reaction{{ID: Int(1), User: &User{Login: String("l"), ID: Int(2)}, Content: String("+1")}}; !reflect.DeepEqual(got, want) { + t.Errorf("ListCommentReactions = %+v, want %+v", got, want) + } +} + +func TestReactionsService_CreateCommentReaction(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/comments/1/reactions", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "POST") + testHeader(t, r, "Accept", mediaTypeReactionsPreview) + + w.WriteHeader(http.StatusCreated) + w.Write([]byte(`{"id":1,"user":{"login":"l","id":2},"content":"+1"}`)) + }) + + got, _, err := client.Reactions.CreateCommentReaction(context.Background(), "o", "r", 1, "+1") + if err != nil { + t.Errorf("CreateCommentReaction returned error: %v", err) + } + want := &Reaction{ID: Int(1), User: &User{Login: String("l"), ID: Int(2)}, Content: String("+1")} + if !reflect.DeepEqual(got, want) { + t.Errorf("CreateCommentReaction = %+v, want %+v", got, want) + } +} + +func TestReactionsService_ListIssueReactions(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/issues/1/reactions", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testHeader(t, r, "Accept", mediaTypeReactionsPreview) + + w.WriteHeader(http.StatusOK) + w.Write([]byte(`[{"id":1,"user":{"login":"l","id":2},"content":"+1"}]`)) + }) + + got, _, err := client.Reactions.ListIssueReactions(context.Background(), "o", "r", 1, nil) + if err != nil { + t.Errorf("ListIssueReactions returned error: %v", err) + } + if want := []*Reaction{{ID: Int(1), User: &User{Login: String("l"), ID: Int(2)}, Content: String("+1")}}; !reflect.DeepEqual(got, want) { + t.Errorf("ListIssueReactions = %+v, want %+v", got, want) + } +} + +func TestReactionsService_CreateIssueReaction(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/issues/1/reactions", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "POST") + testHeader(t, r, "Accept", mediaTypeReactionsPreview) + + w.WriteHeader(http.StatusCreated) + w.Write([]byte(`{"id":1,"user":{"login":"l","id":2},"content":"+1"}`)) + }) + + got, _, err := client.Reactions.CreateIssueReaction(context.Background(), "o", "r", 1, "+1") + if err != nil { + t.Errorf("CreateIssueReaction returned error: %v", err) + } + want := &Reaction{ID: Int(1), User: &User{Login: String("l"), ID: Int(2)}, Content: String("+1")} + if !reflect.DeepEqual(got, want) { + t.Errorf("CreateIssueReaction = %+v, want %+v", got, want) + } +} + +func TestReactionsService_ListIssueCommentReactions(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/issues/comments/1/reactions", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testHeader(t, r, "Accept", mediaTypeReactionsPreview) + + w.WriteHeader(http.StatusOK) + 
w.Write([]byte(`[{"id":1,"user":{"login":"l","id":2},"content":"+1"}]`)) + }) + + got, _, err := client.Reactions.ListIssueCommentReactions(context.Background(), "o", "r", 1, nil) + if err != nil { + t.Errorf("ListIssueCommentReactions returned error: %v", err) + } + if want := []*Reaction{{ID: Int(1), User: &User{Login: String("l"), ID: Int(2)}, Content: String("+1")}}; !reflect.DeepEqual(got, want) { + t.Errorf("ListIssueCommentReactions = %+v, want %+v", got, want) + } +} + +func TestReactionsService_CreateIssueCommentReaction(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/issues/comments/1/reactions", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "POST") + testHeader(t, r, "Accept", mediaTypeReactionsPreview) + + w.WriteHeader(http.StatusCreated) + w.Write([]byte(`{"id":1,"user":{"login":"l","id":2},"content":"+1"}`)) + }) + + got, _, err := client.Reactions.CreateIssueCommentReaction(context.Background(), "o", "r", 1, "+1") + if err != nil { + t.Errorf("CreateIssueCommentReaction returned error: %v", err) + } + want := &Reaction{ID: Int(1), User: &User{Login: String("l"), ID: Int(2)}, Content: String("+1")} + if !reflect.DeepEqual(got, want) { + t.Errorf("CreateIssueCommentReaction = %+v, want %+v", got, want) + } +} + +func TestReactionsService_ListPullRequestCommentReactions(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/pulls/comments/1/reactions", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testHeader(t, r, "Accept", mediaTypeReactionsPreview) + + w.WriteHeader(http.StatusOK) + w.Write([]byte(`[{"id":1,"user":{"login":"l","id":2},"content":"+1"}]`)) + }) + + got, _, err := client.Reactions.ListPullRequestCommentReactions(context.Background(), "o", "r", 1, nil) + if err != nil { + t.Errorf("ListPullRequestCommentReactions returned error: %v", err) + } + if want := []*Reaction{{ID: Int(1), User: &User{Login: String("l"), ID: Int(2)}, Content: String("+1")}}; !reflect.DeepEqual(got, want) { + t.Errorf("ListPullRequestCommentReactions = %+v, want %+v", got, want) + } +} + +func TestReactionsService_CreatePullRequestCommentReaction(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/pulls/comments/1/reactions", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "POST") + testHeader(t, r, "Accept", mediaTypeReactionsPreview) + + w.WriteHeader(http.StatusCreated) + w.Write([]byte(`{"id":1,"user":{"login":"l","id":2},"content":"+1"}`)) + }) + + got, _, err := client.Reactions.CreatePullRequestCommentReaction(context.Background(), "o", "r", 1, "+1") + if err != nil { + t.Errorf("CreatePullRequestCommentReaction returned error: %v", err) + } + want := &Reaction{ID: Int(1), User: &User{Login: String("l"), ID: Int(2)}, Content: String("+1")} + if !reflect.DeepEqual(got, want) { + t.Errorf("CreatePullRequestCommentReaction = %+v, want %+v", got, want) + } +} + +func TestReactionsService_DeleteReaction(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/reactions/1", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "DELETE") + testHeader(t, r, "Accept", mediaTypeReactionsPreview) + + w.WriteHeader(http.StatusNoContent) + }) + + if _, err := client.Reactions.DeleteReaction(context.Background(), 1); err != nil { + t.Errorf("DeleteReaction returned error: %v", err) + } +} diff --git a/vendor/github.com/google/go-github/github/repos_collaborators_test.go 
b/vendor/github.com/google/go-github/github/repos_collaborators_test.go new file mode 100644 index 000000000000..d2e125cb8bfe --- /dev/null +++ b/vendor/github.com/google/go-github/github/repos_collaborators_test.go @@ -0,0 +1,161 @@ +// Copyright 2013 The go-github AUTHORS. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package github + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "reflect" + "testing" +) + +func TestRepositoriesService_ListCollaborators(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/collaborators", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testFormValues(t, r, values{"page": "2"}) + fmt.Fprintf(w, `[{"id":1}, {"id":2}]`) + }) + + opt := &ListOptions{Page: 2} + users, _, err := client.Repositories.ListCollaborators(context.Background(), "o", "r", opt) + if err != nil { + t.Errorf("Repositories.ListCollaborators returned error: %v", err) + } + + want := []*User{{ID: Int(1)}, {ID: Int(2)}} + if !reflect.DeepEqual(users, want) { + t.Errorf("Repositories.ListCollaborators returned %+v, want %+v", users, want) + } +} + +func TestRepositoriesService_ListCollaborators_invalidOwner(t *testing.T) { + _, _, err := client.Repositories.ListCollaborators(context.Background(), "%", "%", nil) + testURLParseError(t, err) +} + +func TestRepositoriesService_IsCollaborator_True(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/collaborators/u", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + w.WriteHeader(http.StatusNoContent) + }) + + isCollab, _, err := client.Repositories.IsCollaborator(context.Background(), "o", "r", "u") + if err != nil { + t.Errorf("Repositories.IsCollaborator returned error: %v", err) + } + + if !isCollab { + t.Errorf("Repositories.IsCollaborator returned false, want true") + } +} + +func TestRepositoriesService_IsCollaborator_False(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/collaborators/u", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + w.WriteHeader(http.StatusNotFound) + }) + + isCollab, _, err := client.Repositories.IsCollaborator(context.Background(), "o", "r", "u") + if err != nil { + t.Errorf("Repositories.IsCollaborator returned error: %v", err) + } + + if isCollab { + t.Errorf("Repositories.IsCollaborator returned true, want false") + } +} + +func TestRepositoriesService_IsCollaborator_invalidUser(t *testing.T) { + _, _, err := client.Repositories.IsCollaborator(context.Background(), "%", "%", "%") + testURLParseError(t, err) +} + +func TestRepositoryService_GetPermissionLevel(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/collaborators/u/permission", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + fmt.Fprintf(w, `{"permission":"admin","user":{"login":"u"}}`) + }) + + rpl, _, err := client.Repositories.GetPermissionLevel(context.Background(), "o", "r", "u") + if err != nil { + t.Errorf("Repositories.GetPermissionLevel returned error: %v", err) + } + + want := &RepositoryPermissionLevel{ + Permission: String("admin"), + User: &User{ + Login: String("u"), + }, + } + + if !reflect.DeepEqual(rpl, want) { + t.Errorf("Repositories.GetPermissionLevel returned %+v, want %+v", rpl, want) + } +} + +func TestRepositoriesService_AddCollaborator(t *testing.T) { + setup() + defer teardown() + + opt := 
&RepositoryAddCollaboratorOptions{Permission: "admin"} + + mux.HandleFunc("/repos/o/r/collaborators/u", func(w http.ResponseWriter, r *http.Request) { + v := new(RepositoryAddCollaboratorOptions) + json.NewDecoder(r.Body).Decode(v) + + testMethod(t, r, "PUT") + testHeader(t, r, "Accept", mediaTypeRepositoryInvitationsPreview) + if !reflect.DeepEqual(v, opt) { + t.Errorf("Request body = %+v, want %+v", v, opt) + } + + w.WriteHeader(http.StatusNoContent) + }) + + _, err := client.Repositories.AddCollaborator(context.Background(), "o", "r", "u", opt) + if err != nil { + t.Errorf("Repositories.AddCollaborator returned error: %v", err) + } +} + +func TestRepositoriesService_AddCollaborator_invalidUser(t *testing.T) { + _, err := client.Repositories.AddCollaborator(context.Background(), "%", "%", "%", nil) + testURLParseError(t, err) +} + +func TestRepositoriesService_RemoveCollaborator(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/collaborators/u", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "DELETE") + w.WriteHeader(http.StatusNoContent) + }) + + _, err := client.Repositories.RemoveCollaborator(context.Background(), "o", "r", "u") + if err != nil { + t.Errorf("Repositories.RemoveCollaborator returned error: %v", err) + } +} + +func TestRepositoriesService_RemoveCollaborator_invalidUser(t *testing.T) { + _, err := client.Repositories.RemoveCollaborator(context.Background(), "%", "%", "%") + testURLParseError(t, err) +} diff --git a/vendor/github.com/google/go-github/github/repos_comments_test.go b/vendor/github.com/google/go-github/github/repos_comments_test.go new file mode 100644 index 000000000000..249a37a36772 --- /dev/null +++ b/vendor/github.com/google/go-github/github/repos_comments_test.go @@ -0,0 +1,184 @@ +// Copyright 2013 The go-github AUTHORS. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
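+
+// Usage sketch (illustrative only; "client" and "ctx" are assumed to be a
+// configured *Client and a context.Context), mirroring the paginated call the
+// first test below exercises:
+//
+//	comments, _, err := client.Repositories.ListComments(ctx, "owner", "repo", &ListOptions{Page: 2})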
+ +package github + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "reflect" + "testing" +) + +func TestRepositoriesService_ListComments(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/comments", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testHeader(t, r, "Accept", mediaTypeReactionsPreview) + testFormValues(t, r, values{"page": "2"}) + fmt.Fprint(w, `[{"id":1}, {"id":2}]`) + }) + + opt := &ListOptions{Page: 2} + comments, _, err := client.Repositories.ListComments(context.Background(), "o", "r", opt) + if err != nil { + t.Errorf("Repositories.ListComments returned error: %v", err) + } + + want := []*RepositoryComment{{ID: Int(1)}, {ID: Int(2)}} + if !reflect.DeepEqual(comments, want) { + t.Errorf("Repositories.ListComments returned %+v, want %+v", comments, want) + } +} + +func TestRepositoriesService_ListComments_invalidOwner(t *testing.T) { + _, _, err := client.Repositories.ListComments(context.Background(), "%", "%", nil) + testURLParseError(t, err) +} + +func TestRepositoriesService_ListCommitComments(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/commits/s/comments", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testHeader(t, r, "Accept", mediaTypeReactionsPreview) + testFormValues(t, r, values{"page": "2"}) + fmt.Fprint(w, `[{"id":1}, {"id":2}]`) + }) + + opt := &ListOptions{Page: 2} + comments, _, err := client.Repositories.ListCommitComments(context.Background(), "o", "r", "s", opt) + if err != nil { + t.Errorf("Repositories.ListCommitComments returned error: %v", err) + } + + want := []*RepositoryComment{{ID: Int(1)}, {ID: Int(2)}} + if !reflect.DeepEqual(comments, want) { + t.Errorf("Repositories.ListCommitComments returned %+v, want %+v", comments, want) + } +} + +func TestRepositoriesService_ListCommitComments_invalidOwner(t *testing.T) { + _, _, err := client.Repositories.ListCommitComments(context.Background(), "%", "%", "%", nil) + testURLParseError(t, err) +} + +func TestRepositoriesService_CreateComment(t *testing.T) { + setup() + defer teardown() + + input := &RepositoryComment{Body: String("b")} + + mux.HandleFunc("/repos/o/r/commits/s/comments", func(w http.ResponseWriter, r *http.Request) { + v := new(RepositoryComment) + json.NewDecoder(r.Body).Decode(v) + + testMethod(t, r, "POST") + if !reflect.DeepEqual(v, input) { + t.Errorf("Request body = %+v, want %+v", v, input) + } + + fmt.Fprint(w, `{"id":1}`) + }) + + comment, _, err := client.Repositories.CreateComment(context.Background(), "o", "r", "s", input) + if err != nil { + t.Errorf("Repositories.CreateComment returned error: %v", err) + } + + want := &RepositoryComment{ID: Int(1)} + if !reflect.DeepEqual(comment, want) { + t.Errorf("Repositories.CreateComment returned %+v, want %+v", comment, want) + } +} + +func TestRepositoriesService_CreateComment_invalidOwner(t *testing.T) { + _, _, err := client.Repositories.CreateComment(context.Background(), "%", "%", "%", nil) + testURLParseError(t, err) +} + +func TestRepositoriesService_GetComment(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/comments/1", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testHeader(t, r, "Accept", mediaTypeReactionsPreview) + fmt.Fprint(w, `{"id":1}`) + }) + + comment, _, err := client.Repositories.GetComment(context.Background(), "o", "r", 1) + if err != nil { + t.Errorf("Repositories.GetComment returned error: %v", err) + } + + want := 
&RepositoryComment{ID: Int(1)} + if !reflect.DeepEqual(comment, want) { + t.Errorf("Repositories.GetComment returned %+v, want %+v", comment, want) + } +} + +func TestRepositoriesService_GetComment_invalidOwner(t *testing.T) { + _, _, err := client.Repositories.GetComment(context.Background(), "%", "%", 1) + testURLParseError(t, err) +} + +func TestRepositoriesService_UpdateComment(t *testing.T) { + setup() + defer teardown() + + input := &RepositoryComment{Body: String("b")} + + mux.HandleFunc("/repos/o/r/comments/1", func(w http.ResponseWriter, r *http.Request) { + v := new(RepositoryComment) + json.NewDecoder(r.Body).Decode(v) + + testMethod(t, r, "PATCH") + if !reflect.DeepEqual(v, input) { + t.Errorf("Request body = %+v, want %+v", v, input) + } + + fmt.Fprint(w, `{"id":1}`) + }) + + comment, _, err := client.Repositories.UpdateComment(context.Background(), "o", "r", 1, input) + if err != nil { + t.Errorf("Repositories.UpdateComment returned error: %v", err) + } + + want := &RepositoryComment{ID: Int(1)} + if !reflect.DeepEqual(comment, want) { + t.Errorf("Repositories.UpdateComment returned %+v, want %+v", comment, want) + } +} + +func TestRepositoriesService_UpdateComment_invalidOwner(t *testing.T) { + _, _, err := client.Repositories.UpdateComment(context.Background(), "%", "%", 1, nil) + testURLParseError(t, err) +} + +func TestRepositoriesService_DeleteComment(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/comments/1", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "DELETE") + }) + + _, err := client.Repositories.DeleteComment(context.Background(), "o", "r", 1) + if err != nil { + t.Errorf("Repositories.DeleteComment returned error: %v", err) + } +} + +func TestRepositoriesService_DeleteComment_invalidOwner(t *testing.T) { + _, err := client.Repositories.DeleteComment(context.Background(), "%", "%", 1) + testURLParseError(t, err) +} diff --git a/vendor/github.com/google/go-github/github/repos_commits_test.go b/vendor/github.com/google/go-github/github/repos_commits_test.go new file mode 100644 index 000000000000..7790a334bf7c --- /dev/null +++ b/vendor/github.com/google/go-github/github/repos_commits_test.go @@ -0,0 +1,266 @@ +// Copyright 2013 The go-github AUTHORS. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
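+
+// Usage sketch (illustrative only; "client" and "ctx" are assumed).
+// CommitsListOptions narrows the listing by ref, path, author, and time
+// window, which the first test below verifies via the encoded query string:
+//
+//	opt := &CommitsListOptions{SHA: "master", Author: "octocat"}
+//	commits, _, err := client.Repositories.ListCommits(ctx, "owner", "repo", opt)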
+ +package github + +import ( + "context" + "fmt" + "net/http" + "reflect" + "testing" + "time" +) + +func TestRepositoriesService_ListCommits(t *testing.T) { + setup() + defer teardown() + + // given + mux.HandleFunc("/repos/o/r/commits", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testFormValues(t, r, + values{ + "sha": "s", + "path": "p", + "author": "a", + "since": "2013-08-01T00:00:00Z", + "until": "2013-09-03T00:00:00Z", + }) + fmt.Fprintf(w, `[{"sha": "s"}]`) + }) + + opt := &CommitsListOptions{ + SHA: "s", + Path: "p", + Author: "a", + Since: time.Date(2013, time.August, 1, 0, 0, 0, 0, time.UTC), + Until: time.Date(2013, time.September, 3, 0, 0, 0, 0, time.UTC), + } + commits, _, err := client.Repositories.ListCommits(context.Background(), "o", "r", opt) + if err != nil { + t.Errorf("Repositories.ListCommits returned error: %v", err) + } + + want := []*RepositoryCommit{{SHA: String("s")}} + if !reflect.DeepEqual(commits, want) { + t.Errorf("Repositories.ListCommits returned %+v, want %+v", commits, want) + } +} + +func TestRepositoriesService_GetCommit(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/commits/s", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testHeader(t, r, "Accept", mediaTypeGitSigningPreview) + fmt.Fprintf(w, `{ + "sha": "s", + "commit": { "message": "m" }, + "author": { "login": "l" }, + "committer": { "login": "l" }, + "parents": [ { "sha": "s" } ], + "stats": { "additions": 104, "deletions": 4, "total": 108 }, + "files": [ + { + "filename": "f", + "additions": 10, + "deletions": 2, + "changes": 12, + "status": "s", + "patch": "p", + "blob_url": "b", + "raw_url": "r", + "contents_url": "c" + } + ] + }`) + }) + + commit, _, err := client.Repositories.GetCommit(context.Background(), "o", "r", "s") + if err != nil { + t.Errorf("Repositories.GetCommit returned error: %v", err) + } + + want := &RepositoryCommit{ + SHA: String("s"), + Commit: &Commit{ + Message: String("m"), + }, + Author: &User{ + Login: String("l"), + }, + Committer: &User{ + Login: String("l"), + }, + Parents: []Commit{ + { + SHA: String("s"), + }, + }, + Stats: &CommitStats{ + Additions: Int(104), + Deletions: Int(4), + Total: Int(108), + }, + Files: []CommitFile{ + { + Filename: String("f"), + Additions: Int(10), + Deletions: Int(2), + Changes: Int(12), + Status: String("s"), + Patch: String("p"), + BlobURL: String("b"), + RawURL: String("r"), + ContentsURL: String("c"), + }, + }, + } + if !reflect.DeepEqual(commit, want) { + t.Errorf("Repositories.GetCommit returned \n%+v, want \n%+v", commit, want) + } +} + +func TestRepositoriesService_GetCommitSHA1(t *testing.T) { + setup() + defer teardown() + const sha1 = "01234abcde" + + mux.HandleFunc("/repos/o/r/commits/master", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testHeader(t, r, "Accept", mediaTypeV3SHA) + + fmt.Fprintf(w, sha1) + }) + + got, _, err := client.Repositories.GetCommitSHA1(context.Background(), "o", "r", "master", "") + if err != nil { + t.Errorf("Repositories.GetCommitSHA1 returned error: %v", err) + } + + want := sha1 + if got != want { + t.Errorf("Repositories.GetCommitSHA1 = %v, want %v", got, want) + } + + mux.HandleFunc("/repos/o/r/commits/tag", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testHeader(t, r, "Accept", mediaTypeV3SHA) + testHeader(t, r, "If-None-Match", `"`+sha1+`"`) + + w.WriteHeader(http.StatusNotModified) + }) + + got, _, err = 
client.Repositories.GetCommitSHA1(context.Background(), "o", "r", "tag", sha1) + if err == nil { + t.Errorf("Expected HTTP 304 response") + } + + want = "" + if got != want { + t.Errorf("Repositories.GetCommitSHA1 = %v, want %v", got, want) + } +} + +func TestRepositoriesService_CompareCommits(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/compare/b...h", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + fmt.Fprintf(w, `{ + "base_commit": { + "sha": "s", + "commit": { + "author": { "name": "n" }, + "committer": { "name": "n" }, + "message": "m", + "tree": { "sha": "t" } + }, + "author": { "login": "l" }, + "committer": { "login": "l" }, + "parents": [ { "sha": "s" } ] + }, + "status": "s", + "ahead_by": 1, + "behind_by": 2, + "total_commits": 1, + "commits": [ + { + "sha": "s", + "commit": { "author": { "name": "n" } }, + "author": { "login": "l" }, + "committer": { "login": "l" }, + "parents": [ { "sha": "s" } ] + } + ], + "files": [ { "filename": "f" } ], + "html_url": "https://github.com/o/r/compare/b...h", + "permalink_url": "https://github.com/o/r/compare/o:bbcd538c8e72b8c175046e27cc8f907076331401...o:0328041d1152db8ae77652d1618a02e57f745f17", + "diff_url": "https://github.com/o/r/compare/b...h.diff", + "patch_url": "https://github.com/o/r/compare/b...h.patch", + "url": "https://api.github.com/repos/o/r/compare/b...h" + }`) + }) + + got, _, err := client.Repositories.CompareCommits(context.Background(), "o", "r", "b", "h") + if err != nil { + t.Errorf("Repositories.CompareCommits returned error: %v", err) + } + + want := &CommitsComparison{ + BaseCommit: &RepositoryCommit{ + SHA: String("s"), + Commit: &Commit{ + Author: &CommitAuthor{Name: String("n")}, + Committer: &CommitAuthor{Name: String("n")}, + Message: String("m"), + Tree: &Tree{SHA: String("t")}, + }, + Author: &User{Login: String("l")}, + Committer: &User{Login: String("l")}, + Parents: []Commit{ + { + SHA: String("s"), + }, + }, + }, + Status: String("s"), + AheadBy: Int(1), + BehindBy: Int(2), + TotalCommits: Int(1), + Commits: []RepositoryCommit{ + { + SHA: String("s"), + Commit: &Commit{ + Author: &CommitAuthor{Name: String("n")}, + }, + Author: &User{Login: String("l")}, + Committer: &User{Login: String("l")}, + Parents: []Commit{ + { + SHA: String("s"), + }, + }, + }, + }, + Files: []CommitFile{ + { + Filename: String("f"), + }, + }, + HTMLURL: String("https://github.com/o/r/compare/b...h"), + PermalinkURL: String("https://github.com/o/r/compare/o:bbcd538c8e72b8c175046e27cc8f907076331401...o:0328041d1152db8ae77652d1618a02e57f745f17"), + DiffURL: String("https://github.com/o/r/compare/b...h.diff"), + PatchURL: String("https://github.com/o/r/compare/b...h.patch"), + URL: String("https://api.github.com/repos/o/r/compare/b...h"), + } + + if !reflect.DeepEqual(got, want) { + t.Errorf("Repositories.CompareCommits returned \n%+v, want \n%+v", got, want) + } +} diff --git a/vendor/github.com/google/go-github/github/repos_community_health_test.go b/vendor/github.com/google/go-github/github/repos_community_health_test.go new file mode 100644 index 000000000000..a90c8c038027 --- /dev/null +++ b/vendor/github.com/google/go-github/github/repos_community_health_test.go @@ -0,0 +1,86 @@ +// Copyright 2017 The go-github AUTHORS. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
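+
+// Usage sketch (illustrative only; "client" and "ctx" are assumed). The single
+// preview endpoint exercised below returns the repository's community profile:
+//
+//	metrics, _, err := client.Repositories.GetCommunityHealthMetrics(ctx, "owner", "repo")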
+ +package github + +import ( + "context" + "fmt" + "net/http" + "reflect" + "testing" + "time" +) + +func TestRepositoriesService_GetCommunityHealthMetrics(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/community/profile", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testHeader(t, r, "Accept", mediaTypeRepositoryCommunityHealthMetricsPreview) + fmt.Fprintf(w, `{ + "health_percentage": 100, + "files": { + "code_of_conduct": { + "name": "Contributor Covenant", + "key": "contributor_covenant", + "url": null, + "html_url": "https://github.com/octocat/Hello-World/blob/master/CODE_OF_CONDUCT.md" + }, + "contributing": { + "url": "https://api.github.com/repos/octocat/Hello-World/contents/CONTRIBUTING", + "html_url": "https://github.com/octocat/Hello-World/blob/master/CONTRIBUTING" + }, + "license": { + "name": "MIT License", + "key": "mit", + "url": "https://api.github.com/licenses/mit", + "html_url": "https://github.com/octocat/Hello-World/blob/master/LICENSE" + }, + "readme": { + "url": "https://api.github.com/repos/octocat/Hello-World/contents/README.md", + "html_url": "https://github.com/octocat/Hello-World/blob/master/README.md" + } + }, + "updated_at": "2017-02-28T00:00:00Z" + }`) + }) + + got, _, err := client.Repositories.GetCommunityHealthMetrics(context.Background(), "o", "r") + if err != nil { + t.Errorf("Repositories.GetCommunityHealthMetrics returned error: %v", err) + } + + updatedAt := time.Date(2017, 02, 28, 0, 0, 0, 0, time.UTC) + want := &CommunityHealthMetrics{ + HealthPercentage: Int(100), + UpdatedAt: &updatedAt, + Files: &CommunityHealthFiles{ + CodeOfConduct: &Metric{ + Name: String("Contributor Covenant"), + Key: String("contributor_covenant"), + HTMLURL: String("https://github.com/octocat/Hello-World/blob/master/CODE_OF_CONDUCT.md"), + }, + Contributing: &Metric{ + URL: String("https://api.github.com/repos/octocat/Hello-World/contents/CONTRIBUTING"), + HTMLURL: String("https://github.com/octocat/Hello-World/blob/master/CONTRIBUTING"), + }, + License: &Metric{ + Name: String("MIT License"), + Key: String("mit"), + URL: String("https://api.github.com/licenses/mit"), + HTMLURL: String("https://github.com/octocat/Hello-World/blob/master/LICENSE"), + }, + Readme: &Metric{ + URL: String("https://api.github.com/repos/octocat/Hello-World/contents/README.md"), + HTMLURL: String("https://github.com/octocat/Hello-World/blob/master/README.md"), + }, + }, + } + if !reflect.DeepEqual(got, want) { + t.Errorf("Repositories.GetCommunityHealthMetrics:\ngot:\n%v\nwant:\n%v", Stringify(got), Stringify(want)) + } +} diff --git a/vendor/github.com/google/go-github/github/repos_contents_test.go b/vendor/github.com/google/go-github/github/repos_contents_test.go new file mode 100644 index 000000000000..498dfc810016 --- /dev/null +++ b/vendor/github.com/google/go-github/github/repos_contents_test.go @@ -0,0 +1,378 @@ +// Copyright 2014 The go-github AUTHORS. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
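+
+// Usage sketch (illustrative only; "client" and "ctx" are assumed). GetContents
+// serves both files and directories, so exactly one of the first two return
+// values is non-nil, as the tests below check for each shape:
+//
+//	file, dir, _, err := client.Repositories.GetContents(ctx, "owner", "repo", "path", &RepositoryContentGetOptions{})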
+
+package github
+
+import (
+	"context"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"reflect"
+	"testing"
+)
+
+func TestRepositoryContent_GetContent(t *testing.T) {
+	tests := []struct {
+		encoding, content *string // input encoding and content
+		want              string  // desired output
+		wantErr           bool    // whether an error is expected
+	}{
+		{
+			encoding: String(""),
+			content:  String("hello"),
+			want:     "hello",
+			wantErr:  false,
+		},
+		{
+			encoding: nil,
+			content:  String("hello"),
+			want:     "hello",
+			wantErr:  false,
+		},
+		{
+			encoding: nil,
+			content:  nil,
+			want:     "",
+			wantErr:  false,
+		},
+		{
+			encoding: String("base64"),
+			content:  String("aGVsbG8="),
+			want:     "hello",
+			wantErr:  false,
+		},
+		{
+			encoding: String("bad"),
+			content:  String("aGVsbG8="),
+			want:     "",
+			wantErr:  true,
+		},
+	}
+
+	for _, tt := range tests {
+		r := RepositoryContent{Encoding: tt.encoding, Content: tt.content}
+		got, err := r.GetContent()
+		if err != nil && !tt.wantErr {
+			t.Errorf("RepositoryContent(%q, %q) returned unexpected error: %v", tt.encoding, tt.content, err)
+		}
+		if err == nil && tt.wantErr {
+			t.Errorf("RepositoryContent(%q, %q) did not return the expected error", tt.encoding, tt.content)
+		}
+		if want := tt.want; got != want {
+			t.Errorf("RepositoryContent.GetContent returned %+v, want %+v", got, want)
+		}
+	}
+}
+
+func TestRepositoriesService_GetReadme(t *testing.T) {
+	setup()
+	defer teardown()
+	mux.HandleFunc("/repos/o/r/readme", func(w http.ResponseWriter, r *http.Request) {
+		testMethod(t, r, "GET")
+		fmt.Fprint(w, `{
+		  "type": "file",
+		  "encoding": "base64",
+		  "size": 5362,
+		  "name": "README.md",
+		  "path": "README.md"
+		}`)
+	})
+	readme, _, err := client.Repositories.GetReadme(context.Background(), "o", "r", &RepositoryContentGetOptions{})
+	if err != nil {
+		t.Errorf("Repositories.GetReadme returned error: %v", err)
+	}
+	want := &RepositoryContent{Type: String("file"), Name: String("README.md"), Size: Int(5362), Encoding: String("base64"), Path: String("README.md")}
+	if !reflect.DeepEqual(readme, want) {
+		t.Errorf("Repositories.GetReadme returned %+v, want %+v", readme, want)
+	}
+}
+
+func TestRepositoriesService_DownloadContents_Success(t *testing.T) {
+	setup()
+	defer teardown()
+	mux.HandleFunc("/repos/o/r/contents/d", func(w http.ResponseWriter, r *http.Request) {
+		testMethod(t, r, "GET")
+		fmt.Fprint(w, `[{
+		  "type": "file",
+		  "name": "f",
+		  "download_url": "`+server.URL+`/download/f"
+		}]`)
+	})
+	mux.HandleFunc("/download/f", func(w http.ResponseWriter, r *http.Request) {
+		testMethod(t, r, "GET")
+		fmt.Fprint(w, "foo")
+	})
+
+	r, err := client.Repositories.DownloadContents(context.Background(), "o", "r", "d/f", nil)
+	if err != nil {
+		t.Errorf("Repositories.DownloadContents returned error: %v", err)
+	}
+
+	bytes, err := ioutil.ReadAll(r)
+	if err != nil {
+		t.Errorf("Error reading response body: %v", err)
+	}
+	r.Close()
+
+	if got, want := string(bytes), "foo"; got != want {
+		t.Errorf("Repositories.DownloadContents returned %v, want %v", got, want)
+	}
+}
+
+func TestRepositoriesService_DownloadContents_NoDownloadURL(t *testing.T) {
+	setup()
+	defer teardown()
+	mux.HandleFunc("/repos/o/r/contents/d", func(w http.ResponseWriter, r *http.Request) {
+		testMethod(t, r, "GET")
+		fmt.Fprint(w, `[{
+		  "type": "file",
+		  "name": "f"
+		}]`)
+	})
+
+	_, err := client.Repositories.DownloadContents(context.Background(), "o", "r", "d/f", nil)
+	if err == nil {
+		t.Errorf("Repositories.DownloadContents did not return expected error")
+	}
+}
+
+func TestRepositoriesService_DownloadContents_NoFile(t
*testing.T) { + setup() + defer teardown() + mux.HandleFunc("/repos/o/r/contents/d", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + fmt.Fprint(w, `[]`) + }) + + _, err := client.Repositories.DownloadContents(context.Background(), "o", "r", "d/f", nil) + if err == nil { + t.Errorf("Repositories.DownloadContents did not return expected error") + } +} + +func TestRepositoriesService_GetContents_File(t *testing.T) { + setup() + defer teardown() + mux.HandleFunc("/repos/o/r/contents/p", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + fmt.Fprint(w, `{ + "type": "file", + "encoding": "base64", + "size": 20678, + "name": "LICENSE", + "path": "LICENSE" + }`) + }) + fileContents, _, _, err := client.Repositories.GetContents(context.Background(), "o", "r", "p", &RepositoryContentGetOptions{}) + if err != nil { + t.Errorf("Repositories.GetContents returned error: %v", err) + } + want := &RepositoryContent{Type: String("file"), Name: String("LICENSE"), Size: Int(20678), Encoding: String("base64"), Path: String("LICENSE")} + if !reflect.DeepEqual(fileContents, want) { + t.Errorf("Repositories.GetContents returned %+v, want %+v", fileContents, want) + } +} + +func TestRepositoriesService_GetContents_FilenameNeedsEscape(t *testing.T) { + setup() + defer teardown() + mux.HandleFunc("/repos/o/r/contents/p#?%/中.go", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + fmt.Fprint(w, `{}`) + }) + _, _, _, err := client.Repositories.GetContents(context.Background(), "o", "r", "p#?%/中.go", &RepositoryContentGetOptions{}) + if err != nil { + t.Fatalf("Repositories.GetContents returned error: %v", err) + } +} + +func TestRepositoriesService_GetContents_DirectoryWithSpaces(t *testing.T) { + setup() + defer teardown() + mux.HandleFunc("/repos/o/r/contents/some directory/file.go", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + fmt.Fprint(w, `{}`) + }) + _, _, _, err := client.Repositories.GetContents(context.Background(), "o", "r", "some directory/file.go", &RepositoryContentGetOptions{}) + if err != nil { + t.Fatalf("Repositories.GetContents returned error: %v", err) + } +} + +func TestRepositoriesService_GetContents_DirectoryWithPlusChars(t *testing.T) { + setup() + defer teardown() + mux.HandleFunc("/repos/o/r/contents/some directory+name/file.go", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + fmt.Fprint(w, `{}`) + }) + _, _, _, err := client.Repositories.GetContents(context.Background(), "o", "r", "some directory+name/file.go", &RepositoryContentGetOptions{}) + if err != nil { + t.Fatalf("Repositories.GetContents returned error: %v", err) + } +} + +func TestRepositoriesService_GetContents_Directory(t *testing.T) { + setup() + defer teardown() + mux.HandleFunc("/repos/o/r/contents/p", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + fmt.Fprint(w, `[{ + "type": "dir", + "name": "lib", + "path": "lib" + }, + { + "type": "file", + "size": 20678, + "name": "LICENSE", + "path": "LICENSE" + }]`) + }) + _, directoryContents, _, err := client.Repositories.GetContents(context.Background(), "o", "r", "p", &RepositoryContentGetOptions{}) + if err != nil { + t.Errorf("Repositories.GetContents returned error: %v", err) + } + want := []*RepositoryContent{{Type: String("dir"), Name: String("lib"), Path: String("lib")}, + {Type: String("file"), Name: String("LICENSE"), Size: Int(20678), Path: String("LICENSE")}} + if !reflect.DeepEqual(directoryContents, want) { + 
t.Errorf("Repositories.GetContents_Directory returned %+v, want %+v", directoryContents, want) + } +} + +func TestRepositoriesService_CreateFile(t *testing.T) { + setup() + defer teardown() + mux.HandleFunc("/repos/o/r/contents/p", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "PUT") + fmt.Fprint(w, `{ + "content":{ + "name":"p" + }, + "commit":{ + "message":"m", + "sha":"f5f369044773ff9c6383c087466d12adb6fa0828" + } + }`) + }) + message := "m" + content := []byte("c") + repositoryContentsOptions := &RepositoryContentFileOptions{ + Message: &message, + Content: content, + Committer: &CommitAuthor{Name: String("n"), Email: String("e")}, + } + createResponse, _, err := client.Repositories.CreateFile(context.Background(), "o", "r", "p", repositoryContentsOptions) + if err != nil { + t.Errorf("Repositories.CreateFile returned error: %v", err) + } + want := &RepositoryContentResponse{ + Content: &RepositoryContent{Name: String("p")}, + Commit: Commit{ + Message: String("m"), + SHA: String("f5f369044773ff9c6383c087466d12adb6fa0828"), + }, + } + if !reflect.DeepEqual(createResponse, want) { + t.Errorf("Repositories.CreateFile returned %+v, want %+v", createResponse, want) + } +} + +func TestRepositoriesService_UpdateFile(t *testing.T) { + setup() + defer teardown() + mux.HandleFunc("/repos/o/r/contents/p", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "PUT") + fmt.Fprint(w, `{ + "content":{ + "name":"p" + }, + "commit":{ + "message":"m", + "sha":"f5f369044773ff9c6383c087466d12adb6fa0828" + } + }`) + }) + message := "m" + content := []byte("c") + sha := "f5f369044773ff9c6383c087466d12adb6fa0828" + repositoryContentsOptions := &RepositoryContentFileOptions{ + Message: &message, + Content: content, + SHA: &sha, + Committer: &CommitAuthor{Name: String("n"), Email: String("e")}, + } + updateResponse, _, err := client.Repositories.UpdateFile(context.Background(), "o", "r", "p", repositoryContentsOptions) + if err != nil { + t.Errorf("Repositories.UpdateFile returned error: %v", err) + } + want := &RepositoryContentResponse{ + Content: &RepositoryContent{Name: String("p")}, + Commit: Commit{ + Message: String("m"), + SHA: String("f5f369044773ff9c6383c087466d12adb6fa0828"), + }, + } + if !reflect.DeepEqual(updateResponse, want) { + t.Errorf("Repositories.UpdateFile returned %+v, want %+v", updateResponse, want) + } +} + +func TestRepositoriesService_DeleteFile(t *testing.T) { + setup() + defer teardown() + mux.HandleFunc("/repos/o/r/contents/p", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "DELETE") + fmt.Fprint(w, `{ + "content": null, + "commit":{ + "message":"m", + "sha":"f5f369044773ff9c6383c087466d12adb6fa0828" + } + }`) + }) + message := "m" + sha := "f5f369044773ff9c6383c087466d12adb6fa0828" + repositoryContentsOptions := &RepositoryContentFileOptions{ + Message: &message, + SHA: &sha, + Committer: &CommitAuthor{Name: String("n"), Email: String("e")}, + } + deleteResponse, _, err := client.Repositories.DeleteFile(context.Background(), "o", "r", "p", repositoryContentsOptions) + if err != nil { + t.Errorf("Repositories.DeleteFile returned error: %v", err) + } + want := &RepositoryContentResponse{ + Content: nil, + Commit: Commit{ + Message: String("m"), + SHA: String("f5f369044773ff9c6383c087466d12adb6fa0828"), + }, + } + if !reflect.DeepEqual(deleteResponse, want) { + t.Errorf("Repositories.DeleteFile returned %+v, want %+v", deleteResponse, want) + } +} + +func TestRepositoriesService_GetArchiveLink(t *testing.T) { + setup() + defer 
teardown() + mux.HandleFunc("/repos/o/r/tarball", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + http.Redirect(w, r, "http://github.com/a", http.StatusFound) + }) + url, resp, err := client.Repositories.GetArchiveLink(context.Background(), "o", "r", Tarball, &RepositoryContentGetOptions{}) + if err != nil { + t.Errorf("Repositories.GetArchiveLink returned error: %v", err) + } + if resp.StatusCode != http.StatusFound { + t.Errorf("Repositories.GetArchiveLink returned status: %d, want %d", resp.StatusCode, http.StatusFound) + } + want := "http://github.com/a" + if url.String() != want { + t.Errorf("Repositories.GetArchiveLink returned %+v, want %+v", url.String(), want) + } +} diff --git a/vendor/github.com/google/go-github/github/repos_deployments_test.go b/vendor/github.com/google/go-github/github/repos_deployments_test.go new file mode 100644 index 000000000000..9fa44b2ba4c7 --- /dev/null +++ b/vendor/github.com/google/go-github/github/repos_deployments_test.go @@ -0,0 +1,161 @@ +// Copyright 2014 The go-github AUTHORS. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package github + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "reflect" + "testing" +) + +func TestRepositoriesService_ListDeployments(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/deployments", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testFormValues(t, r, values{"environment": "test"}) + fmt.Fprint(w, `[{"id":1}, {"id":2}]`) + }) + + opt := &DeploymentsListOptions{Environment: "test"} + deployments, _, err := client.Repositories.ListDeployments(context.Background(), "o", "r", opt) + if err != nil { + t.Errorf("Repositories.ListDeployments returned error: %v", err) + } + + want := []*Deployment{{ID: Int(1)}, {ID: Int(2)}} + if !reflect.DeepEqual(deployments, want) { + t.Errorf("Repositories.ListDeployments returned %+v, want %+v", deployments, want) + } +} + +func TestRepositoriesService_GetDeployment(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/deployments/3", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + fmt.Fprint(w, `{"id":3}`) + }) + + deployment, _, err := client.Repositories.GetDeployment(context.Background(), "o", "r", 3) + if err != nil { + t.Errorf("Repositories.GetDeployment returned error: %v", err) + } + + want := &Deployment{ID: Int(3)} + + if !reflect.DeepEqual(deployment, want) { + t.Errorf("Repositories.GetDeployment returned %+v, want %+v", deployment, want) + } +} + +func TestRepositoriesService_CreateDeployment(t *testing.T) { + setup() + defer teardown() + + input := &DeploymentRequest{Ref: String("1111"), Task: String("deploy"), TransientEnvironment: Bool(true)} + + mux.HandleFunc("/repos/o/r/deployments", func(w http.ResponseWriter, r *http.Request) { + v := new(DeploymentRequest) + json.NewDecoder(r.Body).Decode(v) + + testMethod(t, r, "POST") + testHeader(t, r, "Accept", mediaTypeDeploymentStatusPreview) + if !reflect.DeepEqual(v, input) { + t.Errorf("Request body = %+v, want %+v", v, input) + } + + fmt.Fprint(w, `{"ref": "1111", "task": "deploy"}`) + }) + + deployment, _, err := client.Repositories.CreateDeployment(context.Background(), "o", "r", input) + if err != nil { + t.Errorf("Repositories.CreateDeployment returned error: %v", err) + } + + want := &Deployment{Ref: String("1111"), Task: String("deploy")} + if !reflect.DeepEqual(deployment, 
want) { + t.Errorf("Repositories.CreateDeployment returned %+v, want %+v", deployment, want) + } +} + +func TestRepositoriesService_ListDeploymentStatuses(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/deployments/1/statuses", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testFormValues(t, r, values{"page": "2"}) + fmt.Fprint(w, `[{"id":1}, {"id":2}]`) + }) + + opt := &ListOptions{Page: 2} + statuses, _, err := client.Repositories.ListDeploymentStatuses(context.Background(), "o", "r", 1, opt) + if err != nil { + t.Errorf("Repositories.ListDeploymentStatuses returned error: %v", err) + } + + want := []*DeploymentStatus{{ID: Int(1)}, {ID: Int(2)}} + if !reflect.DeepEqual(statuses, want) { + t.Errorf("Repositories.ListDeploymentStatuses returned %+v, want %+v", statuses, want) + } +} + +func TestRepositoriesService_GetDeploymentStatus(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/deployments/3/statuses/4", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testHeader(t, r, "Accept", mediaTypeDeploymentStatusPreview) + fmt.Fprint(w, `{"id":4}`) + }) + + deploymentStatus, _, err := client.Repositories.GetDeploymentStatus(context.Background(), "o", "r", 3, 4) + if err != nil { + t.Errorf("Repositories.GetDeploymentStatus returned error: %v", err) + } + + want := &DeploymentStatus{ID: Int(4)} + if !reflect.DeepEqual(deploymentStatus, want) { + t.Errorf("Repositories.GetDeploymentStatus returned %+v, want %+v", deploymentStatus, want) + } +} + +func TestRepositoriesService_CreateDeploymentStatus(t *testing.T) { + setup() + defer teardown() + + input := &DeploymentStatusRequest{State: String("inactive"), Description: String("deploy"), AutoInactive: Bool(false)} + + mux.HandleFunc("/repos/o/r/deployments/1/statuses", func(w http.ResponseWriter, r *http.Request) { + v := new(DeploymentStatusRequest) + json.NewDecoder(r.Body).Decode(v) + + testMethod(t, r, "POST") + testHeader(t, r, "Accept", mediaTypeDeploymentStatusPreview) + if !reflect.DeepEqual(v, input) { + t.Errorf("Request body = %+v, want %+v", v, input) + } + + fmt.Fprint(w, `{"state": "inactive", "description": "deploy"}`) + }) + + deploymentStatus, _, err := client.Repositories.CreateDeploymentStatus(context.Background(), "o", "r", 1, input) + if err != nil { + t.Errorf("Repositories.CreateDeploymentStatus returned error: %v", err) + } + + want := &DeploymentStatus{State: String("inactive"), Description: String("deploy")} + if !reflect.DeepEqual(deploymentStatus, want) { + t.Errorf("Repositories.CreateDeploymentStatus returned %+v, want %+v", deploymentStatus, want) + } +} diff --git a/vendor/github.com/google/go-github/github/repos_forks_test.go b/vendor/github.com/google/go-github/github/repos_forks_test.go new file mode 100644 index 000000000000..40916ea4ea71 --- /dev/null +++ b/vendor/github.com/google/go-github/github/repos_forks_test.go @@ -0,0 +1,74 @@ +// Copyright 2013 The go-github AUTHORS. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file.
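+// NOTE: the tests in these vendored files share a harness defined in the
+// package's main test file (github_test.go upstream), which is not part of
+// this diff: mux is an *http.ServeMux standing in for the GitHub API, server
+// is an *httptest.Server backed by it, and client is the *Client under test.
+// A minimal sketch of what setup and teardown are assumed to do:
+//
+//	func setup() {
+//		mux = http.NewServeMux()
+//		server = httptest.NewServer(mux)
+//		client = NewClient(nil)
+//		u, _ := url.Parse(server.URL + "/")
+//		client.BaseURL = u   // route API calls to the test server
+//		client.UploadURL = u // route upload calls there too
+//	}
+//
+//	func teardown() { server.Close() }
+//
+// testMethod, testHeader, testFormValues, testBody, and testURLParseError are
+// small assertion helpers from the same file; String, Int, and Bool are the
+// package's pointer helpers used to build the want values. This sketch is an
+// approximation for readers of this diff, not the vendored source.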
+ +package github + +import ( + "context" + "fmt" + "net/http" + "reflect" + "testing" +) + +func TestRepositoriesService_ListForks(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/forks", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testFormValues(t, r, values{ + "sort": "newest", + "page": "3", + }) + fmt.Fprint(w, `[{"id":1},{"id":2}]`) + }) + + opt := &RepositoryListForksOptions{ + Sort: "newest", + ListOptions: ListOptions{Page: 3}, + } + repos, _, err := client.Repositories.ListForks(context.Background(), "o", "r", opt) + if err != nil { + t.Errorf("Repositories.ListForks returned error: %v", err) + } + + want := []*Repository{{ID: Int(1)}, {ID: Int(2)}} + if !reflect.DeepEqual(repos, want) { + t.Errorf("Repositories.ListForks returned %+v, want %+v", repos, want) + } +} + +func TestRepositoriesService_ListForks_invalidOwner(t *testing.T) { + _, _, err := client.Repositories.ListForks(context.Background(), "%", "r", nil) + testURLParseError(t, err) +} + +func TestRepositoriesService_CreateFork(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/forks", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "POST") + testFormValues(t, r, values{"organization": "o"}) + fmt.Fprint(w, `{"id":1}`) + }) + + opt := &RepositoryCreateForkOptions{Organization: "o"} + repo, _, err := client.Repositories.CreateFork(context.Background(), "o", "r", opt) + if err != nil { + t.Errorf("Repositories.CreateFork returned error: %v", err) + } + + want := &Repository{ID: Int(1)} + if !reflect.DeepEqual(repo, want) { + t.Errorf("Repositories.CreateFork returned %+v, want %+v", repo, want) + } +} + +func TestRepositoriesService_CreateFork_invalidOwner(t *testing.T) { + _, _, err := client.Repositories.CreateFork(context.Background(), "%", "r", nil) + testURLParseError(t, err) +} diff --git a/vendor/github.com/google/go-github/github/repos_hooks_test.go b/vendor/github.com/google/go-github/github/repos_hooks_test.go new file mode 100644 index 000000000000..364f4583f08b --- /dev/null +++ b/vendor/github.com/google/go-github/github/repos_hooks_test.go @@ -0,0 +1,188 @@ +// Copyright 2013 The go-github AUTHORS. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package github + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "reflect" + "testing" +) + +func TestRepositoriesService_CreateHook(t *testing.T) { + setup() + defer teardown() + + input := &Hook{Name: String("t")} + + mux.HandleFunc("/repos/o/r/hooks", func(w http.ResponseWriter, r *http.Request) { + v := new(Hook) + json.NewDecoder(r.Body).Decode(v) + + testMethod(t, r, "POST") + if !reflect.DeepEqual(v, input) { + t.Errorf("Request body = %+v, want %+v", v, input) + } + + fmt.Fprint(w, `{"id":1}`) + }) + + hook, _, err := client.Repositories.CreateHook(context.Background(), "o", "r", input) + if err != nil { + t.Errorf("Repositories.CreateHook returned error: %v", err) + } + + want := &Hook{ID: Int(1)} + if !reflect.DeepEqual(hook, want) { + t.Errorf("Repositories.CreateHook returned %+v, want %+v", hook, want) + } +} + +func TestRepositoriesService_CreateHook_invalidOwner(t *testing.T) { + _, _, err := client.Repositories.CreateHook(context.Background(), "%", "%", nil) + testURLParseError(t, err) +} + +func TestRepositoriesService_ListHooks(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/hooks", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testFormValues(t, r, values{"page": "2"}) + fmt.Fprint(w, `[{"id":1}, {"id":2}]`) + }) + + opt := &ListOptions{Page: 2} + + hooks, _, err := client.Repositories.ListHooks(context.Background(), "o", "r", opt) + if err != nil { + t.Errorf("Repositories.ListHooks returned error: %v", err) + } + + want := []*Hook{{ID: Int(1)}, {ID: Int(2)}} + if !reflect.DeepEqual(hooks, want) { + t.Errorf("Repositories.ListHooks returned %+v, want %+v", hooks, want) + } +} + +func TestRepositoriesService_ListHooks_invalidOwner(t *testing.T) { + _, _, err := client.Repositories.ListHooks(context.Background(), "%", "%", nil) + testURLParseError(t, err) +} + +func TestRepositoriesService_GetHook(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/hooks/1", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + fmt.Fprint(w, `{"id":1}`) + }) + + hook, _, err := client.Repositories.GetHook(context.Background(), "o", "r", 1) + if err != nil { + t.Errorf("Repositories.GetHook returned error: %v", err) + } + + want := &Hook{ID: Int(1)} + if !reflect.DeepEqual(hook, want) { + t.Errorf("Repositories.GetHook returned %+v, want %+v", hook, want) + } +} + +func TestRepositoriesService_GetHook_invalidOwner(t *testing.T) { + _, _, err := client.Repositories.GetHook(context.Background(), "%", "%", 1) + testURLParseError(t, err) +} + +func TestRepositoriesService_EditHook(t *testing.T) { + setup() + defer teardown() + + input := &Hook{Name: String("t")} + + mux.HandleFunc("/repos/o/r/hooks/1", func(w http.ResponseWriter, r *http.Request) { + v := new(Hook) + json.NewDecoder(r.Body).Decode(v) + + testMethod(t, r, "PATCH") + if !reflect.DeepEqual(v, input) { + t.Errorf("Request body = %+v, want %+v", v, input) + } + + fmt.Fprint(w, `{"id":1}`) + }) + + hook, _, err := client.Repositories.EditHook(context.Background(), "o", "r", 1, input) + if err != nil { + t.Errorf("Repositories.EditHook returned error: %v", err) + } + + want := &Hook{ID: Int(1)} + if !reflect.DeepEqual(hook, want) { + t.Errorf("Repositories.EditHook returned %+v, want %+v", hook, want) + } +} + +func TestRepositoriesService_EditHook_invalidOwner(t *testing.T) { + _, _, err := client.Repositories.EditHook(context.Background(), "%", "%", 1, nil) + testURLParseError(t, err) +} + +func 
TestRepositoriesService_DeleteHook(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/hooks/1", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "DELETE") + }) + + _, err := client.Repositories.DeleteHook(context.Background(), "o", "r", 1) + if err != nil { + t.Errorf("Repositories.DeleteHook returned error: %v", err) + } +} + +func TestRepositoriesService_DeleteHook_invalidOwner(t *testing.T) { + _, err := client.Repositories.DeleteHook(context.Background(), "%", "%", 1) + testURLParseError(t, err) +} + +func TestRepositoriesService_PingHook(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/hooks/1/pings", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "POST") + }) + + _, err := client.Repositories.PingHook(context.Background(), "o", "r", 1) + if err != nil { + t.Errorf("Repositories.PingHook returned error: %v", err) + } +} + +func TestRepositoriesService_TestHook(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/hooks/1/tests", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "POST") + }) + + _, err := client.Repositories.TestHook(context.Background(), "o", "r", 1) + if err != nil { + t.Errorf("Repositories.TestHook returned error: %v", err) + } +} + +func TestRepositoriesService_TestHook_invalidOwner(t *testing.T) { + _, err := client.Repositories.TestHook(context.Background(), "%", "%", 1) + testURLParseError(t, err) +} diff --git a/vendor/github.com/google/go-github/github/repos_invitations_test.go b/vendor/github.com/google/go-github/github/repos_invitations_test.go new file mode 100644 index 000000000000..01ac66657fca --- /dev/null +++ b/vendor/github.com/google/go-github/github/repos_invitations_test.go @@ -0,0 +1,74 @@ +// Copyright 2016 The go-github AUTHORS. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package github + +import ( + "context" + "fmt" + "net/http" + "reflect" + "testing" +) + +func TestRepositoriesService_ListInvitations(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/invitations", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testHeader(t, r, "Accept", mediaTypeRepositoryInvitationsPreview) + testFormValues(t, r, values{"page": "2"}) + fmt.Fprintf(w, `[{"id":1}, {"id":2}]`) + }) + + opt := &ListOptions{Page: 2} + got, _, err := client.Repositories.ListInvitations(context.Background(), "o", "r", opt) + if err != nil { + t.Errorf("Repositories.ListInvitations returned error: %v", err) + } + + want := []*RepositoryInvitation{{ID: Int(1)}, {ID: Int(2)}} + if !reflect.DeepEqual(got, want) { + t.Errorf("Repositories.ListInvitations = %+v, want %+v", got, want) + } +} + +func TestRepositoriesService_DeleteInvitation(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/invitations/2", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "DELETE") + testHeader(t, r, "Accept", mediaTypeRepositoryInvitationsPreview) + w.WriteHeader(http.StatusNoContent) + }) + + _, err := client.Repositories.DeleteInvitation(context.Background(), "o", "r", 2) + if err != nil { + t.Errorf("Repositories.DeleteInvitation returned error: %v", err) + } +} + +func TestRepositoriesService_UpdateInvitation(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/invitations/2", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "PATCH") + testHeader(t, r, "Accept", mediaTypeRepositoryInvitationsPreview) + fmt.Fprintf(w, `{"id":1}`) + }) + + got, _, err := client.Repositories.UpdateInvitation(context.Background(), "o", "r", 2, "write") + if err != nil { + t.Errorf("Repositories.UpdateInvitation returned error: %v", err) + } + + want := &RepositoryInvitation{ID: Int(1)} + if !reflect.DeepEqual(got, want) { + t.Errorf("Repositories.UpdateInvitation = %+v, want %+v", got, want) + } +} diff --git a/vendor/github.com/google/go-github/github/repos_keys_test.go b/vendor/github.com/google/go-github/github/repos_keys_test.go new file mode 100644 index 000000000000..2ef04fdcb623 --- /dev/null +++ b/vendor/github.com/google/go-github/github/repos_keys_test.go @@ -0,0 +1,154 @@ +// Copyright 2013 The go-github AUTHORS. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package github + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "reflect" + "testing" +) + +func TestRepositoriesService_ListKeys(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/keys", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testFormValues(t, r, values{"page": "2"}) + fmt.Fprint(w, `[{"id":1}]`) + }) + + opt := &ListOptions{Page: 2} + keys, _, err := client.Repositories.ListKeys(context.Background(), "o", "r", opt) + if err != nil { + t.Errorf("Repositories.ListKeys returned error: %v", err) + } + + want := []*Key{{ID: Int(1)}} + if !reflect.DeepEqual(keys, want) { + t.Errorf("Repositories.ListKeys returned %+v, want %+v", keys, want) + } +} + +func TestRepositoriesService_ListKeys_invalidOwner(t *testing.T) { + _, _, err := client.Repositories.ListKeys(context.Background(), "%", "%", nil) + testURLParseError(t, err) +} + +func TestRepositoriesService_GetKey(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/keys/1", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + fmt.Fprint(w, `{"id":1}`) + }) + + key, _, err := client.Repositories.GetKey(context.Background(), "o", "r", 1) + if err != nil { + t.Errorf("Repositories.GetKey returned error: %v", err) + } + + want := &Key{ID: Int(1)} + if !reflect.DeepEqual(key, want) { + t.Errorf("Repositories.GetKey returned %+v, want %+v", key, want) + } +} + +func TestRepositoriesService_GetKey_invalidOwner(t *testing.T) { + _, _, err := client.Repositories.GetKey(context.Background(), "%", "%", 1) + testURLParseError(t, err) +} + +func TestRepositoriesService_CreateKey(t *testing.T) { + setup() + defer teardown() + + input := &Key{Key: String("k"), Title: String("t")} + + mux.HandleFunc("/repos/o/r/keys", func(w http.ResponseWriter, r *http.Request) { + v := new(Key) + json.NewDecoder(r.Body).Decode(v) + + testMethod(t, r, "POST") + if !reflect.DeepEqual(v, input) { + t.Errorf("Request body = %+v, want %+v", v, input) + } + + fmt.Fprint(w, `{"id":1}`) + }) + + key, _, err := client.Repositories.CreateKey(context.Background(), "o", "r", input) + if err != nil { + t.Errorf("Repositories.CreateKey returned error: %v", err) + } + + want := &Key{ID: Int(1)} + if !reflect.DeepEqual(key, want) { + t.Errorf("Repositories.CreateKey returned %+v, want %+v", key, want) + } +} + +func TestRepositoriesService_CreateKey_invalidOwner(t *testing.T) { + _, _, err := client.Repositories.CreateKey(context.Background(), "%", "%", nil) + testURLParseError(t, err) +} + +func TestRepositoriesService_EditKey(t *testing.T) { + setup() + defer teardown() + + input := &Key{Key: String("k"), Title: String("t")} + + mux.HandleFunc("/repos/o/r/keys/1", func(w http.ResponseWriter, r *http.Request) { + v := new(Key) + json.NewDecoder(r.Body).Decode(v) + + testMethod(t, r, "PATCH") + if !reflect.DeepEqual(v, input) { + t.Errorf("Request body = %+v, want %+v", v, input) + } + + fmt.Fprint(w, `{"id":1}`) + }) + + key, _, err := client.Repositories.EditKey(context.Background(), "o", "r", 1, input) + if err != nil { + t.Errorf("Repositories.EditKey returned error: %v", err) + } + + want := &Key{ID: Int(1)} + if !reflect.DeepEqual(key, want) { + t.Errorf("Repositories.EditKey returned %+v, want %+v", key, want) + } +} + +func TestRepositoriesService_EditKey_invalidOwner(t *testing.T) { + _, _, err := client.Repositories.EditKey(context.Background(), "%", "%", 1, nil) + testURLParseError(t, err) +} + +func TestRepositoriesService_DeleteKey(t *testing.T) { + setup() +
defer teardown() + + mux.HandleFunc("/repos/o/r/keys/1", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "DELETE") + }) + + _, err := client.Repositories.DeleteKey(context.Background(), "o", "r", 1) + if err != nil { + t.Errorf("Repositories.DeleteKey returned error: %v", err) + } +} + +func TestRepositoriesService_DeleteKey_invalidOwner(t *testing.T) { + _, err := client.Repositories.DeleteKey(context.Background(), "%", "%", 1) + testURLParseError(t, err) +} diff --git a/vendor/github.com/google/go-github/github/repos_merging_test.go b/vendor/github.com/google/go-github/github/repos_merging_test.go new file mode 100644 index 000000000000..e2e264fd6604 --- /dev/null +++ b/vendor/github.com/google/go-github/github/repos_merging_test.go @@ -0,0 +1,48 @@ +// Copyright 2014 The go-github AUTHORS. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package github + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "reflect" + "testing" +) + +func TestRepositoriesService_Merge(t *testing.T) { + setup() + defer teardown() + + input := &RepositoryMergeRequest{ + Base: String("b"), + Head: String("h"), + CommitMessage: String("c"), + } + + mux.HandleFunc("/repos/o/r/merges", func(w http.ResponseWriter, r *http.Request) { + v := new(RepositoryMergeRequest) + json.NewDecoder(r.Body).Decode(v) + + testMethod(t, r, "POST") + if !reflect.DeepEqual(v, input) { + t.Errorf("Request body = %+v, want %+v", v, input) + } + + fmt.Fprint(w, `{"sha":"s"}`) + }) + + commit, _, err := client.Repositories.Merge(context.Background(), "o", "r", input) + if err != nil { + t.Errorf("Repositories.Merge returned error: %v", err) + } + + want := &RepositoryCommit{SHA: String("s")} + if !reflect.DeepEqual(commit, want) { + t.Errorf("Repositories.Merge returned %+v, want %+v", commit, want) + } +} diff --git a/vendor/github.com/google/go-github/github/repos_pages_test.go b/vendor/github.com/google/go-github/github/repos_pages_test.go new file mode 100644 index 000000000000..c9e19a1f5a10 --- /dev/null +++ b/vendor/github.com/google/go-github/github/repos_pages_test.go @@ -0,0 +1,134 @@ +// Copyright 2014 The go-github AUTHORS. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
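+// NOTE: the mediaType*Preview constants asserted via testHeader in these
+// tests (mediaTypePagesPreview here, and the deployment, invitations, and
+// projects variants elsewhere) are the Accept headers for GitHub API
+// previews; in go-github they are assumed to be defined alongside the client
+// (github.go). Pinning them in the tests turns an accidentally dropped
+// preview header into a loud failure.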
+ +package github + +import ( + "context" + "fmt" + "net/http" + "reflect" + "testing" +) + +func TestRepositoriesService_GetPagesInfo(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/pages", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testHeader(t, r, "Accept", mediaTypePagesPreview) + fmt.Fprint(w, `{"url":"u","status":"s","cname":"c","custom_404":false,"html_url":"h"}`) + }) + + page, _, err := client.Repositories.GetPagesInfo(context.Background(), "o", "r") + if err != nil { + t.Errorf("Repositories.GetPagesInfo returned error: %v", err) + } + + want := &Pages{URL: String("u"), Status: String("s"), CNAME: String("c"), Custom404: Bool(false), HTMLURL: String("h")} + if !reflect.DeepEqual(page, want) { + t.Errorf("Repositories.GetPagesInfo returned %+v, want %+v", page, want) + } +} + +func TestRepositoriesService_ListPagesBuilds(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/pages/builds", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + fmt.Fprint(w, `[{"url":"u","status":"s","commit":"c"}]`) + }) + + pages, _, err := client.Repositories.ListPagesBuilds(context.Background(), "o", "r", nil) + if err != nil { + t.Errorf("Repositories.ListPagesBuilds returned error: %v", err) + } + + want := []*PagesBuild{{URL: String("u"), Status: String("s"), Commit: String("c")}} + if !reflect.DeepEqual(pages, want) { + t.Errorf("Repositories.ListPagesBuilds returned %+v, want %+v", pages, want) + } +} + +func TestRepositoriesService_ListPagesBuilds_withOptions(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/pages/builds", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testFormValues(t, r, values{ + "page": "2", + }) + fmt.Fprint(w, `[]`) + }) + + _, _, err := client.Repositories.ListPagesBuilds(context.Background(), "o", "r", &ListOptions{Page: 2}) + if err != nil { + t.Errorf("Repositories.ListPagesBuilds returned error: %v", err) + } +} + +func TestRepositoriesService_GetLatestPagesBuild(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/pages/builds/latest", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + fmt.Fprint(w, `{"url":"u","status":"s","commit":"c"}`) + }) + + build, _, err := client.Repositories.GetLatestPagesBuild(context.Background(), "o", "r") + if err != nil { + t.Errorf("Repositories.GetLatestPagesBuild returned error: %v", err) + } + + want := &PagesBuild{URL: String("u"), Status: String("s"), Commit: String("c")} + if !reflect.DeepEqual(build, want) { + t.Errorf("Repositories.GetLatestPagesBuild returned %+v, want %+v", build, want) + } +} + +func TestRepositoriesService_GetPageBuild(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/pages/builds/1", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + fmt.Fprint(w, `{"url":"u","status":"s","commit":"c"}`) + }) + + build, _, err := client.Repositories.GetPageBuild(context.Background(), "o", "r", 1) + if err != nil { + t.Errorf("Repositories.GetPageBuild returned error: %v", err) + } + + want := &PagesBuild{URL: String("u"), Status: String("s"), Commit: String("c")} + if !reflect.DeepEqual(build, want) { + t.Errorf("Repositories.GetPageBuild returned %+v, want %+v", build, want) + } +} + +func TestRepositoriesService_RequestPageBuild(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/pages/builds", func(w http.ResponseWriter, r *http.Request) { + 
testMethod(t, r, "POST") + testHeader(t, r, "Accept", mediaTypePagesPreview) + fmt.Fprint(w, `{"url":"u","status":"s"}`) + }) + + build, _, err := client.Repositories.RequestPageBuild(context.Background(), "o", "r") + if err != nil { + t.Errorf("Repositories.RequestPageBuild returned error: %v", err) + } + + want := &PagesBuild{URL: String("u"), Status: String("s")} + if !reflect.DeepEqual(build, want) { + t.Errorf("Repositories.RequestPageBuild returned %+v, want %+v", build, want) + } +} diff --git a/vendor/github.com/google/go-github/github/repos_projects_test.go b/vendor/github.com/google/go-github/github/repos_projects_test.go new file mode 100644 index 000000000000..57dbdd1b3fee --- /dev/null +++ b/vendor/github.com/google/go-github/github/repos_projects_test.go @@ -0,0 +1,68 @@ +// Copyright 2017 The go-github AUTHORS. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package github + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "reflect" + "testing" +) + +func TestRepositoriesService_ListProjects(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/projects", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testHeader(t, r, "Accept", mediaTypeProjectsPreview) + testFormValues(t, r, values{"page": "2"}) + fmt.Fprint(w, `[{"id":1}]`) + }) + + opt := &ProjectListOptions{ListOptions: ListOptions{Page: 2}} + projects, _, err := client.Repositories.ListProjects(context.Background(), "o", "r", opt) + if err != nil { + t.Errorf("Repositories.ListProjects returned error: %v", err) + } + + want := []*Project{{ID: Int(1)}} + if !reflect.DeepEqual(projects, want) { + t.Errorf("Repositories.ListProjects returned %+v, want %+v", projects, want) + } +} + +func TestRepositoriesService_CreateProject(t *testing.T) { + setup() + defer teardown() + + input := &ProjectOptions{Name: "Project Name", Body: "Project body."} + + mux.HandleFunc("/repos/o/r/projects", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "POST") + testHeader(t, r, "Accept", mediaTypeProjectsPreview) + + v := &ProjectOptions{} + json.NewDecoder(r.Body).Decode(v) + if !reflect.DeepEqual(v, input) { + t.Errorf("Request body = %+v, want %+v", v, input) + } + + fmt.Fprint(w, `{"id":1}`) + }) + + project, _, err := client.Repositories.CreateProject(context.Background(), "o", "r", input) + if err != nil { + t.Errorf("Repositories.CreateProject returned error: %v", err) + } + + want := &Project{ID: Int(1)} + if !reflect.DeepEqual(project, want) { + t.Errorf("Repositories.CreateProject returned %+v, want %+v", project, want) + } +} diff --git a/vendor/github.com/google/go-github/github/repos_releases_test.go b/vendor/github.com/google/go-github/github/repos_releases_test.go new file mode 100644 index 000000000000..9a2c4c8afff2 --- /dev/null +++ b/vendor/github.com/google/go-github/github/repos_releases_test.go @@ -0,0 +1,353 @@ +// Copyright 2013 The go-github AUTHORS. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
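+// NOTE: TestRepositoriesService_UploadReleaseAsset at the end of this file
+// leans on the harness helper openTestFile, assumed to create a temp
+// directory, write the given content into a file there, and return the opened
+// *os.File plus the directory path (hence the deferred os.RemoveAll). A rough
+// sketch under those assumptions, not the vendored implementation:
+//
+//	func openTestFile(name, content string) (*os.File, string, error) {
+//		dir, err := ioutil.TempDir("", "go-github")
+//		if err != nil {
+//			return nil, dir, err
+//		}
+//		path := filepath.Join(dir, name)
+//		if err := ioutil.WriteFile(path, []byte(content), 0600); err != nil {
+//			return nil, dir, err
+//		}
+//		f, err := os.Open(path)
+//		return f, dir, err
+//	}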
+ +package github + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "os" + "reflect" + "strings" + "testing" +) + +func TestRepositoriesService_ListReleases(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/releases", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testFormValues(t, r, values{"page": "2"}) + fmt.Fprint(w, `[{"id":1}]`) + }) + + opt := &ListOptions{Page: 2} + releases, _, err := client.Repositories.ListReleases(context.Background(), "o", "r", opt) + if err != nil { + t.Errorf("Repositories.ListReleases returned error: %v", err) + } + want := []*RepositoryRelease{{ID: Int(1)}} + if !reflect.DeepEqual(releases, want) { + t.Errorf("Repositories.ListReleases returned %+v, want %+v", releases, want) + } +} + +func TestRepositoriesService_GetRelease(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/releases/1", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + fmt.Fprint(w, `{"id":1,"author":{"login":"l"}}`) + }) + + release, resp, err := client.Repositories.GetRelease(context.Background(), "o", "r", 1) + if err != nil { + t.Errorf("Repositories.GetRelease returned error: %v\n%v", err, resp.Body) + } + + want := &RepositoryRelease{ID: Int(1), Author: &User{Login: String("l")}} + if !reflect.DeepEqual(release, want) { + t.Errorf("Repositories.GetRelease returned %+v, want %+v", release, want) + } +} + +func TestRepositoriesService_GetLatestRelease(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/releases/latest", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + fmt.Fprint(w, `{"id":3}`) + }) + + release, resp, err := client.Repositories.GetLatestRelease(context.Background(), "o", "r") + if err != nil { + t.Errorf("Repositories.GetLatestRelease returned error: %v\n%v", err, resp.Body) + } + + want := &RepositoryRelease{ID: Int(3)} + if !reflect.DeepEqual(release, want) { + t.Errorf("Repositories.GetLatestRelease returned %+v, want %+v", release, want) + } +} + +func TestRepositoriesService_GetReleaseByTag(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/releases/tags/foo", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + fmt.Fprint(w, `{"id":13}`) + }) + + release, resp, err := client.Repositories.GetReleaseByTag(context.Background(), "o", "r", "foo") + if err != nil { + t.Errorf("Repositories.GetReleaseByTag returned error: %v\n%v", err, resp.Body) + } + + want := &RepositoryRelease{ID: Int(13)} + if !reflect.DeepEqual(release, want) { + t.Errorf("Repositories.GetReleaseByTag returned %+v, want %+v", release, want) + } +} + +func TestRepositoriesService_CreateRelease(t *testing.T) { + setup() + defer teardown() + + input := &RepositoryRelease{Name: String("v1.0")} + + mux.HandleFunc("/repos/o/r/releases", func(w http.ResponseWriter, r *http.Request) { + v := new(RepositoryRelease) + json.NewDecoder(r.Body).Decode(v) + + testMethod(t, r, "POST") + if !reflect.DeepEqual(v, input) { + t.Errorf("Request body = %+v, want %+v", v, input) + } + fmt.Fprint(w, `{"id":1}`) + }) + + release, _, err := client.Repositories.CreateRelease(context.Background(), "o", "r", input) + if err != nil { + t.Errorf("Repositories.CreateRelease returned error: %v", err) + } + + want := &RepositoryRelease{ID: Int(1)} + if !reflect.DeepEqual(release, want) { + t.Errorf("Repositories.CreateRelease returned %+v, want %+v", release, want) + } +} + +func 
TestRepositoriesService_EditRelease(t *testing.T) { + setup() + defer teardown() + + input := &RepositoryRelease{Name: String("n")} + + mux.HandleFunc("/repos/o/r/releases/1", func(w http.ResponseWriter, r *http.Request) { + v := new(RepositoryRelease) + json.NewDecoder(r.Body).Decode(v) + + testMethod(t, r, "PATCH") + if !reflect.DeepEqual(v, input) { + t.Errorf("Request body = %+v, want %+v", v, input) + } + fmt.Fprint(w, `{"id":1}`) + }) + + release, _, err := client.Repositories.EditRelease(context.Background(), "o", "r", 1, input) + if err != nil { + t.Errorf("Repositories.EditRelease returned error: %v", err) + } + want := &RepositoryRelease{ID: Int(1)} + if !reflect.DeepEqual(release, want) { + t.Errorf("Repositories.EditRelease returned = %+v, want %+v", release, want) + } +} + +func TestRepositoriesService_DeleteRelease(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/releases/1", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "DELETE") + }) + + _, err := client.Repositories.DeleteRelease(context.Background(), "o", "r", 1) + if err != nil { + t.Errorf("Repositories.DeleteRelease returned error: %v", err) + } +} + +func TestRepositoriesService_ListReleaseAssets(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/releases/1/assets", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testFormValues(t, r, values{"page": "2"}) + fmt.Fprint(w, `[{"id":1}]`) + }) + + opt := &ListOptions{Page: 2} + assets, _, err := client.Repositories.ListReleaseAssets(context.Background(), "o", "r", 1, opt) + if err != nil { + t.Errorf("Repositories.ListReleaseAssets returned error: %v", err) + } + want := []*ReleaseAsset{{ID: Int(1)}} + if !reflect.DeepEqual(assets, want) { + t.Errorf("Repositories.ListReleaseAssets returned %+v, want %+v", assets, want) + } +} + +func TestRepositoriesService_GetReleaseAsset(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/releases/assets/1", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + fmt.Fprint(w, `{"id":1}`) + }) + + asset, _, err := client.Repositories.GetReleaseAsset(context.Background(), "o", "r", 1) + if err != nil { + t.Errorf("Repositories.GetReleaseAsset returned error: %v", err) + } + want := &ReleaseAsset{ID: Int(1)} + if !reflect.DeepEqual(asset, want) { + t.Errorf("Repositories.GetReleaseAsset returned %+v, want %+v", asset, want) + } +} + +func TestRepositoriesService_DownloadReleaseAsset_Stream(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/releases/assets/1", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testHeader(t, r, "Accept", defaultMediaType) + w.Header().Set("Content-Type", "application/octet-stream") + w.Header().Set("Content-Disposition", "attachment; filename=hello-world.txt") + fmt.Fprint(w, "Hello World") + }) + + reader, _, err := client.Repositories.DownloadReleaseAsset(context.Background(), "o", "r", 1) + if err != nil { + t.Errorf("Repositories.DownloadReleaseAsset returned error: %v", err) + } + want := []byte("Hello World") + content, err := ioutil.ReadAll(reader) + if err != nil { + t.Errorf("Repositories.DownloadReleaseAsset returned bad reader: %v", err) + } + if !bytes.Equal(want, content) { + t.Errorf("Repositories.DownloadReleaseAsset returned %+v, want %+v", content, want) + } +} + +func TestRepositoriesService_DownloadReleaseAsset_Redirect(t *testing.T) { + setup() + defer teardown() + + 
mux.HandleFunc("/repos/o/r/releases/assets/1", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testHeader(t, r, "Accept", defaultMediaType) + http.Redirect(w, r, "/yo", http.StatusFound) + }) + + _, got, err := client.Repositories.DownloadReleaseAsset(context.Background(), "o", "r", 1) + if err != nil { + t.Errorf("Repositories.DownloadReleaseAsset returned error: %v", err) + } + want := "/yo" + if !strings.HasSuffix(got, want) { + t.Errorf("Repositories.DownloadReleaseAsset returned %+v, want %+v", got, want) + } +} + +func TestRepositoriesService_DownloadReleaseAsset_APIError(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/releases/assets/1", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testHeader(t, r, "Accept", defaultMediaType) + w.WriteHeader(http.StatusNotFound) + fmt.Fprint(w, `{"message":"Not Found","documentation_url":"https://developer.github.com/v3"}`) + }) + + resp, loc, err := client.Repositories.DownloadReleaseAsset(context.Background(), "o", "r", 1) + if err == nil { + t.Error("Repositories.DownloadReleaseAsset did not return an error") + } + + if resp != nil { + resp.Close() + t.Error("Repositories.DownloadReleaseAsset returned stream, want nil") + } + + if loc != "" { + t.Errorf(`Repositories.DownloadReleaseAsset returned "%s", want empty ""`, loc) + } +} + +func TestRepositoriesService_EditReleaseAsset(t *testing.T) { + setup() + defer teardown() + + input := &ReleaseAsset{Name: String("n")} + + mux.HandleFunc("/repos/o/r/releases/assets/1", func(w http.ResponseWriter, r *http.Request) { + v := new(ReleaseAsset) + json.NewDecoder(r.Body).Decode(v) + + testMethod(t, r, "PATCH") + if !reflect.DeepEqual(v, input) { + t.Errorf("Request body = %+v, want %+v", v, input) + } + fmt.Fprint(w, `{"id":1}`) + }) + + asset, _, err := client.Repositories.EditReleaseAsset(context.Background(), "o", "r", 1, input) + if err != nil { + t.Errorf("Repositories.EditReleaseAsset returned error: %v", err) + } + want := &ReleaseAsset{ID: Int(1)} + if !reflect.DeepEqual(asset, want) { + t.Errorf("Repositories.EditReleaseAsset returned = %+v, want %+v", asset, want) + } +} + +func TestRepositoriesService_DeleteReleaseAsset(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/releases/assets/1", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "DELETE") + }) + + _, err := client.Repositories.DeleteReleaseAsset(context.Background(), "o", "r", 1) + if err != nil { + t.Errorf("Repositories.DeleteReleaseAsset returned error: %v", err) + } +} + +func TestRepositoriesService_UploadReleaseAsset(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/releases/1/assets", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "POST") + testHeader(t, r, "Content-Type", "text/plain; charset=utf-8") + testHeader(t, r, "Content-Length", "12") + testFormValues(t, r, values{"name": "n"}) + testBody(t, r, "Upload me !\n") + + fmt.Fprintf(w, `{"id":1}`) + }) + + file, dir, err := openTestFile("upload.txt", "Upload me !\n") + if err != nil { + t.Fatalf("Unable to create temp file: %v", err) + } + defer os.RemoveAll(dir) + + opt := &UploadOptions{Name: "n"} + asset, _, err := client.Repositories.UploadReleaseAsset(context.Background(), "o", "r", 1, opt, file) + if err != nil { + t.Errorf("Repositories.UploadReleaseAsset returned error: %v", err) + } + want := &ReleaseAsset{ID: Int(1)} + if !reflect.DeepEqual(asset, want) { +
t.Errorf("Repositories.UploadReleaseAsset returned %+v, want %+v", asset, want) + } +} diff --git a/vendor/github.com/google/go-github/github/repos_stats_test.go b/vendor/github.com/google/go-github/github/repos_stats_test.go new file mode 100644 index 000000000000..1da94d013380 --- /dev/null +++ b/vendor/github.com/google/go-github/github/repos_stats_test.go @@ -0,0 +1,211 @@ +// Copyright 2014 The go-github AUTHORS. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package github + +import ( + "context" + "fmt" + "net/http" + "reflect" + "testing" + "time" +) + +func TestRepositoriesService_ListContributorsStats(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/stats/contributors", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + + fmt.Fprint(w, ` +[ + { + "author": { + "id": 1 + }, + "total": 135, + "weeks": [ + { + "w": 1367712000, + "a": 6898, + "d": 77, + "c": 10 + } + ] + } +] +`) + }) + + stats, _, err := client.Repositories.ListContributorsStats(context.Background(), "o", "r") + if err != nil { + t.Errorf("RepositoriesService.ListContributorsStats returned error: %v", err) + } + + want := []*ContributorStats{ + { + Author: &Contributor{ + ID: Int(1), + }, + Total: Int(135), + Weeks: []WeeklyStats{ + { + Week: &Timestamp{time.Date(2013, 05, 05, 00, 00, 00, 0, time.UTC).Local()}, + Additions: Int(6898), + Deletions: Int(77), + Commits: Int(10), + }, + }, + }, + } + + if !reflect.DeepEqual(stats, want) { + t.Errorf("RepositoriesService.ListContributorsStats returned %+v, want %+v", stats, want) + } +} + +func TestRepositoriesService_ListCommitActivity(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/stats/commit_activity", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + + fmt.Fprint(w, ` +[ + { + "days": [0, 3, 26, 20, 39, 1, 0], + "total": 89, + "week": 1336280400 + } +] +`) + }) + + activity, _, err := client.Repositories.ListCommitActivity(context.Background(), "o", "r") + if err != nil { + t.Errorf("RepositoriesService.ListCommitActivity returned error: %v", err) + } + + want := []*WeeklyCommitActivity{ + { + Days: []int{0, 3, 26, 20, 39, 1, 0}, + Total: Int(89), + Week: &Timestamp{time.Date(2012, 05, 06, 05, 00, 00, 0, time.UTC).Local()}, + }, + } + + if !reflect.DeepEqual(activity, want) { + t.Errorf("RepositoriesService.ListCommitActivity returned %+v, want %+v", activity, want) + } +} + +func TestRepositoriesService_ListCodeFrequency(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/stats/code_frequency", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + + fmt.Fprint(w, `[[1302998400, 1124, -435]]`) + }) + + code, _, err := client.Repositories.ListCodeFrequency(context.Background(), "o", "r") + if err != nil { + t.Errorf("RepositoriesService.ListCodeFrequency returned error: %v", err) + } + + want := []*WeeklyStats{{ + Week: &Timestamp{time.Date(2011, 04, 17, 00, 00, 00, 0, time.UTC).Local()}, + Additions: Int(1124), + Deletions: Int(-435), + }} + + if !reflect.DeepEqual(code, want) { + t.Errorf("RepositoriesService.ListCodeFrequency returned %+v, want %+v", code, want) + } +} + +func TestRepositoriesService_Participation(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/stats/participation", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + + fmt.Fprint(w, ` +{ + "all": [ +
11,21,15,2,8,1,8,23,17,21,11,10,33, + 91,38,34,22,23,32,3,43,87,71,18,13,5, + 13,16,66,27,12,45,110,117,13,8,18,9,19, + 26,39,12,20,31,46,91,45,10,24,9,29,7 + ], + "owner": [ + 3,2,3,0,2,0,5,14,7,9,1,5,0, + 48,19,2,0,1,10,2,23,40,35,8,8,2, + 10,6,30,0,2,9,53,104,3,3,10,4,7, + 11,21,4,4,22,26,63,11,2,14,1,10,3 + ] +} +`) + }) + + participation, _, err := client.Repositories.ListParticipation(context.Background(), "o", "r") + if err != nil { + t.Errorf("RepositoriesService.ListParticipation returned error: %v", err) + } + + want := &RepositoryParticipation{ + All: []int{ + 11, 21, 15, 2, 8, 1, 8, 23, 17, 21, 11, 10, 33, + 91, 38, 34, 22, 23, 32, 3, 43, 87, 71, 18, 13, 5, + 13, 16, 66, 27, 12, 45, 110, 117, 13, 8, 18, 9, 19, + 26, 39, 12, 20, 31, 46, 91, 45, 10, 24, 9, 29, 7, + }, + Owner: []int{ + 3, 2, 3, 0, 2, 0, 5, 14, 7, 9, 1, 5, 0, + 48, 19, 2, 0, 1, 10, 2, 23, 40, 35, 8, 8, 2, + 10, 6, 30, 0, 2, 9, 53, 104, 3, 3, 10, 4, 7, + 11, 21, 4, 4, 22, 26, 63, 11, 2, 14, 1, 10, 3, + }, + } + + if !reflect.DeepEqual(participation, want) { + t.Errorf("RepositoriesService.ListParticipation returned %+v, want %+v", participation, want) + } +} + +func TestRepositoriesService_ListPunchCard(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/stats/punch_card", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + + fmt.Fprint(w, `[ + [0, 0, 5], + [0, 1, 43], + [0, 2, 21] + ]`) + }) + + card, _, err := client.Repositories.ListPunchCard(context.Background(), "o", "r") + if err != nil { + t.Errorf("RepositoriesService.ListPunchCard returned error: %v", err) + } + + want := []*PunchCard{ + {Day: Int(0), Hour: Int(0), Commits: Int(5)}, + {Day: Int(0), Hour: Int(1), Commits: Int(43)}, + {Day: Int(0), Hour: Int(2), Commits: Int(21)}, + } + + if !reflect.DeepEqual(card, want) { + t.Errorf("RepositoriesService.ListPunchCard returned %+v, want %+v", card, want) + } +} diff --git a/vendor/github.com/google/go-github/github/repos_statuses_test.go b/vendor/github.com/google/go-github/github/repos_statuses_test.go new file mode 100644 index 000000000000..8e6c9378d272 --- /dev/null +++ b/vendor/github.com/google/go-github/github/repos_statuses_test.go @@ -0,0 +1,97 @@ +// Copyright 2013 The go-github AUTHORS. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package github + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "reflect" + "testing" +) + +func TestRepositoriesService_ListStatuses(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/commits/r/statuses", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testFormValues(t, r, values{"page": "2"}) + fmt.Fprint(w, `[{"id":1}]`) + }) + + opt := &ListOptions{Page: 2} + statuses, _, err := client.Repositories.ListStatuses(context.Background(), "o", "r", "r", opt) + if err != nil { + t.Errorf("Repositories.ListStatuses returned error: %v", err) + } + + want := []*RepoStatus{{ID: Int(1)}} + if !reflect.DeepEqual(statuses, want) { + t.Errorf("Repositories.ListStatuses returned %+v, want %+v", statuses, want) + } +} + +func TestRepositoriesService_ListStatuses_invalidOwner(t *testing.T) { + _, _, err := client.Repositories.ListStatuses(context.Background(), "%", "r", "r", nil) + testURLParseError(t, err) +} + +func TestRepositoriesService_CreateStatus(t *testing.T) { + setup() + defer teardown() + + input := &RepoStatus{State: String("s"), TargetURL: String("t"), Description: String("d")} + + mux.HandleFunc("/repos/o/r/statuses/r", func(w http.ResponseWriter, r *http.Request) { + v := new(RepoStatus) + json.NewDecoder(r.Body).Decode(v) + + testMethod(t, r, "POST") + if !reflect.DeepEqual(v, input) { + t.Errorf("Request body = %+v, want %+v", v, input) + } + fmt.Fprint(w, `{"id":1}`) + }) + + status, _, err := client.Repositories.CreateStatus(context.Background(), "o", "r", "r", input) + if err != nil { + t.Errorf("Repositories.CreateStatus returned error: %v", err) + } + + want := &RepoStatus{ID: Int(1)} + if !reflect.DeepEqual(status, want) { + t.Errorf("Repositories.CreateStatus returned %+v, want %+v", status, want) + } +} + +func TestRepositoriesService_CreateStatus_invalidOwner(t *testing.T) { + _, _, err := client.Repositories.CreateStatus(context.Background(), "%", "r", "r", nil) + testURLParseError(t, err) +} + +func TestRepositoriesService_GetCombinedStatus(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/commits/r/status", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testFormValues(t, r, values{"page": "2"}) + fmt.Fprint(w, `{"state":"success", "statuses":[{"id":1}]}`) + }) + + opt := &ListOptions{Page: 2} + status, _, err := client.Repositories.GetCombinedStatus(context.Background(), "o", "r", "r", opt) + if err != nil { + t.Errorf("Repositories.GetCombinedStatus returned error: %v", err) + } + + want := &CombinedStatus{State: String("success"), Statuses: []RepoStatus{{ID: Int(1)}}} + if !reflect.DeepEqual(status, want) { + t.Errorf("Repositories.GetCombinedStatus returned %+v, want %+v", status, want) + } +} diff --git a/vendor/github.com/google/go-github/github/repos_test.go b/vendor/github.com/google/go-github/github/repos_test.go new file mode 100644 index 000000000000..21ce09b30332 --- /dev/null +++ b/vendor/github.com/google/go-github/github/repos_test.go @@ -0,0 +1,713 @@ +// Copyright 2013 The go-github AUTHORS. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package github + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "reflect" + "strings" + "testing" +) + +func TestRepositoriesService_List_authenticatedUser(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/user/repos", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testHeader(t, r, "Accept", mediaTypeLicensesPreview) + fmt.Fprint(w, `[{"id":1},{"id":2}]`) + }) + + repos, _, err := client.Repositories.List(context.Background(), "", nil) + if err != nil { + t.Errorf("Repositories.List returned error: %v", err) + } + + want := []*Repository{{ID: Int(1)}, {ID: Int(2)}} + if !reflect.DeepEqual(repos, want) { + t.Errorf("Repositories.List returned %+v, want %+v", repos, want) + } +} + +func TestRepositoriesService_List_specifiedUser(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/users/u/repos", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testHeader(t, r, "Accept", mediaTypeLicensesPreview) + testFormValues(t, r, values{ + "visibility": "public", + "affiliation": "owner,collaborator", + "sort": "created", + "direction": "asc", + "page": "2", + }) + fmt.Fprint(w, `[{"id":1}]`) + }) + + opt := &RepositoryListOptions{ + Visibility: "public", + Affiliation: "owner,collaborator", + Sort: "created", + Direction: "asc", + ListOptions: ListOptions{Page: 2}, + } + repos, _, err := client.Repositories.List(context.Background(), "u", opt) + if err != nil { + t.Errorf("Repositories.List returned error: %v", err) + } + + want := []*Repository{{ID: Int(1)}} + if !reflect.DeepEqual(repos, want) { + t.Errorf("Repositories.List returned %+v, want %+v", repos, want) + } +} + +func TestRepositoriesService_List_specifiedUser_type(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/users/u/repos", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testHeader(t, r, "Accept", mediaTypeLicensesPreview) + testFormValues(t, r, values{ + "type": "owner", + }) + fmt.Fprint(w, `[{"id":1}]`) + }) + + opt := &RepositoryListOptions{ + Type: "owner", + } + repos, _, err := client.Repositories.List(context.Background(), "u", opt) + if err != nil { + t.Errorf("Repositories.List returned error: %v", err) + } + + want := []*Repository{{ID: Int(1)}} + if !reflect.DeepEqual(repos, want) { + t.Errorf("Repositories.List returned %+v, want %+v", repos, want) + } +} + +func TestRepositoriesService_List_invalidUser(t *testing.T) { + _, _, err := client.Repositories.List(context.Background(), "%", nil) + testURLParseError(t, err) +} + +func TestRepositoriesService_ListByOrg(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/orgs/o/repos", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testHeader(t, r, "Accept", mediaTypeLicensesPreview) + testFormValues(t, r, values{ + "type": "forks", + "page": "2", + }) + fmt.Fprint(w, `[{"id":1}]`) + }) + + opt := &RepositoryListByOrgOptions{"forks", ListOptions{Page: 2}} + repos, _, err := client.Repositories.ListByOrg(context.Background(), "o", opt) + if err != nil { + t.Errorf("Repositories.ListByOrg returned error: %v", err) + } + + want := []*Repository{{ID: Int(1)}} + if !reflect.DeepEqual(repos, want) { + t.Errorf("Repositories.ListByOrg returned %+v, want %+v", repos, want) + } +} + +func TestRepositoriesService_ListByOrg_invalidOrg(t *testing.T) { + _, _, err := client.Repositories.ListByOrg(context.Background(), "%", nil) + testURLParseError(t, err) +} + +func TestRepositoriesService_ListAll(t 
*testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repositories", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testFormValues(t, r, values{ + "since": "1", + "page": "2", + "per_page": "3", + }) + fmt.Fprint(w, `[{"id":1}]`) + }) + + opt := &RepositoryListAllOptions{1, ListOptions{2, 3}} + repos, _, err := client.Repositories.ListAll(context.Background(), opt) + if err != nil { + t.Errorf("Repositories.ListAll returned error: %v", err) + } + + want := []*Repository{{ID: Int(1)}} + if !reflect.DeepEqual(repos, want) { + t.Errorf("Repositories.ListAll returned %+v, want %+v", repos, want) + } +} + +func TestRepositoriesService_Create_user(t *testing.T) { + setup() + defer teardown() + + input := &Repository{Name: String("n")} + + mux.HandleFunc("/user/repos", func(w http.ResponseWriter, r *http.Request) { + v := new(Repository) + json.NewDecoder(r.Body).Decode(v) + + testMethod(t, r, "POST") + if !reflect.DeepEqual(v, input) { + t.Errorf("Request body = %+v, want %+v", v, input) + } + + fmt.Fprint(w, `{"id":1}`) + }) + + repo, _, err := client.Repositories.Create(context.Background(), "", input) + if err != nil { + t.Errorf("Repositories.Create returned error: %v", err) + } + + want := &Repository{ID: Int(1)} + if !reflect.DeepEqual(repo, want) { + t.Errorf("Repositories.Create returned %+v, want %+v", repo, want) + } +} + +func TestRepositoriesService_Create_org(t *testing.T) { + setup() + defer teardown() + + input := &Repository{Name: String("n")} + + mux.HandleFunc("/orgs/o/repos", func(w http.ResponseWriter, r *http.Request) { + v := new(Repository) + json.NewDecoder(r.Body).Decode(v) + + testMethod(t, r, "POST") + if !reflect.DeepEqual(v, input) { + t.Errorf("Request body = %+v, want %+v", v, input) + } + + fmt.Fprint(w, `{"id":1}`) + }) + + repo, _, err := client.Repositories.Create(context.Background(), "o", input) + if err != nil { + t.Errorf("Repositories.Create returned error: %v", err) + } + + want := &Repository{ID: Int(1)} + if !reflect.DeepEqual(repo, want) { + t.Errorf("Repositories.Create returned %+v, want %+v", repo, want) + } +} + +func TestRepositoriesService_Create_invalidOrg(t *testing.T) { + _, _, err := client.Repositories.Create(context.Background(), "%", nil) + testURLParseError(t, err) +} + +func TestRepositoriesService_Get(t *testing.T) { + setup() + defer teardown() + + acceptHeader := []string{mediaTypeLicensesPreview, mediaTypeSquashPreview, mediaTypeCodesOfConductPreview} + mux.HandleFunc("/repos/o/r", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testHeader(t, r, "Accept", strings.Join(acceptHeader, ", ")) + fmt.Fprint(w, `{"id":1,"name":"n","description":"d","owner":{"login":"l"},"license":{"key":"mit"}}`) + }) + + repo, _, err := client.Repositories.Get(context.Background(), "o", "r") + if err != nil { + t.Errorf("Repositories.Get returned error: %v", err) + } + + want := &Repository{ID: Int(1), Name: String("n"), Description: String("d"), Owner: &User{Login: String("l")}, License: &License{Key: String("mit")}} + if !reflect.DeepEqual(repo, want) { + t.Errorf("Repositories.Get returned %+v, want %+v", repo, want) + } +} + +func TestRepositoriesService_GetCodeOfConduct(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/community/code_of_conduct", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testHeader(t, r, "Accept", mediaTypeCodesOfConductPreview) + fmt.Fprint(w, `{ + "key": "key", + "name": "name", + "url": "url", + "body": 
"body"}`, + ) + }) + + coc, _, err := client.Repositories.GetCodeOfConduct(context.Background(), "o", "r") + if err != nil { + t.Errorf("Repositories.GetCodeOfConduct returned error: %v", err) + } + + want := &CodeOfConduct{ + Key: String("key"), + Name: String("name"), + URL: String("url"), + Body: String("body"), + } + + if !reflect.DeepEqual(coc, want) { + t.Errorf("Repositories.Get returned %+v, want %+v", coc, want) + } +} + +func TestRepositoriesService_GetByID(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repositories/1", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testHeader(t, r, "Accept", mediaTypeLicensesPreview) + fmt.Fprint(w, `{"id":1,"name":"n","description":"d","owner":{"login":"l"},"license":{"key":"mit"}}`) + }) + + repo, _, err := client.Repositories.GetByID(context.Background(), 1) + if err != nil { + t.Errorf("Repositories.GetByID returned error: %v", err) + } + + want := &Repository{ID: Int(1), Name: String("n"), Description: String("d"), Owner: &User{Login: String("l")}, License: &License{Key: String("mit")}} + if !reflect.DeepEqual(repo, want) { + t.Errorf("Repositories.GetByID returned %+v, want %+v", repo, want) + } +} + +func TestRepositoriesService_Edit(t *testing.T) { + setup() + defer teardown() + + i := true + input := &Repository{HasIssues: &i} + + mux.HandleFunc("/repos/o/r", func(w http.ResponseWriter, r *http.Request) { + v := new(Repository) + json.NewDecoder(r.Body).Decode(v) + + testMethod(t, r, "PATCH") + if !reflect.DeepEqual(v, input) { + t.Errorf("Request body = %+v, want %+v", v, input) + } + fmt.Fprint(w, `{"id":1}`) + }) + + repo, _, err := client.Repositories.Edit(context.Background(), "o", "r", input) + if err != nil { + t.Errorf("Repositories.Edit returned error: %v", err) + } + + want := &Repository{ID: Int(1)} + if !reflect.DeepEqual(repo, want) { + t.Errorf("Repositories.Edit returned %+v, want %+v", repo, want) + } +} + +func TestRepositoriesService_Delete(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "DELETE") + }) + + _, err := client.Repositories.Delete(context.Background(), "o", "r") + if err != nil { + t.Errorf("Repositories.Delete returned error: %v", err) + } +} + +func TestRepositoriesService_Get_invalidOwner(t *testing.T) { + _, _, err := client.Repositories.Get(context.Background(), "%", "r") + testURLParseError(t, err) +} + +func TestRepositoriesService_Edit_invalidOwner(t *testing.T) { + _, _, err := client.Repositories.Edit(context.Background(), "%", "r", nil) + testURLParseError(t, err) +} + +func TestRepositoriesService_ListContributors(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/contributors", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testFormValues(t, r, values{ + "anon": "true", + "page": "2", + }) + fmt.Fprint(w, `[{"contributions":42}]`) + }) + + opts := &ListContributorsOptions{Anon: "true", ListOptions: ListOptions{Page: 2}} + contributors, _, err := client.Repositories.ListContributors(context.Background(), "o", "r", opts) + if err != nil { + t.Errorf("Repositories.ListContributors returned error: %v", err) + } + + want := []*Contributor{{Contributions: Int(42)}} + if !reflect.DeepEqual(contributors, want) { + t.Errorf("Repositories.ListContributors returned %+v, want %+v", contributors, want) + } +} + +func TestRepositoriesService_ListLanguages(t *testing.T) { + setup() + defer teardown() + + 
mux.HandleFunc("/repos/o/r/languages", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + fmt.Fprint(w, `{"go":1}`) + }) + + languages, _, err := client.Repositories.ListLanguages(context.Background(), "o", "r") + if err != nil { + t.Errorf("Repositories.ListLanguages returned error: %v", err) + } + + want := map[string]int{"go": 1} + if !reflect.DeepEqual(languages, want) { + t.Errorf("Repositories.ListLanguages returned %+v, want %+v", languages, want) + } +} + +func TestRepositoriesService_ListTeams(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/teams", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testFormValues(t, r, values{"page": "2"}) + fmt.Fprint(w, `[{"id":1}]`) + }) + + opt := &ListOptions{Page: 2} + teams, _, err := client.Repositories.ListTeams(context.Background(), "o", "r", opt) + if err != nil { + t.Errorf("Repositories.ListTeams returned error: %v", err) + } + + want := []*Team{{ID: Int(1)}} + if !reflect.DeepEqual(teams, want) { + t.Errorf("Repositories.ListTeams returned %+v, want %+v", teams, want) + } +} + +func TestRepositoriesService_ListTags(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/tags", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testFormValues(t, r, values{"page": "2"}) + fmt.Fprint(w, `[{"name":"n", "commit" : {"sha" : "s", "url" : "u"}, "zipball_url": "z", "tarball_url": "t"}]`) + }) + + opt := &ListOptions{Page: 2} + tags, _, err := client.Repositories.ListTags(context.Background(), "o", "r", opt) + if err != nil { + t.Errorf("Repositories.ListTags returned error: %v", err) + } + + want := []*RepositoryTag{ + { + Name: String("n"), + Commit: &Commit{ + SHA: String("s"), + URL: String("u"), + }, + ZipballURL: String("z"), + TarballURL: String("t"), + }, + } + if !reflect.DeepEqual(tags, want) { + t.Errorf("Repositories.ListTags returned %+v, want %+v", tags, want) + } +} + +func TestRepositoriesService_ListBranches(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/branches", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testHeader(t, r, "Accept", mediaTypeProtectedBranchesPreview) + testFormValues(t, r, values{"page": "2"}) + fmt.Fprint(w, `[{"name":"master", "commit" : {"sha" : "a57781", "url" : "https://api.github.com/repos/o/r/commits/a57781"}}]`) + }) + + opt := &ListOptions{Page: 2} + branches, _, err := client.Repositories.ListBranches(context.Background(), "o", "r", opt) + if err != nil { + t.Errorf("Repositories.ListBranches returned error: %v", err) + } + + want := []*Branch{{Name: String("master"), Commit: &RepositoryCommit{SHA: String("a57781"), URL: String("https://api.github.com/repos/o/r/commits/a57781")}}} + if !reflect.DeepEqual(branches, want) { + t.Errorf("Repositories.ListBranches returned %+v, want %+v", branches, want) + } +} + +func TestRepositoriesService_GetBranch(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/branches/b", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testHeader(t, r, "Accept", mediaTypeProtectedBranchesPreview) + fmt.Fprint(w, `{"name":"n", "commit":{"sha":"s","commit":{"message":"m"}}, "protected":true}`) + }) + + branch, _, err := client.Repositories.GetBranch(context.Background(), "o", "r", "b") + if err != nil { + t.Errorf("Repositories.GetBranch returned error: %v", err) + } + + want := &Branch{ + Name: String("n"), + Commit: &RepositoryCommit{ + SHA: 
String("s"), + Commit: &Commit{ + Message: String("m"), + }, + }, + Protected: Bool(true), + } + + if !reflect.DeepEqual(branch, want) { + t.Errorf("Repositories.GetBranch returned %+v, want %+v", branch, want) + } +} + +func TestRepositoriesService_GetBranchProtection(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/branches/b/protection", func(w http.ResponseWriter, r *http.Request) { + v := new(ProtectionRequest) + json.NewDecoder(r.Body).Decode(v) + + testMethod(t, r, "GET") + testHeader(t, r, "Accept", mediaTypeProtectedBranchesPreview) + fmt.Fprintf(w, `{"required_status_checks":{"include_admins":true,"strict":true,"contexts":["continuous-integration"]},"required_pull_request_reviews":{"include_admins":true},"enforce_admins":{"url":"/repos/o/r/branches/b/protection/enforce_admins","enabled":true},"restrictions":{"users":[{"id":1,"login":"u"}],"teams":[{"id":2,"slug":"t"}]}}`) + }) + + protection, _, err := client.Repositories.GetBranchProtection(context.Background(), "o", "r", "b") + if err != nil { + t.Errorf("Repositories.GetBranchProtection returned error: %v", err) + } + + want := &Protection{ + RequiredStatusChecks: &RequiredStatusChecks{ + IncludeAdmins: true, + Strict: true, + Contexts: []string{"continuous-integration"}, + }, + RequiredPullRequestReviews: &RequiredPullRequestReviews{ + IncludeAdmins: true, + }, + EnforceAdmins: &AdminEnforcement{ + URL: String("/repos/o/r/branches/b/protection/enforce_admins"), + Enabled: true, + }, + Restrictions: &BranchRestrictions{ + Users: []*User{ + {Login: String("u"), ID: Int(1)}, + }, + Teams: []*Team{ + {Slug: String("t"), ID: Int(2)}, + }, + }, + } + if !reflect.DeepEqual(protection, want) { + t.Errorf("Repositories.GetBranchProtection returned %+v, want %+v", protection, want) + } +} + +func TestRepositoriesService_UpdateBranchProtection(t *testing.T) { + setup() + defer teardown() + + input := &ProtectionRequest{ + RequiredStatusChecks: &RequiredStatusChecks{ + IncludeAdmins: true, + Strict: true, + Contexts: []string{"continuous-integration"}, + }, + RequiredPullRequestReviews: &RequiredPullRequestReviews{ + IncludeAdmins: true, + }, + Restrictions: &BranchRestrictionsRequest{ + Users: []string{"u"}, + Teams: []string{"t"}, + }, + } + + mux.HandleFunc("/repos/o/r/branches/b/protection", func(w http.ResponseWriter, r *http.Request) { + v := new(ProtectionRequest) + json.NewDecoder(r.Body).Decode(v) + + testMethod(t, r, "PUT") + if !reflect.DeepEqual(v, input) { + t.Errorf("Request body = %+v, want %+v", v, input) + } + testHeader(t, r, "Accept", mediaTypeProtectedBranchesPreview) + fmt.Fprintf(w, `{"required_status_checks":{"include_admins":true,"strict":true,"contexts":["continuous-integration"]},"required_pull_request_reviews":{"include_admins":true},"restrictions":{"users":[{"id":1,"login":"u"}],"teams":[{"id":2,"slug":"t"}]}}`) + }) + + protection, _, err := client.Repositories.UpdateBranchProtection(context.Background(), "o", "r", "b", input) + if err != nil { + t.Errorf("Repositories.UpdateBranchProtection returned error: %v", err) + } + + want := &Protection{ + RequiredStatusChecks: &RequiredStatusChecks{ + IncludeAdmins: true, + Strict: true, + Contexts: []string{"continuous-integration"}, + }, + RequiredPullRequestReviews: &RequiredPullRequestReviews{ + IncludeAdmins: true, + }, + Restrictions: &BranchRestrictions{ + Users: []*User{ + {Login: String("u"), ID: Int(1)}, + }, + Teams: []*Team{ + {Slug: String("t"), ID: Int(2)}, + }, + }, + } + if !reflect.DeepEqual(protection, want) { + 
t.Errorf("Repositories.UpdateBranchProtection returned %+v, want %+v", protection, want) + } +} + +func TestRepositoriesService_RemoveBranchProtection(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/branches/b/protection", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "DELETE") + testHeader(t, r, "Accept", mediaTypeProtectedBranchesPreview) + w.WriteHeader(http.StatusNoContent) + }) + + _, err := client.Repositories.RemoveBranchProtection(context.Background(), "o", "r", "b") + if err != nil { + t.Errorf("Repositories.RemoveBranchProtection returned error: %v", err) + } +} + +func TestRepositoriesService_ListLanguages_invalidOwner(t *testing.T) { + _, _, err := client.Repositories.ListLanguages(context.Background(), "%", "%") + testURLParseError(t, err) +} + +func TestRepositoriesService_License(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/license", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + fmt.Fprint(w, `{"name": "LICENSE", "path": "LICENSE", "license":{"key":"mit","name":"MIT License","spdx_id":"MIT","url":"https://api.github.com/licenses/mit","featured":true}}`) + }) + + got, _, err := client.Repositories.License(context.Background(), "o", "r") + if err != nil { + t.Errorf("Repositories.License returned error: %v", err) + } + + want := &RepositoryLicense{ + Name: String("LICENSE"), + Path: String("LICENSE"), + License: &License{ + Name: String("MIT License"), + Key: String("mit"), + SPDXID: String("MIT"), + URL: String("https://api.github.com/licenses/mit"), + Featured: Bool(true), + }, + } + + if !reflect.DeepEqual(got, want) { + t.Errorf("Repositories.License returned %+v, want %+v", got, want) + } +} + +func TestRepositoriesService_GetRequiredStatusChecks(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/branches/b/protection/required_status_checks", func(w http.ResponseWriter, r *http.Request) { + v := new(ProtectionRequest) + json.NewDecoder(r.Body).Decode(v) + + testMethod(t, r, "GET") + testHeader(t, r, "Accept", mediaTypeProtectedBranchesPreview) + fmt.Fprint(w, `{"include_admins": true,"strict": true,"contexts": ["x","y","z"]}`) + }) + + checks, _, err := client.Repositories.GetRequiredStatusChecks(context.Background(), "o", "r", "b") + if err != nil { + t.Errorf("Repositories.GetRequiredStatusChecks returned error: %v", err) + } + + want := &RequiredStatusChecks{ + IncludeAdmins: true, + Strict: true, + Contexts: []string{"x", "y", "z"}, + } + if !reflect.DeepEqual(checks, want) { + t.Errorf("Repositories.GetRequiredStatusChecks returned %+v, want %+v", checks, want) + } +} + +func TestRepositoriesService_ListRequiredStatusChecksContexts(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/branches/b/protection/required_status_checks/contexts", func(w http.ResponseWriter, r *http.Request) { + v := new(ProtectionRequest) + json.NewDecoder(r.Body).Decode(v) + + testMethod(t, r, "GET") + testHeader(t, r, "Accept", mediaTypeProtectedBranchesPreview) + fmt.Fprint(w, `["x", "y", "z"]`) + }) + + contexts, _, err := client.Repositories.ListRequiredStatusChecksContexts(context.Background(), "o", "r", "b") + if err != nil { + t.Errorf("Repositories.ListRequiredStatusChecksContexts returned error: %v", err) + } + + want := []string{"x", "y", "z"} + if !reflect.DeepEqual(contexts, want) { + t.Errorf("Repositories.ListRequiredStatusChecksContexts returned %+v, want %+v", contexts, want) + } +} diff --git 
a/vendor/github.com/google/go-github/github/repos_traffic_test.go b/vendor/github.com/google/go-github/github/repos_traffic_test.go new file mode 100644 index 000000000000..c425f5cd5d14 --- /dev/null +++ b/vendor/github.com/google/go-github/github/repos_traffic_test.go @@ -0,0 +1,145 @@ +// Copyright 2016 The go-github AUTHORS. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package github + +import ( + "context" + "fmt" + "net/http" + "reflect" + "testing" + "time" +) + +func TestRepositoriesService_ListTrafficReferrers(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/traffic/popular/referrers", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + fmt.Fprintf(w, `[{ + "referrer": "Google", + "count": 4, + "uniques": 3 + }]`) + }) + referrers, _, err := client.Repositories.ListTrafficReferrers(context.Background(), "o", "r") + if err != nil { + t.Errorf("Repositories.ListTrafficReferrers returned error: %+v", err) + } + + want := []*TrafficReferrer{{ + Referrer: String("Google"), + Count: Int(4), + Uniques: Int(3), + }} + if !reflect.DeepEqual(referrers, want) { + t.Errorf("Repositories.ListTrafficReferrers returned %+v, want %+v", referrers, want) + } +} + +func TestRepositoriesService_ListTrafficPaths(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/traffic/popular/paths", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + fmt.Fprintf(w, `[{ + "path": "/github/hubot", + "title": "github/hubot: A customizable life embetterment robot.", + "count": 3542, + "uniques": 2225 + }]`) + }) + paths, _, err := client.Repositories.ListTrafficPaths(context.Background(), "o", "r") + if err != nil { + t.Errorf("Repositories.ListTrafficPaths returned error: %+v", err) + } + + want := []*TrafficPath{{ + Path: String("/github/hubot"), + Title: String("github/hubot: A customizable life embetterment robot."), + Count: Int(3542), + Uniques: Int(2225), + }} + if !reflect.DeepEqual(paths, want) { + t.Errorf("Repositories.ListTrafficPaths returned %+v, want %+v", paths, want) + } +} + +func TestRepositoriesService_ListTrafficViews(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/traffic/views", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + fmt.Fprintf(w, `{"count": 7, + "uniques": 6, + "views": [{ + "timestamp": "2016-05-31T16:00:00.000Z", + "count": 7, + "uniques": 6 + }]}`) + }) + + views, _, err := client.Repositories.ListTrafficViews(context.Background(), "o", "r", nil) + if err != nil { + t.Errorf("Repositories.ListTrafficViews returned error: %+v", err) + } + + want := &TrafficViews{ + Views: []*TrafficData{{ + Timestamp: &Timestamp{time.Date(2016, time.May, 31, 16, 0, 0, 0, time.UTC)}, + Count: Int(7), + Uniques: Int(6), + }}, + Count: Int(7), + Uniques: Int(6), + } + + if !reflect.DeepEqual(views, want) { + t.Errorf("Repositories.ListTrafficViews returned %+v, want %+v", views, want) + } +} + +func TestRepositoriesService_ListTrafficClones(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/repos/o/r/traffic/clones", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + fmt.Fprintf(w, `{"count": 7, + "uniques": 6, + "clones": [{ + "timestamp": "2016-05-31T16:00:00.00Z", + "count": 7, + "uniques": 6 + }]}`) + }) + + clones, _, err := client.Repositories.ListTrafficClones(context.Background(), "o", "r", nil) + if err != nil { + t.Errorf("Repositories.ListTrafficClones returned error: %+v", err) + } + + want := &TrafficClones{ + Clones: []*TrafficData{{ + Timestamp: &Timestamp{time.Date(2016, time.May, 31, 16, 0, 0, 0, time.UTC)}, + Count: Int(7), + Uniques: Int(6), + }}, + Count: Int(7), + Uniques: Int(6), + } + + if !reflect.DeepEqual(clones, want) { + t.Errorf("Repositories.ListTrafficClones returned %+v, want %+v", clones, want) + } +} diff --git a/vendor/github.com/google/go-github/github/search_test.go b/vendor/github.com/google/go-github/github/search_test.go new file mode 100644 index 000000000000..ee2f69be2a33 --- /dev/null +++ b/vendor/github.com/google/go-github/github/search_test.go @@ -0,0 +1,239 @@ +// Copyright 2013 The go-github AUTHORS. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package github + +import ( + "context" + "fmt" + "net/http" + "reflect" + + "testing" +) + +func TestSearchService_Repositories(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/search/repositories", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testFormValues(t, r, values{ + "q": "blah", + "sort": "forks", + "order": "desc", + "page": "2", + "per_page": "2", + }) + + fmt.Fprint(w, `{"total_count": 4, "incomplete_results": false, "items": [{"id":1},{"id":2}]}`) + }) + + opts := &SearchOptions{Sort: "forks", Order: "desc", ListOptions: ListOptions{Page: 2, PerPage: 2}} + result, _, err := client.Search.Repositories(context.Background(), "blah", opts) + if err != nil { + t.Errorf("Search.Repositories returned error: %v", err) + } + + want := &RepositoriesSearchResult{ + Total: Int(4), + IncompleteResults: Bool(false), + Repositories: []Repository{{ID: Int(1)}, {ID: Int(2)}}, + } + if !reflect.DeepEqual(result, want) { + t.Errorf("Search.Repositories returned %+v, want %+v", result, want) + } +} + +func TestSearchService_Commits(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/search/commits", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testFormValues(t, r, values{ + "q": "blah", + "sort": "author-date", + "order": "desc", + }) + + fmt.Fprint(w, `{"total_count": 4, "incomplete_results": false, "items": [{"sha":"random_hash1"},{"sha":"random_hash2"}]}`) + }) + + opts := &SearchOptions{Sort: "author-date", Order: "desc"} + result, _, err := client.Search.Commits(context.Background(), "blah", opts) + if err != nil { + t.Errorf("Search.Commits returned error: %v", err) + } + + want := &CommitsSearchResult{ + Total: Int(4), + IncompleteResults: Bool(false), + Commits: []*CommitResult{{SHA: String("random_hash1")}, {SHA: String("random_hash2")}}, + } + if !reflect.DeepEqual(result, want) { + t.Errorf("Search.Commits returned %+v, want %+v", result, want) + } +} + +func TestSearchService_Issues(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/search/issues", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testFormValues(t, r, values{ + "q": "blah", + "sort": "forks", + "order": "desc", + "page": "2", + "per_page": "2", + }) + + fmt.Fprint(w, `{"total_count": 4, "incomplete_results": true, "items": [{"number":1},{"number":2}]}`) + }) + + opts := &SearchOptions{Sort: "forks", Order: "desc", ListOptions: ListOptions{Page: 2, PerPage: 2}} + result, _, err := client.Search.Issues(context.Background(), "blah", opts) + if err != nil { + t.Errorf("Search.Issues returned error: %v", err) + } + + want := &IssuesSearchResult{ + Total: Int(4), + IncompleteResults: Bool(true), 
+ Issues: []Issue{{Number: Int(1)}, {Number: Int(2)}}, + } + if !reflect.DeepEqual(result, want) { + t.Errorf("Search.Issues returned %+v, want %+v", result, want) + } +} + +func TestSearchService_Users(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/search/users", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testFormValues(t, r, values{ + "q": "blah", + "sort": "forks", + "order": "desc", + "page": "2", + "per_page": "2", + }) + + fmt.Fprint(w, `{"total_count": 4, "incomplete_results": false, "items": [{"id":1},{"id":2}]}`) + }) + + opts := &SearchOptions{Sort: "forks", Order: "desc", ListOptions: ListOptions{Page: 2, PerPage: 2}} + result, _, err := client.Search.Users(context.Background(), "blah", opts) + if err != nil { + t.Errorf("Search.Users returned error: %v", err) + } + + want := &UsersSearchResult{ + Total: Int(4), + IncompleteResults: Bool(false), + Users: []User{{ID: Int(1)}, {ID: Int(2)}}, + } + if !reflect.DeepEqual(result, want) { + t.Errorf("Search.Users returned %+v, want %+v", result, want) + } +} + +func TestSearchService_Code(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/search/code", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testFormValues(t, r, values{ + "q": "blah", + "sort": "forks", + "order": "desc", + "page": "2", + "per_page": "2", + }) + + fmt.Fprint(w, `{"total_count": 4, "incomplete_results": false, "items": [{"name":"1"},{"name":"2"}]}`) + }) + + opts := &SearchOptions{Sort: "forks", Order: "desc", ListOptions: ListOptions{Page: 2, PerPage: 2}} + result, _, err := client.Search.Code(context.Background(), "blah", opts) + if err != nil { + t.Errorf("Search.Code returned error: %v", err) + } + + want := &CodeSearchResult{ + Total: Int(4), + IncompleteResults: Bool(false), + CodeResults: []CodeResult{{Name: String("1")}, {Name: String("2")}}, + } + if !reflect.DeepEqual(result, want) { + t.Errorf("Search.Code returned %+v, want %+v", result, want) + } +} + +func TestSearchService_CodeTextMatch(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/search/code", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + + textMatchResponse := ` + { + "total_count": 1, + "incomplete_results": false, + "items": [ + { + "name":"gopher1", + "text_matches": [ + { + "fragment": "I'm afraid my friend what you have found\nIs a gopher who lives to feed", + "matches": [ + { + "text": "gopher", + "indices": [ + 14, + 21 + ] + } + ] + } + ] + } + ] + } + ` + + fmt.Fprint(w, textMatchResponse) + }) + + opts := &SearchOptions{Sort: "forks", Order: "desc", ListOptions: ListOptions{Page: 2, PerPage: 2}, TextMatch: true} + result, _, err := client.Search.Code(context.Background(), "blah", opts) + if err != nil { + t.Errorf("Search.Code returned error: %v", err) + } + + wantedCodeResult := CodeResult{ + Name: String("gopher1"), + TextMatches: []TextMatch{{ + Fragment: String("I'm afraid my friend what you have found\nIs a gopher who lives to feed"), + Matches: []Match{{Text: String("gopher"), Indices: []int{14, 21}}}, + }, + }, + } + + want := &CodeSearchResult{ + Total: Int(1), + IncompleteResults: Bool(false), + CodeResults: []CodeResult{wantedCodeResult}, + } + if !reflect.DeepEqual(result, want) { + t.Errorf("Search.Code returned %+v, want %+v", result, want) + } +} diff --git a/vendor/github.com/google/go-github/github/strings_test.go b/vendor/github.com/google/go-github/github/strings_test.go new file mode 100644 index 000000000000..6af88acd390a 
--- /dev/null +++ b/vendor/github.com/google/go-github/github/strings_test.go @@ -0,0 +1,141 @@ +// Copyright 2013 The go-github AUTHORS. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package github + +import ( + "fmt" + "testing" + "time" +) + +func TestStringify(t *testing.T) { + var nilPointer *string + + var tests = []struct { + in interface{} + out string + }{ + // basic types + {"foo", `"foo"`}, + {123, `123`}, + {1.5, `1.5`}, + {false, `false`}, + { + []string{"a", "b"}, + `["a" "b"]`, + }, + { + struct { + A []string + }{nil}, + // nil slice is skipped + `{}`, + }, + { + struct { + A string + }{"foo"}, + // structs not of a named type get no prefix + `{A:"foo"}`, + }, + + // pointers + {nilPointer, ``}, + {String("foo"), `"foo"`}, + {Int(123), `123`}, + {Bool(false), `false`}, + { + []*string{String("a"), String("b")}, + `["a" "b"]`, + }, + + // actual GitHub structs + { + Timestamp{time.Date(2006, 01, 02, 15, 04, 05, 0, time.UTC)}, + `github.Timestamp{2006-01-02 15:04:05 +0000 UTC}`, + }, + { + &Timestamp{time.Date(2006, 01, 02, 15, 04, 05, 0, time.UTC)}, + `github.Timestamp{2006-01-02 15:04:05 +0000 UTC}`, + }, + { + User{ID: Int(123), Name: String("n")}, + `github.User{ID:123, Name:"n"}`, + }, + { + Repository{Owner: &User{ID: Int(123)}}, + `github.Repository{Owner:github.User{ID:123}}`, + }, + } + + for i, tt := range tests { + s := Stringify(tt.in) + if s != tt.out { + t.Errorf("%d. Stringify(%q) => %q, want %q", i, tt.in, s, tt.out) + } + } +} + +// Directly test the String() methods on various GitHub types. We don't do an +// exhaustive test of all the various field types, since TestStringify() above +// takes care of that. Rather, we just make sure that Stringify() is being +// used to build the strings, which we do by verifying that pointers are +// stringified as their underlying value. 
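+// For example, a field set with the String("n") helper should show up below as +// Name:"n", not as a hexadecimal pointer address.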
+func TestString(t *testing.T) { + var tests = []struct { + in interface{} + out string + }{ + {CodeResult{Name: String("n")}, `github.CodeResult{Name:"n"}`}, + {CommitAuthor{Name: String("n")}, `github.CommitAuthor{Name:"n"}`}, + {CommitFile{SHA: String("s")}, `github.CommitFile{SHA:"s"}`}, + {CommitStats{Total: Int(1)}, `github.CommitStats{Total:1}`}, + {CommitsComparison{TotalCommits: Int(1)}, `github.CommitsComparison{TotalCommits:1}`}, + {Commit{SHA: String("s")}, `github.Commit{SHA:"s"}`}, + {Event{ID: String("1")}, `github.Event{ID:"1"}`}, + {GistComment{ID: Int(1)}, `github.GistComment{ID:1}`}, + {GistFile{Size: Int(1)}, `github.GistFile{Size:1}`}, + {Gist{ID: String("1")}, `github.Gist{ID:"1", Files:map[]}`}, + {GitObject{SHA: String("s")}, `github.GitObject{SHA:"s"}`}, + {Gitignore{Name: String("n")}, `github.Gitignore{Name:"n"}`}, + {Hook{ID: Int(1)}, `github.Hook{Config:map[], ID:1}`}, + {IssueComment{ID: Int(1)}, `github.IssueComment{ID:1}`}, + {Issue{Number: Int(1)}, `github.Issue{Number:1}`}, + {Key{ID: Int(1)}, `github.Key{ID:1}`}, + {Label{ID: Int(1), Name: String("l")}, `github.Label{ID:1, Name:"l"}`}, + {Organization{ID: Int(1)}, `github.Organization{ID:1}`}, + {PullRequestComment{ID: Int(1)}, `github.PullRequestComment{ID:1}`}, + {PullRequest{Number: Int(1)}, `github.PullRequest{Number:1}`}, + {PullRequestReview{ID: Int(1)}, `github.PullRequestReview{ID:1}`}, + {DraftReviewComment{Position: Int(1)}, `github.DraftReviewComment{Position:1}`}, + {PullRequestReviewRequest{Body: String("r")}, `github.PullRequestReviewRequest{Body:"r"}`}, + {PullRequestReviewDismissalRequest{Message: String("r")}, `github.PullRequestReviewDismissalRequest{Message:"r"}`}, + {PushEventCommit{SHA: String("s")}, `github.PushEventCommit{SHA:"s"}`}, + {PushEvent{PushID: Int(1)}, `github.PushEvent{PushID:1}`}, + {Reference{Ref: String("r")}, `github.Reference{Ref:"r"}`}, + {ReleaseAsset{ID: Int(1)}, `github.ReleaseAsset{ID:1}`}, + {RepoStatus{ID: Int(1)}, `github.RepoStatus{ID:1}`}, + {RepositoryComment{ID: Int(1)}, `github.RepositoryComment{ID:1}`}, + {RepositoryCommit{SHA: String("s")}, `github.RepositoryCommit{SHA:"s"}`}, + {RepositoryContent{Name: String("n")}, `github.RepositoryContent{Name:"n"}`}, + {RepositoryRelease{ID: Int(1)}, `github.RepositoryRelease{ID:1}`}, + {Repository{ID: Int(1)}, `github.Repository{ID:1}`}, + {Team{ID: Int(1)}, `github.Team{ID:1}`}, + {TreeEntry{SHA: String("s")}, `github.TreeEntry{SHA:"s"}`}, + {Tree{SHA: String("s")}, `github.Tree{SHA:"s"}`}, + {User{ID: Int(1)}, `github.User{ID:1}`}, + {WebHookAuthor{Name: String("n")}, `github.WebHookAuthor{Name:"n"}`}, + {WebHookCommit{ID: String("1")}, `github.WebHookCommit{ID:"1"}`}, + {WebHookPayload{Ref: String("r")}, `github.WebHookPayload{Ref:"r"}`}, + } + + for i, tt := range tests { + s := tt.in.(fmt.Stringer).String() + if s != tt.out { + t.Errorf("%d. String() => %q, want %q", i, s, tt.out) + } + } +} diff --git a/vendor/github.com/google/go-github/github/timestamp_test.go b/vendor/github.com/google/go-github/github/timestamp_test.go new file mode 100644 index 000000000000..997b43db899c --- /dev/null +++ b/vendor/github.com/google/go-github/github/timestamp_test.go @@ -0,0 +1,187 @@ +// Copyright 2013 The go-github AUTHORS. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package github + +import ( + "encoding/json" + "fmt" + "testing" + "time" +) + +const ( + emptyTimeStr = `"0001-01-01T00:00:00Z"` + referenceTimeStr = `"2006-01-02T15:04:05Z"` + referenceUnixTimeStr = `1136214245` +) + +var ( + referenceTime = time.Date(2006, 01, 02, 15, 04, 05, 0, time.UTC) + unixOrigin = time.Unix(0, 0).In(time.UTC) +) + +func TestTimestamp_Marshal(t *testing.T) { + testCases := []struct { + desc string + data Timestamp + want string + wantErr bool + equal bool + }{ + {"Reference", Timestamp{referenceTime}, referenceTimeStr, false, true}, + {"Empty", Timestamp{}, emptyTimeStr, false, true}, + {"Mismatch", Timestamp{}, referenceTimeStr, false, false}, + } + for _, tc := range testCases { + out, err := json.Marshal(tc.data) + if gotErr := err != nil; gotErr != tc.wantErr { + t.Errorf("%s: gotErr=%v, wantErr=%v, err=%v", tc.desc, gotErr, tc.wantErr, err) + } + got := string(out) + equal := got == tc.want + if equal != tc.equal { + t.Errorf("%s: got=%s, want=%s, equal=%v, want=%v", tc.desc, got, tc.want, equal, tc.equal) + } + } +} + +func TestTimestamp_Unmarshal(t *testing.T) { + testCases := []struct { + desc string + data string + want Timestamp + wantErr bool + equal bool + }{ + {"Reference", referenceTimeStr, Timestamp{referenceTime}, false, true}, + {"ReferenceUnix", `1136214245`, Timestamp{referenceTime}, false, true}, + {"Empty", emptyTimeStr, Timestamp{}, false, true}, + {"UnixStart", `0`, Timestamp{unixOrigin}, false, true}, + {"Mismatch", referenceTimeStr, Timestamp{}, false, false}, + {"MismatchUnix", `0`, Timestamp{}, false, false}, + {"Invalid", `"asdf"`, Timestamp{referenceTime}, true, false}, + } + for _, tc := range testCases { + var got Timestamp + err := json.Unmarshal([]byte(tc.data), &got) + if gotErr := err != nil; gotErr != tc.wantErr { + t.Errorf("%s: gotErr=%v, wantErr=%v, err=%v", tc.desc, gotErr, tc.wantErr, err) + continue + } + equal := got.Equal(tc.want) + if equal != tc.equal { + t.Errorf("%s: got=%#v, want=%#v, equal=%v, want=%v", tc.desc, got, tc.want, equal, tc.equal) + } + } +} + +func TestTimestamp_MarshalReflexivity(t *testing.T) { + testCases := []struct { + desc string + data Timestamp + }{ + {"Reference", Timestamp{referenceTime}}, + {"Empty", Timestamp{}}, + } + for _, tc := range testCases { + data, err := json.Marshal(tc.data) + if err != nil { + t.Errorf("%s: Marshal err=%v", tc.desc, err) + } + var got Timestamp + err = json.Unmarshal(data, &got) + if err != nil { + t.Errorf("%s: Unmarshal err=%v", tc.desc, err) + } + if !got.Equal(tc.data) { + t.Errorf("%s: %+v != %+v", tc.desc, got, tc.data) + } + } +} + +type WrappedTimestamp struct { + A int + Time Timestamp +} + +func TestWrappedTimestamp_Marshal(t *testing.T) { + testCases := []struct { + desc string + data WrappedTimestamp + want string + wantErr bool + equal bool + }{ + {"Reference", WrappedTimestamp{0, Timestamp{referenceTime}}, fmt.Sprintf(`{"A":0,"Time":%s}`, referenceTimeStr), false, true}, + {"Empty", WrappedTimestamp{}, fmt.Sprintf(`{"A":0,"Time":%s}`, emptyTimeStr), false, true}, + {"Mismatch", WrappedTimestamp{}, fmt.Sprintf(`{"A":0,"Time":%s}`, referenceTimeStr), false, false}, + } + for _, tc := range testCases { + out, err := json.Marshal(tc.data) + if gotErr := err != nil; gotErr != tc.wantErr { + t.Errorf("%s: gotErr=%v, wantErr=%v, err=%v", tc.desc, gotErr, tc.wantErr, err) + } + got := string(out) + equal := got == tc.want + if equal != tc.equal { + t.Errorf("%s: got=%s, want=%s, equal=%v, want=%v", tc.desc, got, tc.want, equal, tc.equal) + } + } +} 
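+ +// As with Timestamp above, the cases below should accept both RFC3339 strings +// (referenceTimeStr) and bare Unix epoch integers (referenceUnixTimeStr); this +// dual wire format is why Timestamp needs custom JSON unmarshaling rather than +// a plain time.Time.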
+func TestWrappedTimestamp_Unmarshal(t *testing.T) { + testCases := []struct { + desc string + data string + want WrappedTimestamp + wantErr bool + equal bool + }{ + {"Reference", referenceTimeStr, WrappedTimestamp{0, Timestamp{referenceTime}}, false, true}, + {"ReferenceUnix", referenceUnixTimeStr, WrappedTimestamp{0, Timestamp{referenceTime}}, false, true}, + {"Empty", emptyTimeStr, WrappedTimestamp{0, Timestamp{}}, false, true}, + {"UnixStart", `0`, WrappedTimestamp{0, Timestamp{unixOrigin}}, false, true}, + {"Mismatch", referenceTimeStr, WrappedTimestamp{0, Timestamp{}}, false, false}, + {"MismatchUnix", `0`, WrappedTimestamp{0, Timestamp{}}, false, false}, + {"Invalid", `"asdf"`, WrappedTimestamp{0, Timestamp{referenceTime}}, true, false}, + } + for _, tc := range testCases { + var got Timestamp + err := json.Unmarshal([]byte(tc.data), &got) + if gotErr := err != nil; gotErr != tc.wantErr { + t.Errorf("%s: gotErr=%v, wantErr=%v, err=%v", tc.desc, gotErr, tc.wantErr, err) + continue + } + equal := got.Time.Equal(tc.want.Time.Time) + if equal != tc.equal { + t.Errorf("%s: got=%#v, want=%#v, equal=%v, want=%v", tc.desc, got, tc.want, equal, tc.equal) + } + } +} + +func TestWrappedTimestamp_MarshalReflexivity(t *testing.T) { + testCases := []struct { + desc string + data WrappedTimestamp + }{ + {"Reference", WrappedTimestamp{0, Timestamp{referenceTime}}}, + {"Empty", WrappedTimestamp{0, Timestamp{}}}, + } + for _, tc := range testCases { + bytes, err := json.Marshal(tc.data) + if err != nil { + t.Errorf("%s: Marshal err=%v", tc.desc, err) + } + var got WrappedTimestamp + err = json.Unmarshal(bytes, &got) + if err != nil { + t.Errorf("%s: Unmarshal err=%v", tc.desc, err) + } + if !got.Time.Equal(tc.data.Time) { + t.Errorf("%s: %+v != %+v", tc.desc, got, tc.data) + } + } +} diff --git a/vendor/github.com/google/go-github/github/users_administration_test.go b/vendor/github.com/google/go-github/github/users_administration_test.go new file mode 100644 index 000000000000..8f02b88cea77 --- /dev/null +++ b/vendor/github.com/google/go-github/github/users_administration_test.go @@ -0,0 +1,72 @@ +// Copyright 2014 The go-github AUTHORS. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package github + +import ( + "context" + "net/http" + "testing" +) + +func TestUsersService_PromoteSiteAdmin(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/users/u/site_admin", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "PUT") + w.WriteHeader(http.StatusNoContent) + }) + + _, err := client.Users.PromoteSiteAdmin(context.Background(), "u") + if err != nil { + t.Errorf("Users.PromoteSiteAdmin returned error: %v", err) + } +} + +func TestUsersService_DemoteSiteAdmin(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/users/u/site_admin", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "DELETE") + w.WriteHeader(http.StatusNoContent) + }) + + _, err := client.Users.DemoteSiteAdmin(context.Background(), "u") + if err != nil { + t.Errorf("Users.DemoteSiteAdmin returned error: %v", err) + } +} + +func TestUsersService_Suspend(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/users/u/suspended", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "PUT") + w.WriteHeader(http.StatusNoContent) + }) + + _, err := client.Users.Suspend(context.Background(), "u") + if err != nil { + t.Errorf("Users.Suspend returned error: %v", err) + } +} + +func TestUsersService_Unsuspend(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/users/u/suspended", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "DELETE") + w.WriteHeader(http.StatusNoContent) + }) + + _, err := client.Users.Unsuspend(context.Background(), "u") + if err != nil { + t.Errorf("Users.Unsuspend returned error: %v", err) + } +} diff --git a/vendor/github.com/google/go-github/github/users_blocking_test.go b/vendor/github.com/google/go-github/github/users_blocking_test.go new file mode 100644 index 000000000000..ea7a8abdc23a --- /dev/null +++ b/vendor/github.com/google/go-github/github/users_blocking_test.go @@ -0,0 +1,90 @@ +// Copyright 2017 The go-github AUTHORS. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package github + +import ( + "context" + "fmt" + "net/http" + "reflect" + "testing" +) + +func TestUsersService_ListBlockedUsers(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/user/blocks", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testHeader(t, r, "Accept", mediaTypeBlockUsersPreview) + testFormValues(t, r, values{"page": "2"}) + fmt.Fprint(w, `[{ + "login": "octocat" + }]`) + }) + + opt := &ListOptions{Page: 2} + blockedUsers, _, err := client.Users.ListBlockedUsers(context.Background(), opt) + if err != nil { + t.Errorf("Users.ListBlockedUsers returned error: %v", err) + } + + want := []*User{{Login: String("octocat")}} + if !reflect.DeepEqual(blockedUsers, want) { + t.Errorf("Users.ListBlockedUsers returned %+v, want %+v", blockedUsers, want) + } +} + +func TestUsersService_IsBlocked(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/user/blocks/u", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testHeader(t, r, "Accept", mediaTypeBlockUsersPreview) + w.WriteHeader(http.StatusNoContent) + }) + + isBlocked, _, err := client.Users.IsBlocked(context.Background(), "u") + if err != nil { + t.Errorf("Users.IsBlocked returned error: %v", err) + } + if want := true; isBlocked != want { + t.Errorf("Users.IsBlocked returned %+v, want %+v", isBlocked, want) + } +} + +func TestUsersService_BlockUser(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/user/blocks/u", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "PUT") + testHeader(t, r, "Accept", mediaTypeBlockUsersPreview) + w.WriteHeader(http.StatusNoContent) + }) + + _, err := client.Users.BlockUser(context.Background(), "u") + if err != nil { + t.Errorf("Users.BlockUser returned error: %v", err) + } +} + +func TestUsersService_UnblockUser(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/user/blocks/u", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "DELETE") + testHeader(t, r, "Accept", mediaTypeBlockUsersPreview) + w.WriteHeader(http.StatusNoContent) + }) + + _, err := client.Users.UnblockUser(context.Background(), "u") + if err != nil { + t.Errorf("Users.UnblockUser returned error: %v", err) + } +} diff --git a/vendor/github.com/google/go-github/github/users_emails_test.go b/vendor/github.com/google/go-github/github/users_emails_test.go new file mode 100644 index 000000000000..3f8176a00788 --- /dev/null +++ b/vendor/github.com/google/go-github/github/users_emails_test.go @@ -0,0 +1,95 @@ +// Copyright 2013 The go-github AUTHORS. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package github + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "reflect" + "testing" +) + +func TestUsersService_ListEmails(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/user/emails", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testFormValues(t, r, values{"page": "2"}) + fmt.Fprint(w, `[{ + "email": "user@example.com", + "verified": false, + "primary": true + }]`) + }) + + opt := &ListOptions{Page: 2} + emails, _, err := client.Users.ListEmails(context.Background(), opt) + if err != nil { + t.Errorf("Users.ListEmails returned error: %v", err) + } + + want := []*UserEmail{{Email: String("user@example.com"), Verified: Bool(false), Primary: Bool(true)}} + if !reflect.DeepEqual(emails, want) { + t.Errorf("Users.ListEmails returned %+v, want %+v", emails, want) + } +} + +func TestUsersService_AddEmails(t *testing.T) { + setup() + defer teardown() + + input := []string{"new@example.com"} + + mux.HandleFunc("/user/emails", func(w http.ResponseWriter, r *http.Request) { + var v []string + json.NewDecoder(r.Body).Decode(&v) + + testMethod(t, r, "POST") + if !reflect.DeepEqual(v, input) { + t.Errorf("Request body = %+v, want %+v", v, input) + } + + fmt.Fprint(w, `[{"email":"old@example.com"}, {"email":"new@example.com"}]`) + }) + + emails, _, err := client.Users.AddEmails(context.Background(), input) + if err != nil { + t.Errorf("Users.AddEmails returned error: %v", err) + } + + want := []*UserEmail{ + {Email: String("old@example.com")}, + {Email: String("new@example.com")}, + } + if !reflect.DeepEqual(emails, want) { + t.Errorf("Users.AddEmails returned %+v, want %+v", emails, want) + } +} + +func TestUsersService_DeleteEmails(t *testing.T) { + setup() + defer teardown() + + input := []string{"user@example.com"} + + mux.HandleFunc("/user/emails", func(w http.ResponseWriter, r *http.Request) { + var v []string + json.NewDecoder(r.Body).Decode(&v) + + testMethod(t, r, "DELETE") + if !reflect.DeepEqual(v, input) { + t.Errorf("Request body = %+v, want %+v", v, input) + } + }) + + _, err := client.Users.DeleteEmails(context.Background(), input) + if err != nil { + t.Errorf("Users.DeleteEmails returned error: %v", err) + } +} diff --git a/vendor/github.com/google/go-github/github/users_followers_test.go b/vendor/github.com/google/go-github/github/users_followers_test.go new file mode 100644 index 000000000000..688af2c4450c --- /dev/null +++ b/vendor/github.com/google/go-github/github/users_followers_test.go @@ -0,0 +1,223 @@ +// Copyright 2013 The go-github AUTHORS. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package github + +import ( + "context" + "fmt" + "net/http" + "reflect" + "testing" +) + +func TestUsersService_ListFollowers_authenticatedUser(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/user/followers", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testFormValues(t, r, values{"page": "2"}) + fmt.Fprint(w, `[{"id":1}]`) + }) + + opt := &ListOptions{Page: 2} + users, _, err := client.Users.ListFollowers(context.Background(), "", opt) + if err != nil { + t.Errorf("Users.ListFollowers returned error: %v", err) + } + + want := []*User{{ID: Int(1)}} + if !reflect.DeepEqual(users, want) { + t.Errorf("Users.ListFollowers returned %+v, want %+v", users, want) + } +} + +func TestUsersService_ListFollowers_specifiedUser(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/users/u/followers", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + fmt.Fprint(w, `[{"id":1}]`) + }) + + users, _, err := client.Users.ListFollowers(context.Background(), "u", nil) + if err != nil { + t.Errorf("Users.ListFollowers returned error: %v", err) + } + + want := []*User{{ID: Int(1)}} + if !reflect.DeepEqual(users, want) { + t.Errorf("Users.ListFollowers returned %+v, want %+v", users, want) + } +} + +func TestUsersService_ListFollowers_invalidUser(t *testing.T) { + _, _, err := client.Users.ListFollowers(context.Background(), "%", nil) + testURLParseError(t, err) +} + +func TestUsersService_ListFollowing_authenticatedUser(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/user/following", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testFormValues(t, r, values{"page": "2"}) + fmt.Fprint(w, `[{"id":1}]`) + }) + + opts := &ListOptions{Page: 2} + users, _, err := client.Users.ListFollowing(context.Background(), "", opts) + if err != nil { + t.Errorf("Users.ListFollowing returned error: %v", err) + } + + want := []*User{{ID: Int(1)}} + if !reflect.DeepEqual(users, want) { + t.Errorf("Users.ListFollowing returned %+v, want %+v", users, want) + } +} + +func TestUsersService_ListFollowing_specifiedUser(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/users/u/following", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + fmt.Fprint(w, `[{"id":1}]`) + }) + + users, _, err := client.Users.ListFollowing(context.Background(), "u", nil) + if err != nil { + t.Errorf("Users.ListFollowing returned error: %v", err) + } + + want := []*User{{ID: Int(1)}} + if !reflect.DeepEqual(users, want) { + t.Errorf("Users.ListFollowing returned %+v, want %+v", users, want) + } +} + +func TestUsersService_ListFollowing_invalidUser(t *testing.T) { + _, _, err := client.Users.ListFollowing(context.Background(), "%", nil) + testURLParseError(t, err) +} + +func TestUsersService_IsFollowing_authenticatedUser(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/user/following/t", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + w.WriteHeader(http.StatusNoContent) + }) + + following, _, err := client.Users.IsFollowing(context.Background(), "", "t") + if err != nil { + t.Errorf("Users.IsFollowing returned error: %v", err) + } + if want := true; following != want { + t.Errorf("Users.IsFollowing returned %+v, want %+v", following, want) + } +} + +func TestUsersService_IsFollowing_specifiedUser(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/users/u/following/t", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, 
"GET") + w.WriteHeader(http.StatusNoContent) + }) + + following, _, err := client.Users.IsFollowing(context.Background(), "u", "t") + if err != nil { + t.Errorf("Users.IsFollowing returned error: %v", err) + } + if want := true; following != want { + t.Errorf("Users.IsFollowing returned %+v, want %+v", following, want) + } +} + +func TestUsersService_IsFollowing_false(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/users/u/following/t", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + w.WriteHeader(http.StatusNotFound) + }) + + following, _, err := client.Users.IsFollowing(context.Background(), "u", "t") + if err != nil { + t.Errorf("Users.IsFollowing returned error: %v", err) + } + if want := false; following != want { + t.Errorf("Users.IsFollowing returned %+v, want %+v", following, want) + } +} + +func TestUsersService_IsFollowing_error(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/users/u/following/t", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + http.Error(w, "BadRequest", http.StatusBadRequest) + }) + + following, _, err := client.Users.IsFollowing(context.Background(), "u", "t") + if err == nil { + t.Errorf("Expected HTTP 400 response") + } + if want := false; following != want { + t.Errorf("Users.IsFollowing returned %+v, want %+v", following, want) + } +} + +func TestUsersService_IsFollowing_invalidUser(t *testing.T) { + _, _, err := client.Users.IsFollowing(context.Background(), "%", "%") + testURLParseError(t, err) +} + +func TestUsersService_Follow(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/user/following/u", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "PUT") + }) + + _, err := client.Users.Follow(context.Background(), "u") + if err != nil { + t.Errorf("Users.Follow returned error: %v", err) + } +} + +func TestUsersService_Follow_invalidUser(t *testing.T) { + _, err := client.Users.Follow(context.Background(), "%") + testURLParseError(t, err) +} + +func TestUsersService_Unfollow(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/user/following/u", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "DELETE") + }) + + _, err := client.Users.Unfollow(context.Background(), "u") + if err != nil { + t.Errorf("Users.Follow returned error: %v", err) + } +} + +func TestUsersService_Unfollow_invalidUser(t *testing.T) { + _, err := client.Users.Unfollow(context.Background(), "%") + testURLParseError(t, err) +} diff --git a/vendor/github.com/google/go-github/github/users_gpg_keys_test.go b/vendor/github.com/google/go-github/github/users_gpg_keys_test.go new file mode 100644 index 000000000000..f4180df98816 --- /dev/null +++ b/vendor/github.com/google/go-github/github/users_gpg_keys_test.go @@ -0,0 +1,139 @@ +// Copyright 2016 The go-github AUTHORS. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package github + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "reflect" + "testing" +) + +func TestUsersService_ListGPGKeys_authenticatedUser(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/user/gpg_keys", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testHeader(t, r, "Accept", mediaTypeGitSigningPreview) + testFormValues(t, r, values{"page": "2"}) + fmt.Fprint(w, `[{"id":1,"primary_key_id":2}]`) + }) + + opt := &ListOptions{Page: 2} + keys, _, err := client.Users.ListGPGKeys(context.Background(), "", opt) + if err != nil { + t.Errorf("Users.ListGPGKeys returned error: %v", err) + } + + want := []*GPGKey{{ID: Int(1), PrimaryKeyID: Int(2)}} + if !reflect.DeepEqual(keys, want) { + t.Errorf("Users.ListGPGKeys = %+v, want %+v", keys, want) + } +} + +func TestUsersService_ListGPGKeys_specifiedUser(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/users/u/gpg_keys", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testHeader(t, r, "Accept", mediaTypeGitSigningPreview) + fmt.Fprint(w, `[{"id":1,"primary_key_id":2}]`) + }) + + keys, _, err := client.Users.ListGPGKeys(context.Background(), "u", nil) + if err != nil { + t.Errorf("Users.ListGPGKeys returned error: %v", err) + } + + want := []*GPGKey{{ID: Int(1), PrimaryKeyID: Int(2)}} + if !reflect.DeepEqual(keys, want) { + t.Errorf("Users.ListGPGKeys = %+v, want %+v", keys, want) + } +} + +func TestUsersService_ListGPGKeys_invalidUser(t *testing.T) { + _, _, err := client.Users.ListGPGKeys(context.Background(), "%", nil) + testURLParseError(t, err) +} + +func TestUsersService_GetGPGKey(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/user/gpg_keys/1", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testHeader(t, r, "Accept", mediaTypeGitSigningPreview) + fmt.Fprint(w, `{"id":1}`) + }) + + key, _, err := client.Users.GetGPGKey(context.Background(), 1) + if err != nil { + t.Errorf("Users.GetGPGKey returned error: %v", err) + } + + want := &GPGKey{ID: Int(1)} + if !reflect.DeepEqual(key, want) { + t.Errorf("Users.GetGPGKey = %+v, want %+v", key, want) + } +} + +func TestUsersService_CreateGPGKey(t *testing.T) { + setup() + defer teardown() + + input := ` +-----BEGIN PGP PUBLIC KEY BLOCK----- +Comment: GPGTools - https://gpgtools.org + +mQINBFcEd9kBEACo54TDbGhKlXKWMvJgecEUKPPcv7XdnpKdGb3LRw5MvFwT0V0f +... 
+=tqfb +-----END PGP PUBLIC KEY BLOCK-----` + + mux.HandleFunc("/user/gpg_keys", func(w http.ResponseWriter, r *http.Request) { + var gpgKey struct { + ArmoredPublicKey *string `json:"armored_public_key,omitempty"` + } + json.NewDecoder(r.Body).Decode(&gpgKey) + + testMethod(t, r, "POST") + testHeader(t, r, "Accept", mediaTypeGitSigningPreview) + if gpgKey.ArmoredPublicKey == nil || *gpgKey.ArmoredPublicKey != input { + t.Errorf("gpgKey = %+v, want %q", gpgKey, input) + } + + fmt.Fprint(w, `{"id":1}`) + }) + + gpgKey, _, err := client.Users.CreateGPGKey(context.Background(), input) + if err != nil { + t.Errorf("Users.CreateGPGKey returned error: %v", err) + } + + want := &GPGKey{ID: Int(1)} + if !reflect.DeepEqual(gpgKey, want) { + t.Errorf("Users.CreateGPGKey = %+v, want %+v", gpgKey, want) + } +} + +func TestUsersService_DeleteGPGKey(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/user/gpg_keys/1", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "DELETE") + testHeader(t, r, "Accept", mediaTypeGitSigningPreview) + }) + + _, err := client.Users.DeleteGPGKey(context.Background(), 1) + if err != nil { + t.Errorf("Users.DeleteGPGKey returned error: %v", err) + } +} diff --git a/vendor/github.com/google/go-github/github/users_keys_test.go b/vendor/github.com/google/go-github/github/users_keys_test.go new file mode 100644 index 000000000000..8554c50cd924 --- /dev/null +++ b/vendor/github.com/google/go-github/github/users_keys_test.go @@ -0,0 +1,125 @@ +// Copyright 2013 The go-github AUTHORS. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package github + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "reflect" + "testing" +) + +func TestUsersService_ListKeys_authenticatedUser(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/user/keys", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testFormValues(t, r, values{"page": "2"}) + fmt.Fprint(w, `[{"id":1}]`) + }) + + opt := &ListOptions{Page: 2} + keys, _, err := client.Users.ListKeys(context.Background(), "", opt) + if err != nil { + t.Errorf("Users.ListKeys returned error: %v", err) + } + + want := []*Key{{ID: Int(1)}} + if !reflect.DeepEqual(keys, want) { + t.Errorf("Users.ListKeys returned %+v, want %+v", keys, want) + } +} + +func TestUsersService_ListKeys_specifiedUser(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/users/u/keys", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + fmt.Fprint(w, `[{"id":1}]`) + }) + + keys, _, err := client.Users.ListKeys(context.Background(), "u", nil) + if err != nil { + t.Errorf("Users.ListKeys returned error: %v", err) + } + + want := []*Key{{ID: Int(1)}} + if !reflect.DeepEqual(keys, want) { + t.Errorf("Users.ListKeys returned %+v, want %+v", keys, want) + } +} + +func TestUsersService_ListKeys_invalidUser(t *testing.T) { + _, _, err := client.Users.ListKeys(context.Background(), "%", nil) + testURLParseError(t, err) +} + +func TestUsersService_GetKey(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/user/keys/1", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + fmt.Fprint(w, `{"id":1}`) + }) + + key, _, err := client.Users.GetKey(context.Background(), 1) + if err != nil { + t.Errorf("Users.GetKey returned error: %v", err) + } + + want := &Key{ID: Int(1)} + if !reflect.DeepEqual(key, want) { + t.Errorf("Users.GetKey returned %+v, want %+v", key, 
want) + } +} + +func TestUsersService_CreateKey(t *testing.T) { + setup() + defer teardown() + + input := &Key{Key: String("k"), Title: String("t")} + + mux.HandleFunc("/user/keys", func(w http.ResponseWriter, r *http.Request) { + v := new(Key) + json.NewDecoder(r.Body).Decode(v) + + testMethod(t, r, "POST") + if !reflect.DeepEqual(v, input) { + t.Errorf("Request body = %+v, want %+v", v, input) + } + + fmt.Fprint(w, `{"id":1}`) + }) + + key, _, err := client.Users.CreateKey(context.Background(), input) + if err != nil { + t.Errorf("Users.CreateKey returned error: %v", err) + } + + want := &Key{ID: Int(1)} + if !reflect.DeepEqual(key, want) { + t.Errorf("Users.CreateKey returned %+v, want %+v", key, want) + } +} + +func TestUsersService_DeleteKey(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/user/keys/1", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "DELETE") + }) + + _, err := client.Users.DeleteKey(context.Background(), 1) + if err != nil { + t.Errorf("Users.DeleteKey returned error: %v", err) + } +} diff --git a/vendor/github.com/google/go-github/github/users_test.go b/vendor/github.com/google/go-github/github/users_test.go new file mode 100644 index 000000000000..c2776a30d236 --- /dev/null +++ b/vendor/github.com/google/go-github/github/users_test.go @@ -0,0 +1,242 @@ +// Copyright 2013 The go-github AUTHORS. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package github + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "reflect" + "testing" +) + +func TestUser_marshall(t *testing.T) { + testJSONMarshal(t, &User{}, "{}") + + u := &User{ + Login: String("l"), + ID: Int(1), + URL: String("u"), + AvatarURL: String("a"), + GravatarID: String("g"), + Name: String("n"), + Company: String("c"), + Blog: String("b"), + Location: String("l"), + Email: String("e"), + Hireable: Bool(true), + PublicRepos: Int(1), + Followers: Int(1), + Following: Int(1), + CreatedAt: &Timestamp{referenceTime}, + SuspendedAt: &Timestamp{referenceTime}, + } + want := `{ + "login": "l", + "id": 1, + "avatar_url": "a", + "gravatar_id": "g", + "name": "n", + "company": "c", + "blog": "b", + "location": "l", + "email": "e", + "hireable": true, + "public_repos": 1, + "followers": 1, + "following": 1, + "created_at": ` + referenceTimeStr + `, + "suspended_at": ` + referenceTimeStr + `, + "url": "u" + }` + testJSONMarshal(t, u, want) +} + +func TestUsersService_Get_authenticatedUser(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/user", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + fmt.Fprint(w, `{"id":1}`) + }) + + user, _, err := client.Users.Get(context.Background(), "") + if err != nil { + t.Errorf("Users.Get returned error: %v", err) + } + + want := &User{ID: Int(1)} + if !reflect.DeepEqual(user, want) { + t.Errorf("Users.Get returned %+v, want %+v", user, want) + } +} + +func TestUsersService_Get_specifiedUser(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/users/u", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + fmt.Fprint(w, `{"id":1}`) + }) + + user, _, err := client.Users.Get(context.Background(), "u") + if err != nil { + t.Errorf("Users.Get returned error: %v", err) + } + + want := &User{ID: Int(1)} + if !reflect.DeepEqual(user, want) { + t.Errorf("Users.Get returned %+v, want %+v", user, want) + } +} + +func TestUsersService_Get_invalidUser(t *testing.T) { + _, _, err := 
client.Users.Get(context.Background(), "%") + testURLParseError(t, err) +} + +func TestUsersService_GetByID(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/user/1", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + fmt.Fprint(w, `{"id":1}`) + }) + + user, _, err := client.Users.GetByID(context.Background(), 1) + if err != nil { + t.Errorf("Users.GetByID returned error: %v", err) + } + + want := &User{ID: Int(1)} + if !reflect.DeepEqual(user, want) { + t.Errorf("Users.GetByID returned %+v, want %+v", user, want) + } +} + +func TestUsersService_Edit(t *testing.T) { + setup() + defer teardown() + + input := &User{Name: String("n")} + + mux.HandleFunc("/user", func(w http.ResponseWriter, r *http.Request) { + v := new(User) + json.NewDecoder(r.Body).Decode(v) + + testMethod(t, r, "PATCH") + if !reflect.DeepEqual(v, input) { + t.Errorf("Request body = %+v, want %+v", v, input) + } + + fmt.Fprint(w, `{"id":1}`) + }) + + user, _, err := client.Users.Edit(context.Background(), input) + if err != nil { + t.Errorf("Users.Edit returned error: %v", err) + } + + want := &User{ID: Int(1)} + if !reflect.DeepEqual(user, want) { + t.Errorf("Users.Edit returned %+v, want %+v", user, want) + } +} + +func TestUsersService_ListAll(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/users", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testFormValues(t, r, values{"since": "1", "page": "2"}) + fmt.Fprint(w, `[{"id":2}]`) + }) + + opt := &UserListOptions{1, ListOptions{Page: 2}} + users, _, err := client.Users.ListAll(context.Background(), opt) + if err != nil { + t.Errorf("Users.ListAll returned error: %v", err) + } + + want := []*User{{ID: Int(2)}} + if !reflect.DeepEqual(users, want) { + t.Errorf("Users.ListAll returned %+v, want %+v", users, want) + } +} + +func TestUsersService_ListInvitations(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/user/repository_invitations", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testHeader(t, r, "Accept", mediaTypeRepositoryInvitationsPreview) + fmt.Fprintf(w, `[{"id":1}, {"id":2}]`) + }) + + got, _, err := client.Users.ListInvitations(context.Background(), nil) + if err != nil { + t.Errorf("Users.ListInvitations returned error: %v", err) + } + + want := []*RepositoryInvitation{{ID: Int(1)}, {ID: Int(2)}} + if !reflect.DeepEqual(got, want) { + t.Errorf("Users.ListInvitations = %+v, want %+v", got, want) + } +} + +func TestUsersService_ListInvitations_withOptions(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/user/repository_invitations", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + testFormValues(t, r, values{ + "page": "2", + }) + testHeader(t, r, "Accept", mediaTypeRepositoryInvitationsPreview) + fmt.Fprintf(w, `[{"id":1}, {"id":2}]`) + }) + + _, _, err := client.Users.ListInvitations(context.Background(), &ListOptions{Page: 2}) + if err != nil { + t.Errorf("Users.ListInvitations returned error: %v", err) + } +} + +func TestUsersService_AcceptInvitation(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/user/repository_invitations/1", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "PATCH") + testHeader(t, r, "Accept", mediaTypeRepositoryInvitationsPreview) + w.WriteHeader(http.StatusNoContent) + }) + + if _, err := client.Users.AcceptInvitation(context.Background(), 1); err != nil { + t.Errorf("Users.AcceptInvitation returned error: %v", err) + } +} + +func 
TestUsersService_DeclineInvitation(t *testing.T) { + setup() + defer teardown() + + mux.HandleFunc("/user/repository_invitations/1", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "DELETE") + testHeader(t, r, "Accept", mediaTypeRepositoryInvitationsPreview) + w.WriteHeader(http.StatusNoContent) + }) + + if _, err := client.Users.DeclineInvitation(context.Background(), 1); err != nil { + t.Errorf("Users.DeclineInvitation returned error: %v", err) + } +} diff --git a/vendor/github.com/google/go-querystring/.gitignore b/vendor/github.com/google/go-querystring/.gitignore new file mode 100644 index 000000000000..9ed3b07cefec --- /dev/null +++ b/vendor/github.com/google/go-querystring/.gitignore @@ -0,0 +1 @@ +*.test diff --git a/vendor/github.com/google/go-querystring/CONTRIBUTING.md b/vendor/github.com/google/go-querystring/CONTRIBUTING.md new file mode 100644 index 000000000000..51cf5cd1adae --- /dev/null +++ b/vendor/github.com/google/go-querystring/CONTRIBUTING.md @@ -0,0 +1,67 @@ +# How to contribute # + +We'd love to accept your patches and contributions to this project. There are +just a few small guidelines you need to follow. + + +## Contributor License Agreement ## + +Contributions to any Google project must be accompanied by a Contributor +License Agreement. This is not a copyright **assignment**; it simply gives +Google permission to use and redistribute your contributions as part of the +project. + + * If you are an individual writing original source code and you're sure you + own the intellectual property, then you'll need to sign an [individual + CLA][]. + + * If you work for a company that wants to allow you to contribute your work, + then you'll need to sign a [corporate CLA][]. + +You generally only need to submit a CLA once, so if you've already submitted +one (even if it was for a different project), you probably don't need to do it +again. + +[individual CLA]: https://developers.google.com/open-source/cla/individual +[corporate CLA]: https://developers.google.com/open-source/cla/corporate + + +## Submitting a patch ## + + 1. It's generally best to start by opening a new issue describing the bug or + feature you're intending to fix. Even if you think it's relatively minor, + it's helpful to know what people are working on. Mention in the initial + issue that you are planning to work on that bug or feature so that it can + be assigned to you. + + 1. Follow the normal process of [forking][] the project, and set up a new + branch to work in. It's important that each group of changes be done in + separate branches in order to ensure that a pull request only includes the + commits related to that bug or feature. + + 1. Go makes it very simple to ensure properly formatted code, so always run + `go fmt` on your code before committing it. You should also run + [golint][] over your code. As noted in the [golint readme][], it's not + strictly necessary that your code be completely "lint-free", but this will + help you find common style issues. + + 1. Any significant changes should almost always be accompanied by tests. The + project already has good test coverage, so look at some of the existing + tests if you're unsure how to go about it. [gocov][] and [gocov-html][] + are invaluable tools for seeing which parts of your code aren't being + exercised by your tests. + + 1. Do your best to have [well-formed commit messages][] for each change. 
+ This provides consistency throughout the project, and ensures that commit + messages can be formatted properly by various git tools. + + 1. Finally, push the commits to your fork and submit a [pull request][]. + +[forking]: https://help.github.com/articles/fork-a-repo +[golint]: https://github.com/golang/lint +[golint readme]: https://github.com/golang/lint/blob/master/README +[gocov]: https://github.com/axw/gocov +[gocov-html]: https://github.com/matm/gocov-html +[well-formed commit messages]: http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html +[squash]: http://git-scm.com/book/en/Git-Tools-Rewriting-History#Squashing-Commits +[pull request]: https://help.github.com/articles/creating-a-pull-request diff --git a/vendor/github.com/google/go-querystring/README.md b/vendor/github.com/google/go-querystring/README.md new file mode 100644 index 000000000000..03e937034d91 --- /dev/null +++ b/vendor/github.com/google/go-querystring/README.md @@ -0,0 +1,39 @@ +# go-querystring # + +go-querystring is a Go library for encoding structs into URL query parameters. + + +**Documentation:** +**Build Status:** [![Build Status](https://drone.io/github.com/google/go-querystring/status.png)](https://drone.io/github.com/google/go-querystring/latest) + +## Usage ## + +```go +import "github.com/google/go-querystring/query" +``` + +go-querystring is designed to assist in scenarios where you want to construct a +URL using a struct that represents the URL query parameters. You might do this +to enforce the type safety of your parameters, for example, as is done in the +[go-github][] library. + +The query package exports a single `Values()` function. A simple example: + +```go +type Options struct { + Query string `url:"q"` + ShowAll bool `url:"all"` + Page int `url:"page"` +} + +opt := Options{ "foo", true, 2 } +v, _ := query.Values(opt) +fmt.Print(v.Encode()) // will output: "all=true&page=2&q=foo" (url.Values.Encode sorts by key) +``` + +[go-github]: https://github.com/google/go-github/commit/994f6f8405f052a117d2d0b500054341048fbb08 + +## License ## + +This library is distributed under the BSD-style license found in the [LICENSE](./LICENSE) +file. diff --git a/vendor/github.com/google/go-querystring/query/BUILD b/vendor/github.com/google/go-querystring/query/BUILD index 9042ed984d35..1d7c92465122 100644 --- a/vendor/github.com/google/go-querystring/query/BUILD +++ b/vendor/github.com/google/go-querystring/query/BUILD @@ -1,14 +1,17 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "go_default_library", srcs = ["encode.go"], importpath = "github.com/google/go-querystring/query", + visibility = ["//visibility:public"], +) + +go_test( + name = "go_default_test", + srcs = ["encode_test.go"], + importpath = "github.com/google/go-querystring/query", + library = ":go_default_library", ) filegroup( name = "package-srcs", srcs = glob(["**"]), @@ -22,4 +25,5 @@ filegroup( name = "all-srcs", srcs = [":package-srcs"], tags = ["automanaged"], + visibility = ["//visibility:public"], ) diff --git a/vendor/github.com/google/go-querystring/query/encode_test.go b/vendor/github.com/google/go-querystring/query/encode_test.go new file mode 100644 index 000000000000..09efb0f5b14c --- /dev/null +++ b/vendor/github.com/google/go-querystring/query/encode_test.go @@ -0,0 +1,286 @@ +// Copyright 2013 The Go Authors. All rights reserved. 
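One behavior worth keeping in mind while reading the encode tests that follow: `query.Values` returns a plain `url.Values`, whose `Encode()` method emits keys in sorted order (which is why the README output above is alphabetical). A minimal, hedged sketch of how the tag options compose — the `SearchOptions` type and its field names are invented for illustration; only the tag options themselves (`comma`, `omitempty`, `-`) come from the vendored sources:

```go
package main

import (
	"fmt"

	"github.com/google/go-querystring/query"
)

// SearchOptions is a hypothetical options struct; its tags mirror the
// options exercised by encode_test.go below.
type SearchOptions struct {
	Query    string   `url:"q"`
	Labels   []string `url:"labels,comma"`   // joined into one comma-separated value
	Page     int      `url:"page,omitempty"` // dropped while it holds the zero value
	Internal string   `url:"-"`              // never encoded
}

func main() {
	opt := SearchOptions{Query: "foo", Labels: []string{"bug", "help wanted"}, Internal: "x"}
	v, err := query.Values(opt)
	if err != nil {
		panic(err)
	}
	fmt.Println(v.Encode()) // url.Values sorts keys: labels=bug%2Chelp+wanted&q=foo
}
```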
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package query + +import ( + "fmt" + "net/url" + "reflect" + "testing" + "time" +) + +type Nested struct { + A SubNested `url:"a"` + B *SubNested `url:"b"` + Ptr *SubNested `url:"ptr,omitempty"` +} + +type SubNested struct { + Value string `url:"value"` +} + +func TestValues_types(t *testing.T) { + str := "string" + strPtr := &str + + tests := []struct { + in interface{} + want url.Values + }{ + { + // basic primitives + struct { + A string + B int + C uint + D float32 + E bool + }{}, + url.Values{ + "A": {""}, + "B": {"0"}, + "C": {"0"}, + "D": {"0"}, + "E": {"false"}, + }, + }, + { + // pointers + struct { + A *string + B *int + C **string + }{A: strPtr, C: &strPtr}, + url.Values{ + "A": {str}, + "B": {""}, + "C": {str}, + }, + }, + { + // slices and arrays + struct { + A []string + B []string `url:",comma"` + C []string `url:",space"` + D [2]string + E [2]string `url:",comma"` + F [2]string `url:",space"` + G []*string `url:",space"` + H []bool `url:",int,space"` + I []string `url:",brackets"` + }{ + A: []string{"a", "b"}, + B: []string{"a", "b"}, + C: []string{"a", "b"}, + D: [2]string{"a", "b"}, + E: [2]string{"a", "b"}, + F: [2]string{"a", "b"}, + G: []*string{&str, &str}, + H: []bool{true, false}, + I: []string{"a", "b"}, + }, + url.Values{ + "A": {"a", "b"}, + "B": {"a,b"}, + "C": {"a b"}, + "D": {"a", "b"}, + "E": {"a,b"}, + "F": {"a b"}, + "G": {"string string"}, + "H": {"1 0"}, + "I[]": {"a", "b"}, + }, + }, + { + // other types + struct { + A time.Time + B time.Time `url:",unix"` + C bool `url:",int"` + D bool `url:",int"` + }{ + A: time.Date(2000, 1, 1, 12, 34, 56, 0, time.UTC), + B: time.Date(2000, 1, 1, 12, 34, 56, 0, time.UTC), + C: true, + D: false, + }, + url.Values{ + "A": {"2000-01-01T12:34:56Z"}, + "B": {"946730096"}, + "C": {"1"}, + "D": {"0"}, + }, + }, + { + struct { + Nest Nested `url:"nest"` + }{ + Nested{ + A: SubNested{ + Value: "that", + }, + }, + }, + url.Values{ + "nest[a][value]": {"that"}, + "nest[b]": {""}, + }, + }, + { + struct { + Nest Nested `url:"nest"` + }{ + Nested{ + Ptr: &SubNested{ + Value: "that", + }, + }, + }, + url.Values{ + "nest[a][value]": {""}, + "nest[b]": {""}, + "nest[ptr][value]": {"that"}, + }, + }, + { + nil, + url.Values{}, + }, + } + + for i, tt := range tests { + v, err := Values(tt.in) + if err != nil { + t.Errorf("%d. Values(%q) returned error: %v", i, tt.in, err) + } + + if !reflect.DeepEqual(tt.want, v) { + t.Errorf("%d. 
Values(%q) returned %v, want %v", i, tt.in, v, tt.want) + } + } +} + +func TestValues_omitEmpty(t *testing.T) { + str := "" + s := struct { + a string + A string + B string `url:",omitempty"` + C string `url:"-"` + D string `url:"omitempty"` // actually named omitempty, not an option + E *string `url:",omitempty"` + }{E: &str} + + v, err := Values(s) + if err != nil { + t.Errorf("Values(%q) returned error: %v", s, err) + } + + want := url.Values{ + "A": {""}, + "omitempty": {""}, + "E": {""}, // E is included because the pointer is not empty, even though the string being pointed to is + } + if !reflect.DeepEqual(want, v) { + t.Errorf("Values(%q) returned %v, want %v", s, v, want) + } +} + +type A struct { + B +} + +type B struct { + C string +} + +type D struct { + B + C string +} + +func TestValues_embeddedStructs(t *testing.T) { + tests := []struct { + in interface{} + want url.Values + }{ + { + A{B{C: "foo"}}, + url.Values{"C": {"foo"}}, + }, + { + D{B: B{C: "bar"}, C: "foo"}, + url.Values{"C": {"foo", "bar"}}, + }, + } + + for i, tt := range tests { + v, err := Values(tt.in) + if err != nil { + t.Errorf("%d. Values(%q) returned error: %v", i, tt.in, err) + } + + if !reflect.DeepEqual(tt.want, v) { + t.Errorf("%d. Values(%q) returned %v, want %v", i, tt.in, v, tt.want) + } + } +} + +func TestValues_invalidInput(t *testing.T) { + _, err := Values("") + if err == nil { + t.Errorf("expected Values() to return an error on invalid input") + } +} + +type EncodedArgs []string + +func (m EncodedArgs) EncodeValues(key string, v *url.Values) error { + for i, arg := range m { + v.Set(fmt.Sprintf("%s.%d", key, i), arg) + } + return nil +} + +func TestValues_Marshaler(t *testing.T) { + s := struct { + Args EncodedArgs `url:"arg"` + }{[]string{"a", "b", "c"}} + v, err := Values(s) + if err != nil { + t.Errorf("Values(%q) returned error: %v", s, err) + } + + want := url.Values{ + "arg.0": {"a"}, + "arg.1": {"b"}, + "arg.2": {"c"}, + } + if !reflect.DeepEqual(want, v) { + t.Errorf("Values(%q) returned %v, want %v", s, v, want) + } +} + +func TestTagParsing(t *testing.T) { + name, opts := parseTag("field,foobar,foo") + if name != "field" { + t.Fatalf("name = %q, want field", name) + } + for _, tt := range []struct { + opt string + want bool + }{ + {"foobar", true}, + {"foo", true}, + {"bar", false}, + {"field", false}, + } { + if opts.Contains(tt.opt) != tt.want { + t.Errorf("Contains(%q) = %v", tt.opt, !tt.want) + } + } +} diff --git a/vendor/github.com/google/gofuzz/BUILD b/vendor/github.com/google/gofuzz/BUILD index 00998c19671a..a7aa0ee94ec3 100644 --- a/vendor/github.com/google/gofuzz/BUILD +++ b/vendor/github.com/google/gofuzz/BUILD @@ -1,9 +1,4 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "go_default_library", @@ -12,6 +7,21 @@ go_library( "fuzz.go", ], importpath = "github.com/google/gofuzz", + visibility = ["//visibility:public"], +) + +go_test( + name = "go_default_test", + srcs = ["fuzz_test.go"], + importpath = "github.com/google/gofuzz", + library = ":go_default_library", +) + +go_test( + name = "go_default_xtest", + srcs = ["example_test.go"], + importpath = "github.com/google/gofuzz_test", + deps = [":go_default_library"], ) filegroup( @@ -25,4 +35,5 @@ filegroup( name = "all-srcs", srcs = [":package-srcs"], tags = ["automanaged"], + visibility = ["//visibility:public"], ) diff --git 
a/vendor/github.com/google/gofuzz/example_test.go b/vendor/github.com/google/gofuzz/example_test.go new file mode 100644 index 000000000000..792707a3a154 --- /dev/null +++ b/vendor/github.com/google/gofuzz/example_test.go @@ -0,0 +1,225 @@ +/* +Copyright 2014 Google Inc. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fuzz_test + +import ( + "encoding/json" + "fmt" + "math/rand" + + "github.com/google/gofuzz" +) + +func ExampleSimple() { + type MyType struct { + A string + B string + C int + D struct { + E float64 + } + } + + f := fuzz.New() + object := MyType{} + + uniqueObjects := map[MyType]int{} + + for i := 0; i < 1000; i++ { + f.Fuzz(&object) + uniqueObjects[object]++ + } + fmt.Printf("Got %v unique objects.\n", len(uniqueObjects)) + // Output: + // Got 1000 unique objects. +} + +func ExampleCustom() { + type MyType struct { + A int + B string + } + + counter := 0 + f := fuzz.New().Funcs( + func(i *int, c fuzz.Continue) { + *i = counter + counter++ + }, + ) + object := MyType{} + + uniqueObjects := map[MyType]int{} + + for i := 0; i < 100; i++ { + f.Fuzz(&object) + if object.A != i { + fmt.Printf("Unexpected value: %#v\n", object) + } + uniqueObjects[object]++ + } + fmt.Printf("Got %v unique objects.\n", len(uniqueObjects)) + // Output: + // Got 100 unique objects. +} + +func ExampleComplex() { + type OtherType struct { + A string + B string + } + type MyType struct { + Pointer *OtherType + Map map[string]OtherType + PointerMap *map[string]OtherType + Slice []OtherType + SlicePointer []*OtherType + PointerSlicePointer *[]*OtherType + } + + f := fuzz.New().RandSource(rand.NewSource(0)).NilChance(0).NumElements(1, 1).Funcs( + func(o *OtherType, c fuzz.Continue) { + o.A = "Foo" + o.B = "Bar" + }, + func(op **OtherType, c fuzz.Continue) { + *op = &OtherType{"A", "B"} + }, + func(m map[string]OtherType, c fuzz.Continue) { + m["Works Because"] = OtherType{ + "Fuzzer", + "Preallocated", + } + }, + ) + object := MyType{} + f.Fuzz(&object) + bytes, err := json.MarshalIndent(&object, "", " ") + if err != nil { + fmt.Printf("error: %v\n", err) + } + fmt.Printf("%s\n", string(bytes)) + // Output: + // { + // "Pointer": { + // "A": "A", + // "B": "B" + // }, + // "Map": { + // "Works Because": { + // "A": "Fuzzer", + // "B": "Preallocated" + // } + // }, + // "PointerMap": { + // "Works Because": { + // "A": "Fuzzer", + // "B": "Preallocated" + // } + // }, + // "Slice": [ + // { + // "A": "Foo", + // "B": "Bar" + // } + // ], + // "SlicePointer": [ + // { + // "A": "A", + // "B": "B" + // } + // ], + // "PointerSlicePointer": [ + // { + // "A": "A", + // "B": "B" + // } + // ] + // } +} + +func ExampleMap() { + f := fuzz.New().NilChance(0).NumElements(1, 1) + var myMap map[struct{ A, B, C int }]string + f.Fuzz(&myMap) + fmt.Printf("myMap has %v element(s).\n", len(myMap)) + // Output: + // myMap has 1 element(s). +} + +func ExampleSingle() { + f := fuzz.New() + var i int + f.Fuzz(&i) + + // Technically, we'd expect this to fail one out of 2 billion attempts... 
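+ // (Because this is an Example with an expected Output, that rare zero would surface as a test failure.)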
+ fmt.Printf("(i == 0) == %v", i == 0) + // Output: + // (i == 0) == false +} + +func ExampleEnum() { + type MyEnum string + const ( + A MyEnum = "A" + B MyEnum = "B" + ) + type MyInfo struct { + Type MyEnum + AInfo *string + BInfo *string + } + + f := fuzz.New().NilChance(0).Funcs( + func(e *MyInfo, c fuzz.Continue) { + // Note c's embedded Rand allows for direct use. + // We could also use c.RandBool() here. + switch c.Intn(2) { + case 0: + e.Type = A + c.Fuzz(&e.AInfo) + case 1: + e.Type = B + c.Fuzz(&e.BInfo) + } + }, + ) + + for i := 0; i < 100; i++ { + var myObject MyInfo + f.Fuzz(&myObject) + switch myObject.Type { + case A: + if myObject.AInfo == nil { + fmt.Println("AInfo should have been set!") + } + if myObject.BInfo != nil { + fmt.Println("BInfo should NOT have been set!") + } + case B: + if myObject.BInfo == nil { + fmt.Println("BInfo should have been set!") + } + if myObject.AInfo != nil { + fmt.Println("AInfo should NOT have been set!") + } + default: + fmt.Println("Invalid enum value!") + } + } + // Output: +} diff --git a/vendor/github.com/google/gofuzz/fuzz_test.go b/vendor/github.com/google/gofuzz/fuzz_test.go new file mode 100644 index 000000000000..12abc8f6598b --- /dev/null +++ b/vendor/github.com/google/gofuzz/fuzz_test.go @@ -0,0 +1,384 @@ +/* +Copyright 2014 Google Inc. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package fuzz + +import ( + "reflect" + "testing" + "time" +) + +func TestFuzz_basic(t *testing.T) { + obj := &struct { + I int + I8 int8 + I16 int16 + I32 int32 + I64 int64 + U uint + U8 uint8 + U16 uint16 + U32 uint32 + U64 uint64 + Uptr uintptr + S string + B bool + T time.Time + }{} + + failed := map[string]int{} + for i := 0; i < 10; i++ { + New().Fuzz(obj) + + if n, v := "i", obj.I; v == 0 { + failed[n] = failed[n] + 1 + } + if n, v := "i8", obj.I8; v == 0 { + failed[n] = failed[n] + 1 + } + if n, v := "i16", obj.I16; v == 0 { + failed[n] = failed[n] + 1 + } + if n, v := "i32", obj.I32; v == 0 { + failed[n] = failed[n] + 1 + } + if n, v := "i64", obj.I64; v == 0 { + failed[n] = failed[n] + 1 + } + if n, v := "u", obj.U; v == 0 { + failed[n] = failed[n] + 1 + } + if n, v := "u8", obj.U8; v == 0 { + failed[n] = failed[n] + 1 + } + if n, v := "u16", obj.U16; v == 0 { + failed[n] = failed[n] + 1 + } + if n, v := "u32", obj.U32; v == 0 { + failed[n] = failed[n] + 1 + } + if n, v := "u64", obj.U64; v == 0 { + failed[n] = failed[n] + 1 + } + if n, v := "uptr", obj.Uptr; v == 0 { + failed[n] = failed[n] + 1 + } + if n, v := "s", obj.S; v == "" { + failed[n] = failed[n] + 1 + } + if n, v := "b", obj.B; v == false { + failed[n] = failed[n] + 1 + } + if n, v := "t", obj.T; v.IsZero() { + failed[n] = failed[n] + 1 + } + } + checkFailed(t, failed) +} + +func checkFailed(t *testing.T, failed map[string]int) { + for k, v := range failed { + if v > 8 { + t.Errorf("%v seems to not be getting set, was zero value %v times", k, v) + } + } +} + +func TestFuzz_structptr(t *testing.T) { + obj := &struct { + A *struct { + S string + } + }{} + + f := New().NilChance(.5) + failed := map[string]int{} + for i := 0; i < 10; i++ { + f.Fuzz(obj) + + if n, v := "a not nil", obj.A; v == nil { + failed[n] = failed[n] + 1 + } + if n, v := "a nil", obj.A; v != nil { + failed[n] = failed[n] + 1 + } + if n, v := "as", obj.A; v == nil || v.S == "" { + failed[n] = failed[n] + 1 + } + } + checkFailed(t, failed) +} + +// tryFuzz tries fuzzing up to 20 times. Fail if check() never passes, report the highest +// stage it ever got to. 
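+// A higher stage means the fuzzed value satisfied more of check's conditions before one failed.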
+func tryFuzz(t *testing.T, f *Fuzzer, obj interface{}, check func() (stage int, passed bool)) { + maxStage := 0 + for i := 0; i < 20; i++ { + f.Fuzz(obj) + stage, passed := check() + if stage > maxStage { + maxStage = stage + } + if passed { + return + } + } + t.Errorf("Only ever got to stage %v", maxStage) +} + +func TestFuzz_structmap(t *testing.T) { + obj := &struct { + A map[struct { + S string + }]struct { + S2 string + } + B map[string]string + }{} + + tryFuzz(t, New(), obj, func() (int, bool) { + if obj.A == nil { + return 1, false + } + if len(obj.A) == 0 { + return 2, false + } + for k, v := range obj.A { + if k.S == "" { + return 3, false + } + if v.S2 == "" { + return 4, false + } + } + + if obj.B == nil { + return 5, false + } + if len(obj.B) == 0 { + return 6, false + } + for k, v := range obj.B { + if k == "" { + return 7, false + } + if v == "" { + return 8, false + } + } + return 9, true + }) +} + +func TestFuzz_structslice(t *testing.T) { + obj := &struct { + A []struct { + S string + } + B []string + }{} + + tryFuzz(t, New(), obj, func() (int, bool) { + if obj.A == nil { + return 1, false + } + if len(obj.A) == 0 { + return 2, false + } + for _, v := range obj.A { + if v.S == "" { + return 3, false + } + } + + if obj.B == nil { + return 4, false + } + if len(obj.B) == 0 { + return 5, false + } + for _, v := range obj.B { + if v == "" { + return 6, false + } + } + return 7, true + }) +} + +func TestFuzz_custom(t *testing.T) { + obj := &struct { + A string + B *string + C map[string]string + D *map[string]string + }{} + + testPhrase := "gotcalled" + testMap := map[string]string{"C": "D"} + f := New().Funcs( + func(s *string, c Continue) { + *s = testPhrase + }, + func(m map[string]string, c Continue) { + m["C"] = "D" + }, + ) + + tryFuzz(t, f, obj, func() (int, bool) { + if obj.A != testPhrase { + return 1, false + } + if obj.B == nil { + return 2, false + } + if *obj.B != testPhrase { + return 3, false + } + if e, a := testMap, obj.C; !reflect.DeepEqual(e, a) { + return 4, false + } + if obj.D == nil { + return 5, false + } + if e, a := testMap, *obj.D; !reflect.DeepEqual(e, a) { + return 6, false + } + return 7, true + }) +} + +type SelfFuzzer string + +// Implement fuzz.Interface. +func (sf *SelfFuzzer) Fuzz(c Continue) { + *sf = selfFuzzerTestPhrase +} + +const selfFuzzerTestPhrase = "was fuzzed" + +func TestFuzz_interface(t *testing.T) { + f := New() + + var obj1 SelfFuzzer + tryFuzz(t, f, &obj1, func() (int, bool) { + if obj1 != selfFuzzerTestPhrase { + return 1, false + } + return 1, true + }) + + var obj2 map[int]SelfFuzzer + tryFuzz(t, f, &obj2, func() (int, bool) { + for _, v := range obj2 { + if v != selfFuzzerTestPhrase { + return 1, false + } + } + return 1, true + }) +} + +func TestFuzz_interfaceAndFunc(t *testing.T) { + const privateTestPhrase = "private phrase" + f := New().Funcs( + // This should take precedence over SelfFuzzer.Fuzz(). 
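+ // (tryFuzz below confirms the values are set by this func, not by SelfFuzzer.Fuzz.)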
+ func(s *SelfFuzzer, c Continue) { + *s = privateTestPhrase + }, + ) + + var obj1 SelfFuzzer + tryFuzz(t, f, &obj1, func() (int, bool) { + if obj1 != privateTestPhrase { + return 1, false + } + return 1, true + }) + + var obj2 map[int]SelfFuzzer + tryFuzz(t, f, &obj2, func() (int, bool) { + for _, v := range obj2 { + if v != privateTestPhrase { + return 1, false + } + } + return 1, true + }) +} + +func TestFuzz_noCustom(t *testing.T) { + type Inner struct { + Str string + } + type Outer struct { + Str string + In Inner + } + + testPhrase := "gotcalled" + f := New().Funcs( + func(outer *Outer, c Continue) { + outer.Str = testPhrase + c.Fuzz(&outer.In) + }, + func(inner *Inner, c Continue) { + inner.Str = testPhrase + }, + ) + c := Continue{f: f, Rand: f.r} + + // Fuzzer.Fuzz() + obj1 := Outer{} + f.Fuzz(&obj1) + if obj1.Str != testPhrase { + t.Errorf("expected Outer custom function to have been called") + } + if obj1.In.Str != testPhrase { + t.Errorf("expected Inner custom function to have been called") + } + + // Continue.Fuzz() + obj2 := Outer{} + c.Fuzz(&obj2) + if obj2.Str != testPhrase { + t.Errorf("expected Outer custom function to have been called") + } + if obj2.In.Str != testPhrase { + t.Errorf("expected Inner custom function to have been called") + } + + // Fuzzer.FuzzNoCustom() + obj3 := Outer{} + f.FuzzNoCustom(&obj3) + if obj3.Str == testPhrase { + t.Errorf("expected Outer custom function to not have been called") + } + if obj3.In.Str != testPhrase { + t.Errorf("expected Inner custom function to have been called") + } + + // Continue.FuzzNoCustom() + obj4 := Outer{} + c.FuzzNoCustom(&obj4) + if obj4.Str == testPhrase { + t.Errorf("expected Outer custom function to not have been called") + } + if obj4.In.Str != testPhrase { + t.Errorf("expected Inner custom function to have been called") + } +} diff --git a/vendor/github.com/gregjones/httpcache/BUILD b/vendor/github.com/gregjones/httpcache/BUILD index fa8c10683bb6..ea6243e0bce2 100644 --- a/vendor/github.com/gregjones/httpcache/BUILD +++ b/vendor/github.com/gregjones/httpcache/BUILD @@ -1,14 +1,17 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "go_default_library", srcs = ["httpcache.go"], importpath = "github.com/gregjones/httpcache", + visibility = ["//visibility:public"], +) + +go_test( + name = "go_default_test", + srcs = ["httpcache_test.go"], + importpath = "github.com/gregjones/httpcache", + library = ":go_default_library", ) filegroup( @@ -25,4 +28,5 @@ filegroup( "//vendor/github.com/gregjones/httpcache/diskcache:all-srcs", ], tags = ["automanaged"], + visibility = ["//visibility:public"], ) diff --git a/vendor/github.com/gregjones/httpcache/diskcache/BUILD b/vendor/github.com/gregjones/httpcache/diskcache/BUILD index 62dc7d1e5bdc..d4ba1c7882bd 100644 --- a/vendor/github.com/gregjones/httpcache/diskcache/BUILD +++ b/vendor/github.com/gregjones/httpcache/diskcache/BUILD @@ -1,17 +1,20 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "go_default_library", srcs = ["diskcache.go"], importpath = "github.com/gregjones/httpcache/diskcache", + visibility = ["//visibility:public"], deps = ["//vendor/github.com/peterbourgon/diskv:go_default_library"], ) +go_test( + name = "go_default_test", + srcs = 
["diskcache_test.go"], + importpath = "github.com/gregjones/httpcache/diskcache", + library = ":go_default_library", +) + filegroup( name = "package-srcs", srcs = glob(["**"]), @@ -23,4 +26,5 @@ filegroup( name = "all-srcs", srcs = [":package-srcs"], tags = ["automanaged"], + visibility = ["//visibility:public"], ) diff --git a/vendor/github.com/gregjones/httpcache/diskcache/diskcache_test.go b/vendor/github.com/gregjones/httpcache/diskcache/diskcache_test.go new file mode 100644 index 000000000000..35c76cbd1cdb --- /dev/null +++ b/vendor/github.com/gregjones/httpcache/diskcache/diskcache_test.go @@ -0,0 +1,42 @@ +package diskcache + +import ( + "bytes" + "io/ioutil" + "os" + "testing" +) + +func TestDiskCache(t *testing.T) { + tempDir, err := ioutil.TempDir("", "httpcache") + if err != nil { + t.Fatalf("TempDir: %v", err) + } + defer os.RemoveAll(tempDir) + + cache := New(tempDir) + + key := "testKey" + _, ok := cache.Get(key) + if ok { + t.Fatal("retrieved key before adding it") + } + + val := []byte("some bytes") + cache.Set(key, val) + + retVal, ok := cache.Get(key) + if !ok { + t.Fatal("could not retrieve an element we just added") + } + if !bytes.Equal(retVal, val) { + t.Fatal("retrieved a different value than what we put in") + } + + cache.Delete(key) + + _, ok = cache.Get(key) + if ok { + t.Fatal("deleted key still present") + } +} diff --git a/vendor/github.com/gregjones/httpcache/httpcache_test.go b/vendor/github.com/gregjones/httpcache/httpcache_test.go new file mode 100644 index 000000000000..3fbe5c85d174 --- /dev/null +++ b/vendor/github.com/gregjones/httpcache/httpcache_test.go @@ -0,0 +1,1208 @@ +package httpcache + +import ( + "bytes" + "errors" + "flag" + "io" + "io/ioutil" + "net/http" + "net/http/httptest" + "os" + "strconv" + "testing" + "time" +) + +var s struct { + server *httptest.Server + client http.Client + transport *Transport +} + +type fakeClock struct { + elapsed time.Duration +} + +func (c *fakeClock) since(t time.Time) time.Duration { + return c.elapsed +} + +func TestMain(m *testing.M) { + flag.Parse() + setup() + code := m.Run() + teardown() + os.Exit(code) +} + +func setup() { + tp := NewMemoryCacheTransport() + client := http.Client{Transport: tp} + s.transport = tp + s.client = client + + mux := http.NewServeMux() + s.server = httptest.NewServer(mux) + + mux.HandleFunc("/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Cache-Control", "max-age=3600") + })) + + mux.HandleFunc("/method", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Cache-Control", "max-age=3600") + w.Write([]byte(r.Method)) + })) + + mux.HandleFunc("/range", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + lm := "Fri, 14 Dec 2010 01:01:50 GMT" + if r.Header.Get("if-modified-since") == lm { + w.WriteHeader(http.StatusNotModified) + return + } + w.Header().Set("last-modified", lm) + if r.Header.Get("range") == "bytes=4-9" { + w.WriteHeader(http.StatusPartialContent) + w.Write([]byte(" text ")) + return + } + w.Write([]byte("Some text content")) + })) + + mux.HandleFunc("/nostore", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Cache-Control", "no-store") + })) + + mux.HandleFunc("/etag", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + etag := "124567" + if r.Header.Get("if-none-match") == etag { + w.WriteHeader(http.StatusNotModified) + return + } + w.Header().Set("etag", etag) + })) + + mux.HandleFunc("/lastmodified", http.HandlerFunc(func(w 
http.ResponseWriter, r *http.Request) { + lm := "Fri, 14 Dec 2010 01:01:50 GMT" + if r.Header.Get("if-modified-since") == lm { + w.WriteHeader(http.StatusNotModified) + return + } + w.Header().Set("last-modified", lm) + })) + + mux.HandleFunc("/varyaccept", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Cache-Control", "max-age=3600") + w.Header().Set("Content-Type", "text/plain") + w.Header().Set("Vary", "Accept") + w.Write([]byte("Some text content")) + })) + + mux.HandleFunc("/doublevary", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Cache-Control", "max-age=3600") + w.Header().Set("Content-Type", "text/plain") + w.Header().Set("Vary", "Accept, Accept-Language") + w.Write([]byte("Some text content")) + })) + mux.HandleFunc("/2varyheaders", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Cache-Control", "max-age=3600") + w.Header().Set("Content-Type", "text/plain") + w.Header().Add("Vary", "Accept") + w.Header().Add("Vary", "Accept-Language") + w.Write([]byte("Some text content")) + })) + mux.HandleFunc("/varyunused", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Cache-Control", "max-age=3600") + w.Header().Set("Content-Type", "text/plain") + w.Header().Set("Vary", "X-Madeup-Header") + w.Write([]byte("Some text content")) + })) + + updateFieldsCounter := 0 + mux.HandleFunc("/updatefields", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("X-Counter", strconv.Itoa(updateFieldsCounter)) + w.Header().Set("Etag", `"e"`) + updateFieldsCounter++ + if r.Header.Get("if-none-match") != "" { + w.WriteHeader(http.StatusNotModified) + return + } + w.Write([]byte("Some text content")) + })) +} + +func teardown() { + s.server.Close() +} + +func resetTest() { + s.transport.Cache = NewMemoryCache() + clock = &realClock{} +} + +// TestCacheableMethod ensures that uncacheable method does not get stored +// in cache and get incorrectly used for a following cacheable method request. 
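+// (The POST issued first against /method must not be replayed from the cache when the same URL is later fetched with GET.)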
+func TestCacheableMethod(t *testing.T) { + resetTest() + { + req, err := http.NewRequest("POST", s.server.URL+"/method", nil) + if err != nil { + t.Fatal(err) + } + resp, err := s.client.Do(req) + if err != nil { + t.Fatal(err) + } + var buf bytes.Buffer + _, err = io.Copy(&buf, resp.Body) + if err != nil { + t.Fatal(err) + } + err = resp.Body.Close() + if err != nil { + t.Fatal(err) + } + if got, want := buf.String(), "POST"; got != want { + t.Errorf("got %q, want %q", got, want) + } + if resp.StatusCode != http.StatusOK { + t.Errorf("response status code isn't 200 OK: %v", resp.StatusCode) + } + } + { + req, err := http.NewRequest("GET", s.server.URL+"/method", nil) + if err != nil { + t.Fatal(err) + } + resp, err := s.client.Do(req) + if err != nil { + t.Fatal(err) + } + var buf bytes.Buffer + _, err = io.Copy(&buf, resp.Body) + if err != nil { + t.Fatal(err) + } + err = resp.Body.Close() + if err != nil { + t.Fatal(err) + } + if got, want := buf.String(), "GET"; got != want { + t.Errorf("got wrong body %q, want %q", got, want) + } + if resp.StatusCode != http.StatusOK { + t.Errorf("response status code isn't 200 OK: %v", resp.StatusCode) + } + if resp.Header.Get(XFromCache) != "" { + t.Errorf("XFromCache header isn't blank") + } + } +} + +func TestDontStorePartialRangeInCache(t *testing.T) { + resetTest() + { + req, err := http.NewRequest("GET", s.server.URL+"/range", nil) + if err != nil { + t.Fatal(err) + } + req.Header.Set("range", "bytes=4-9") + resp, err := s.client.Do(req) + if err != nil { + t.Fatal(err) + } + var buf bytes.Buffer + _, err = io.Copy(&buf, resp.Body) + if err != nil { + t.Fatal(err) + } + err = resp.Body.Close() + if err != nil { + t.Fatal(err) + } + if got, want := buf.String(), " text "; got != want { + t.Errorf("got %q, want %q", got, want) + } + if resp.StatusCode != http.StatusPartialContent { + t.Errorf("response status code isn't 206 Partial Content: %v", resp.StatusCode) + } + } + { + req, err := http.NewRequest("GET", s.server.URL+"/range", nil) + if err != nil { + t.Fatal(err) + } + resp, err := s.client.Do(req) + if err != nil { + t.Fatal(err) + } + var buf bytes.Buffer + _, err = io.Copy(&buf, resp.Body) + if err != nil { + t.Fatal(err) + } + err = resp.Body.Close() + if err != nil { + t.Fatal(err) + } + if got, want := buf.String(), "Some text content"; got != want { + t.Errorf("got %q, want %q", got, want) + } + if resp.StatusCode != http.StatusOK { + t.Errorf("response status code isn't 200 OK: %v", resp.StatusCode) + } + if resp.Header.Get(XFromCache) != "" { + t.Error("XFromCache header isn't blank") + } + } + { + req, err := http.NewRequest("GET", s.server.URL+"/range", nil) + if err != nil { + t.Fatal(err) + } + resp, err := s.client.Do(req) + if err != nil { + t.Fatal(err) + } + var buf bytes.Buffer + _, err = io.Copy(&buf, resp.Body) + if err != nil { + t.Fatal(err) + } + err = resp.Body.Close() + if err != nil { + t.Fatal(err) + } + if got, want := buf.String(), "Some text content"; got != want { + t.Errorf("got %q, want %q", got, want) + } + if resp.StatusCode != http.StatusOK { + t.Errorf("response status code isn't 200 OK: %v", resp.StatusCode) + } + if resp.Header.Get(XFromCache) != "1" { + t.Errorf(`XFromCache header isn't "1": %v`, resp.Header.Get(XFromCache)) + } + } + { + req, err := http.NewRequest("GET", s.server.URL+"/range", nil) + if err != nil { + t.Fatal(err) + } + req.Header.Set("range", "bytes=4-9") + resp, err := s.client.Do(req) + if err != nil { + t.Fatal(err) + } + var buf bytes.Buffer + _, err = io.Copy(&buf, 
resp.Body) + if err != nil { + t.Fatal(err) + } + err = resp.Body.Close() + if err != nil { + t.Fatal(err) + } + if got, want := buf.String(), " text "; got != want { + t.Errorf("got %q, want %q", got, want) + } + if resp.StatusCode != http.StatusPartialContent { + t.Errorf("response status code isn't 206 Partial Content: %v", resp.StatusCode) + } + } +} + +func TestGetOnlyIfCachedHit(t *testing.T) { + resetTest() + { + req, err := http.NewRequest("GET", s.server.URL, nil) + if err != nil { + t.Fatal(err) + } + resp, err := s.client.Do(req) + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() + if resp.Header.Get(XFromCache) != "" { + t.Fatal("XFromCache header isn't blank") + } + } + { + req, err := http.NewRequest("GET", s.server.URL, nil) + if err != nil { + t.Fatal(err) + } + req.Header.Add("cache-control", "only-if-cached") + resp, err := s.client.Do(req) + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() + if resp.Header.Get(XFromCache) != "1" { + t.Fatalf(`XFromCache header isn't "1": %v`, resp.Header.Get(XFromCache)) + } + if resp.StatusCode != http.StatusOK { + t.Fatalf("response status code isn't 200 OK: %v", resp.StatusCode) + } + } +} + +func TestGetOnlyIfCachedMiss(t *testing.T) { + resetTest() + req, err := http.NewRequest("GET", s.server.URL, nil) + if err != nil { + t.Fatal(err) + } + req.Header.Add("cache-control", "only-if-cached") + resp, err := s.client.Do(req) + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() + if resp.Header.Get(XFromCache) != "" { + t.Fatal("XFromCache header isn't blank") + } + if resp.StatusCode != http.StatusGatewayTimeout { + t.Fatalf("response status code isn't 504 GatewayTimeout: %v", resp.StatusCode) + } +} + +func TestGetNoStoreRequest(t *testing.T) { + resetTest() + req, err := http.NewRequest("GET", s.server.URL, nil) + if err != nil { + t.Fatal(err) + } + req.Header.Add("Cache-Control", "no-store") + { + resp, err := s.client.Do(req) + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() + if resp.Header.Get(XFromCache) != "" { + t.Fatal("XFromCache header isn't blank") + } + } + { + resp, err := s.client.Do(req) + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() + if resp.Header.Get(XFromCache) != "" { + t.Fatal("XFromCache header isn't blank") + } + } +} + +func TestGetNoStoreResponse(t *testing.T) { + resetTest() + req, err := http.NewRequest("GET", s.server.URL+"/nostore", nil) + if err != nil { + t.Fatal(err) + } + { + resp, err := s.client.Do(req) + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() + if resp.Header.Get(XFromCache) != "" { + t.Fatal("XFromCache header isn't blank") + } + } + { + resp, err := s.client.Do(req) + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() + if resp.Header.Get(XFromCache) != "" { + t.Fatal("XFromCache header isn't blank") + } + } +} + +func TestGetWithEtag(t *testing.T) { + resetTest() + req, err := http.NewRequest("GET", s.server.URL+"/etag", nil) + if err != nil { + t.Fatal(err) + } + { + resp, err := s.client.Do(req) + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() + if resp.Header.Get(XFromCache) != "" { + t.Fatal("XFromCache header isn't blank") + } + } + { + resp, err := s.client.Do(req) + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() + if resp.Header.Get(XFromCache) != "1" { + t.Fatalf(`XFromCache header isn't "1": %v`, resp.Header.Get(XFromCache)) + } + // additional assertions to verify that 304 response is converted properly + if resp.StatusCode != http.StatusOK { + t.Fatalf("response 
status code isn't 200 OK: %v", resp.StatusCode) + } + if _, ok := resp.Header["Connection"]; ok { + t.Fatalf("Connection header isn't absent") + } + } +} + +func TestGetWithLastModified(t *testing.T) { + resetTest() + req, err := http.NewRequest("GET", s.server.URL+"/lastmodified", nil) + if err != nil { + t.Fatal(err) + } + { + resp, err := s.client.Do(req) + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() + if resp.Header.Get(XFromCache) != "" { + t.Fatal("XFromCache header isn't blank") + } + } + { + resp, err := s.client.Do(req) + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() + if resp.Header.Get(XFromCache) != "1" { + t.Fatalf(`XFromCache header isn't "1": %v`, resp.Header.Get(XFromCache)) + } + } +} + +func TestGetWithVary(t *testing.T) { + resetTest() + req, err := http.NewRequest("GET", s.server.URL+"/varyaccept", nil) + if err != nil { + t.Fatal(err) + } + req.Header.Set("Accept", "text/plain") + { + resp, err := s.client.Do(req) + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() + if resp.Header.Get("Vary") != "Accept" { + t.Fatalf(`Vary header isn't "Accept": %v`, resp.Header.Get("Vary")) + } + } + { + resp, err := s.client.Do(req) + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() + if resp.Header.Get(XFromCache) != "1" { + t.Fatalf(`XFromCache header isn't "1": %v`, resp.Header.Get(XFromCache)) + } + } + req.Header.Set("Accept", "text/html") + { + resp, err := s.client.Do(req) + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() + if resp.Header.Get(XFromCache) != "" { + t.Fatal("XFromCache header isn't blank") + } + } + req.Header.Set("Accept", "") + { + resp, err := s.client.Do(req) + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() + if resp.Header.Get(XFromCache) != "" { + t.Fatal("XFromCache header isn't blank") + } + } +} + +func TestGetWithDoubleVary(t *testing.T) { + resetTest() + req, err := http.NewRequest("GET", s.server.URL+"/doublevary", nil) + if err != nil { + t.Fatal(err) + } + req.Header.Set("Accept", "text/plain") + req.Header.Set("Accept-Language", "da, en-gb;q=0.8, en;q=0.7") + { + resp, err := s.client.Do(req) + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() + if resp.Header.Get("Vary") == "" { + t.Fatalf(`Vary header is blank`) + } + } + { + resp, err := s.client.Do(req) + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() + if resp.Header.Get(XFromCache) != "1" { + t.Fatalf(`XFromCache header isn't "1": %v`, resp.Header.Get(XFromCache)) + } + } + req.Header.Set("Accept-Language", "") + { + resp, err := s.client.Do(req) + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() + if resp.Header.Get(XFromCache) != "" { + t.Fatal("XFromCache header isn't blank") + } + } + req.Header.Set("Accept-Language", "da") + { + resp, err := s.client.Do(req) + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() + if resp.Header.Get(XFromCache) != "" { + t.Fatal("XFromCache header isn't blank") + } + } +} + +func TestGetWith2VaryHeaders(t *testing.T) { + resetTest() + // Tests that multiple Vary headers' comma-separated lists are + // merged. See https://github.com/gregjones/httpcache/issues/27. 
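+ // A cached response is only reused when every header named in Vary matches the new request.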
+ const ( + accept = "text/plain" + acceptLanguage = "da, en-gb;q=0.8, en;q=0.7" + ) + req, err := http.NewRequest("GET", s.server.URL+"/2varyheaders", nil) + if err != nil { + t.Fatal(err) + } + req.Header.Set("Accept", accept) + req.Header.Set("Accept-Language", acceptLanguage) + { + resp, err := s.client.Do(req) + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() + if resp.Header.Get("Vary") == "" { + t.Fatalf(`Vary header is blank`) + } + } + { + resp, err := s.client.Do(req) + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() + if resp.Header.Get(XFromCache) != "1" { + t.Fatalf(`XFromCache header isn't "1": %v`, resp.Header.Get(XFromCache)) + } + } + req.Header.Set("Accept-Language", "") + { + resp, err := s.client.Do(req) + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() + if resp.Header.Get(XFromCache) != "" { + t.Fatal("XFromCache header isn't blank") + } + } + req.Header.Set("Accept-Language", "da") + { + resp, err := s.client.Do(req) + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() + if resp.Header.Get(XFromCache) != "" { + t.Fatal("XFromCache header isn't blank") + } + } + req.Header.Set("Accept-Language", acceptLanguage) + req.Header.Set("Accept", "") + { + resp, err := s.client.Do(req) + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() + if resp.Header.Get(XFromCache) != "" { + t.Fatal("XFromCache header isn't blank") + } + } + req.Header.Set("Accept", "image/png") + { + resp, err := s.client.Do(req) + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() + if resp.Header.Get(XFromCache) != "" { + t.Fatal("XFromCache header isn't blank") + } + } + { + resp, err := s.client.Do(req) + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() + if resp.Header.Get(XFromCache) != "1" { + t.Fatalf(`XFromCache header isn't "1": %v`, resp.Header.Get(XFromCache)) + } + } +} + +func TestGetVaryUnused(t *testing.T) { + resetTest() + req, err := http.NewRequest("GET", s.server.URL+"/varyunused", nil) + if err != nil { + t.Fatal(err) + } + req.Header.Set("Accept", "text/plain") + { + resp, err := s.client.Do(req) + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() + if resp.Header.Get("Vary") == "" { + t.Fatalf(`Vary header is blank`) + } + } + { + resp, err := s.client.Do(req) + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() + if resp.Header.Get(XFromCache) != "1" { + t.Fatalf(`XFromCache header isn't "1": %v`, resp.Header.Get(XFromCache)) + } + } +} + +func TestUpdateFields(t *testing.T) { + resetTest() + req, err := http.NewRequest("GET", s.server.URL+"/updatefields", nil) + if err != nil { + t.Fatal(err) + } + var counter, counter2 string + { + resp, err := s.client.Do(req) + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() + counter = resp.Header.Get("x-counter") + } + { + resp, err := s.client.Do(req) + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() + if resp.Header.Get(XFromCache) != "1" { + t.Fatalf(`XFromCache header isn't "1": %v`, resp.Header.Get(XFromCache)) + } + counter2 = resp.Header.Get("x-counter") + } + if counter == counter2 { + t.Fatalf(`both "x-counter" values are equal: %v %v`, counter, counter2) + } +} + +func TestParseCacheControl(t *testing.T) { + resetTest() + h := http.Header{} + for range parseCacheControl(h) { + t.Fatal("cacheControl should be empty") + } + + h.Set("cache-control", "no-cache") + { + cc := parseCacheControl(h) + if _, ok := cc["foo"]; ok { + t.Error(`Value "foo" shouldn't exist`) + } + noCache, ok := cc["no-cache"] + if !ok { 
+ t.Fatalf(`"no-cache" value isn't set`) + } + if noCache != "" { + t.Fatalf(`"no-cache" value isn't blank: %v`, noCache) + } + } + h.Set("cache-control", "no-cache, max-age=3600") + { + cc := parseCacheControl(h) + noCache, ok := cc["no-cache"] + if !ok { + t.Fatalf(`"no-cache" value isn't set`) + } + if noCache != "" { + t.Fatalf(`"no-cache" value isn't blank: %v`, noCache) + } + if cc["max-age"] != "3600" { + t.Fatalf(`"max-age" value isn't "3600": %v`, cc["max-age"]) + } + } +} + +func TestNoCacheRequestExpiration(t *testing.T) { + resetTest() + respHeaders := http.Header{} + respHeaders.Set("Cache-Control", "max-age=7200") + + reqHeaders := http.Header{} + reqHeaders.Set("Cache-Control", "no-cache") + if getFreshness(respHeaders, reqHeaders) != transparent { + t.Fatal("freshness isn't transparent") + } +} + +func TestNoCacheResponseExpiration(t *testing.T) { + resetTest() + respHeaders := http.Header{} + respHeaders.Set("Cache-Control", "no-cache") + respHeaders.Set("Expires", "Wed, 19 Apr 3000 11:43:00 GMT") + + reqHeaders := http.Header{} + if getFreshness(respHeaders, reqHeaders) != stale { + t.Fatal("freshness isn't stale") + } +} + +func TestReqMustRevalidate(t *testing.T) { + resetTest() + // not paying attention to request setting max-stale means never returning stale + // responses, so always acting as if must-revalidate is set + respHeaders := http.Header{} + + reqHeaders := http.Header{} + reqHeaders.Set("Cache-Control", "must-revalidate") + if getFreshness(respHeaders, reqHeaders) != stale { + t.Fatal("freshness isn't stale") + } +} + +func TestRespMustRevalidate(t *testing.T) { + resetTest() + respHeaders := http.Header{} + respHeaders.Set("Cache-Control", "must-revalidate") + + reqHeaders := http.Header{} + if getFreshness(respHeaders, reqHeaders) != stale { + t.Fatal("freshness isn't stale") + } +} + +func TestFreshExpiration(t *testing.T) { + resetTest() + now := time.Now() + respHeaders := http.Header{} + respHeaders.Set("date", now.Format(time.RFC1123)) + respHeaders.Set("expires", now.Add(time.Duration(2)*time.Second).Format(time.RFC1123)) + + reqHeaders := http.Header{} + if getFreshness(respHeaders, reqHeaders) != fresh { + t.Fatal("freshness isn't fresh") + } + + clock = &fakeClock{elapsed: 3 * time.Second} + if getFreshness(respHeaders, reqHeaders) != stale { + t.Fatal("freshness isn't stale") + } +} + +func TestMaxAge(t *testing.T) { + resetTest() + now := time.Now() + respHeaders := http.Header{} + respHeaders.Set("date", now.Format(time.RFC1123)) + respHeaders.Set("cache-control", "max-age=2") + + reqHeaders := http.Header{} + if getFreshness(respHeaders, reqHeaders) != fresh { + t.Fatal("freshness isn't fresh") + } + + clock = &fakeClock{elapsed: 3 * time.Second} + if getFreshness(respHeaders, reqHeaders) != stale { + t.Fatal("freshness isn't stale") + } +} + +func TestMaxAgeZero(t *testing.T) { + resetTest() + now := time.Now() + respHeaders := http.Header{} + respHeaders.Set("date", now.Format(time.RFC1123)) + respHeaders.Set("cache-control", "max-age=0") + + reqHeaders := http.Header{} + if getFreshness(respHeaders, reqHeaders) != stale { + t.Fatal("freshness isn't stale") + } +} + +func TestBothMaxAge(t *testing.T) { + resetTest() + now := time.Now() + respHeaders := http.Header{} + respHeaders.Set("date", now.Format(time.RFC1123)) + respHeaders.Set("cache-control", "max-age=2") + + reqHeaders := http.Header{} + reqHeaders.Set("cache-control", "max-age=0") + if getFreshness(respHeaders, reqHeaders) != stale { + t.Fatal("freshness isn't stale") + } +} + 
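The freshness tests above exercise the unexported `getFreshness` helper directly, swapping in a `fakeClock` to simulate elapsed time. End to end, the same `max-age` arithmetic surfaces through the public transport; a hedged sketch, using only `NewMemoryCacheTransport` and the `X-From-Cache` header (the string behind the tests' `XFromCache` constant) — the origin server and the sleep are illustrative:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"net/http/httptest"
	"time"

	"github.com/gregjones/httpcache"
)

func main() {
	hits := 0
	origin := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		hits++
		w.Header().Set("Cache-Control", "max-age=2")
		fmt.Fprint(w, "payload")
	}))
	defer origin.Close()

	client := http.Client{Transport: httpcache.NewMemoryCacheTransport()}

	get := func() {
		resp, err := client.Get(origin.URL)
		if err != nil {
			panic(err)
		}
		body, _ := ioutil.ReadAll(resp.Body) // the entry is stored once the body is fully read
		resp.Body.Close()
		fmt.Printf("body=%s fromCache=%q originHits=%d\n", body, resp.Header.Get("X-From-Cache"), hits)
	}

	get()                       // cache miss: originHits=1
	get()                       // fresh within max-age: fromCache="1", originHits still 1
	time.Sleep(3 * time.Second) // outlive max-age=2
	get()                       // stale, no validators to revalidate with: originHits=2
}
```

Seen through this lens, the `fakeClock` in the tests is simply a way to get the `time.Sleep` above for free.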
+func TestMinFreshWithExpires(t *testing.T) { + resetTest() + now := time.Now() + respHeaders := http.Header{} + respHeaders.Set("date", now.Format(time.RFC1123)) + respHeaders.Set("expires", now.Add(time.Duration(2)*time.Second).Format(time.RFC1123)) + + reqHeaders := http.Header{} + reqHeaders.Set("cache-control", "min-fresh=1") + if getFreshness(respHeaders, reqHeaders) != fresh { + t.Fatal("freshness isn't fresh") + } + + reqHeaders = http.Header{} + reqHeaders.Set("cache-control", "min-fresh=2") + if getFreshness(respHeaders, reqHeaders) != stale { + t.Fatal("freshness isn't stale") + } +} + +func TestEmptyMaxStale(t *testing.T) { + resetTest() + now := time.Now() + respHeaders := http.Header{} + respHeaders.Set("date", now.Format(time.RFC1123)) + respHeaders.Set("cache-control", "max-age=20") + + reqHeaders := http.Header{} + reqHeaders.Set("cache-control", "max-stale") + clock = &fakeClock{elapsed: 10 * time.Second} + if getFreshness(respHeaders, reqHeaders) != fresh { + t.Fatal("freshness isn't fresh") + } + + clock = &fakeClock{elapsed: 60 * time.Second} + if getFreshness(respHeaders, reqHeaders) != fresh { + t.Fatal("freshness isn't fresh") + } +} + +func TestMaxStaleValue(t *testing.T) { + resetTest() + now := time.Now() + respHeaders := http.Header{} + respHeaders.Set("date", now.Format(time.RFC1123)) + respHeaders.Set("cache-control", "max-age=10") + + reqHeaders := http.Header{} + reqHeaders.Set("cache-control", "max-stale=20") + clock = &fakeClock{elapsed: 5 * time.Second} + if getFreshness(respHeaders, reqHeaders) != fresh { + t.Fatal("freshness isn't fresh") + } + + clock = &fakeClock{elapsed: 15 * time.Second} + if getFreshness(respHeaders, reqHeaders) != fresh { + t.Fatal("freshness isn't fresh") + } + + clock = &fakeClock{elapsed: 30 * time.Second} + if getFreshness(respHeaders, reqHeaders) != stale { + t.Fatal("freshness isn't stale") + } +} + +func containsHeader(headers []string, header string) bool { + for _, v := range headers { + if http.CanonicalHeaderKey(v) == http.CanonicalHeaderKey(header) { + return true + } + } + return false +} + +func TestGetEndToEndHeaders(t *testing.T) { + resetTest() + var ( + headers http.Header + end2end []string + ) + + headers = http.Header{} + headers.Set("content-type", "text/html") + headers.Set("te", "deflate") + + end2end = getEndToEndHeaders(headers) + if !containsHeader(end2end, "content-type") { + t.Fatal(`doesn't contain "content-type" header`) + } + if containsHeader(end2end, "te") { + t.Fatal(`doesn't contain "te" header`) + } + + headers = http.Header{} + headers.Set("connection", "content-type") + headers.Set("content-type", "text/csv") + headers.Set("te", "deflate") + end2end = getEndToEndHeaders(headers) + if containsHeader(end2end, "connection") { + t.Fatal(`doesn't contain "connection" header`) + } + if containsHeader(end2end, "content-type") { + t.Fatal(`doesn't contain "content-type" header`) + } + if containsHeader(end2end, "te") { + t.Fatal(`doesn't contain "te" header`) + } + + headers = http.Header{} + end2end = getEndToEndHeaders(headers) + if len(end2end) != 0 { + t.Fatal(`non-zero end2end headers`) + } + + headers = http.Header{} + headers.Set("connection", "content-type") + end2end = getEndToEndHeaders(headers) + if len(end2end) != 0 { + t.Fatal(`non-zero end2end headers`) + } +} + +type transportMock struct { + response *http.Response + err error +} + +func (t transportMock) RoundTrip(req *http.Request) (resp *http.Response, err error) { + return t.response, t.err +} + +func TestStaleIfErrorRequest(t 
*testing.T) { + resetTest() + now := time.Now() + tmock := transportMock{ + response: &http.Response{ + Status: http.StatusText(http.StatusOK), + StatusCode: http.StatusOK, + Header: http.Header{ + "Date": []string{now.Format(time.RFC1123)}, + "Cache-Control": []string{"no-cache"}, + }, + Body: ioutil.NopCloser(bytes.NewBuffer([]byte("some data"))), + }, + err: nil, + } + tp := NewMemoryCacheTransport() + tp.Transport = &tmock + + // First time, response is cached on success + r, _ := http.NewRequest("GET", "http://somewhere.com/", nil) + r.Header.Set("Cache-Control", "stale-if-error") + resp, err := tp.RoundTrip(r) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("resp is nil") + } + + // On failure, response is returned from the cache + tmock.response = nil + tmock.err = errors.New("some error") + resp, err = tp.RoundTrip(r) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("resp is nil") + } +} + +func TestStaleIfErrorRequestLifetime(t *testing.T) { + resetTest() + now := time.Now() + tmock := transportMock{ + response: &http.Response{ + Status: http.StatusText(http.StatusOK), + StatusCode: http.StatusOK, + Header: http.Header{ + "Date": []string{now.Format(time.RFC1123)}, + "Cache-Control": []string{"no-cache"}, + }, + Body: ioutil.NopCloser(bytes.NewBuffer([]byte("some data"))), + }, + err: nil, + } + tp := NewMemoryCacheTransport() + tp.Transport = &tmock + + // First time, response is cached on success + r, _ := http.NewRequest("GET", "http://somewhere.com/", nil) + r.Header.Set("Cache-Control", "stale-if-error=100") + resp, err := tp.RoundTrip(r) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("resp is nil") + } + + // On failure, response is returned from the cache + tmock.response = nil + tmock.err = errors.New("some error") + resp, err = tp.RoundTrip(r) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("resp is nil") + } + + // Same for http errors + tmock.response = &http.Response{StatusCode: http.StatusInternalServerError} + tmock.err = nil + resp, err = tp.RoundTrip(r) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("resp is nil") + } + + // If failure last more than max stale, error is returned + clock = &fakeClock{elapsed: 200 * time.Second} + resp, err = tp.RoundTrip(r) + if err != tmock.err { + t.Fatalf("got err %v, want %v", err, tmock.err) + } +} + +func TestStaleIfErrorResponse(t *testing.T) { + resetTest() + now := time.Now() + tmock := transportMock{ + response: &http.Response{ + Status: http.StatusText(http.StatusOK), + StatusCode: http.StatusOK, + Header: http.Header{ + "Date": []string{now.Format(time.RFC1123)}, + "Cache-Control": []string{"no-cache, stale-if-error"}, + }, + Body: ioutil.NopCloser(bytes.NewBuffer([]byte("some data"))), + }, + err: nil, + } + tp := NewMemoryCacheTransport() + tp.Transport = &tmock + + // First time, response is cached on success + r, _ := http.NewRequest("GET", "http://somewhere.com/", nil) + resp, err := tp.RoundTrip(r) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("resp is nil") + } + + // On failure, response is returned from the cache + tmock.response = nil + tmock.err = errors.New("some error") + resp, err = tp.RoundTrip(r) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("resp is nil") + } +} + +func TestStaleIfErrorResponseLifetime(t *testing.T) { + resetTest() + now := time.Now() + tmock := transportMock{ + response: &http.Response{ + Status: http.StatusText(http.StatusOK), + StatusCode: http.StatusOK, + 
Header: http.Header{ + "Date": []string{now.Format(time.RFC1123)}, + "Cache-Control": []string{"no-cache, stale-if-error=100"}, + }, + Body: ioutil.NopCloser(bytes.NewBuffer([]byte("some data"))), + }, + err: nil, + } + tp := NewMemoryCacheTransport() + tp.Transport = &tmock + + // First time, response is cached on success + r, _ := http.NewRequest("GET", "http://somewhere.com/", nil) + resp, err := tp.RoundTrip(r) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("resp is nil") + } + + // On failure, response is returned from the cache + tmock.response = nil + tmock.err = errors.New("some error") + resp, err = tp.RoundTrip(r) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("resp is nil") + } + + // If failure lasts more than max stale, error is returned + clock = &fakeClock{elapsed: 200 * time.Second} + resp, err = tp.RoundTrip(r) + if err != tmock.err { + t.Fatalf("got err %v, want %v", err, tmock.err) + } +} diff --git a/vendor/github.com/inconshreveable/mousetrap/BUILD b/vendor/github.com/inconshreveable/mousetrap/BUILD index 1c90f49d556a..2b8e060c6e93 100644 --- a/vendor/github.com/inconshreveable/mousetrap/BUILD +++ b/vendor/github.com/inconshreveable/mousetrap/BUILD @@ -1,9 +1,4 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) +load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", @@ -17,6 +12,7 @@ go_library( "//conditions:default": [], }), importpath = "github.com/inconshreveable/mousetrap", + visibility = ["//visibility:public"], ) filegroup( @@ -30,4 +26,5 @@ filegroup( name = "all-srcs", srcs = [":package-srcs"], tags = ["automanaged"], + visibility = ["//visibility:public"], ) diff --git a/vendor/github.com/influxdata/influxdb/.dockerignore b/vendor/github.com/influxdata/influxdb/.dockerignore new file mode 100644 index 000000000000..378eac25d311 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/.dockerignore @@ -0,0 +1 @@ +build diff --git a/vendor/github.com/influxdata/influxdb/.gitignore b/vendor/github.com/influxdata/influxdb/.gitignore new file mode 100644 index 000000000000..6c13a672db3b --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/.gitignore @@ -0,0 +1,73 @@ +# Keep editor-specific, non-project specific ignore rules in global .gitignore: +# https://help.github.com/articles/ignoring-files/#create-a-global-gitignore + +*~ +src/ + +config.json +/bin/ + +/query/a.out* + +# ignore generated files.
+cmd/influxd/version.go + +# executables + +*.test + +influx_tsm +**/influx_tsm +!**/influx_tsm/ + +influx_stress +**/influx_stress +!**/influx_stress/ + +influxd +**/influxd +!**/influxd/ + +influx +**/influx +!**/influx/ + +influxdb +**/influxdb +!**/influxdb/ + +influx_inspect +**/influx_inspect +!**/influx_inspect/ + +/benchmark-tool +/main +/benchmark-storage +godef +gosym +gocode +inspect-raft + +# dependencies +out_rpm/ +packages/ + +# autoconf +autom4te.cache/ +config.log +config.status + +# log file +influxdb.log +benchmark.log + +# config file +config.toml + +# test data files +integration/migration_data/ + +# man outputs +man/*.xml +man/*.1 +man/*.1.gz diff --git a/vendor/github.com/influxdata/influxdb/.mention-bot b/vendor/github.com/influxdata/influxdb/.mention-bot new file mode 100644 index 000000000000..5f8689bb637c --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/.mention-bot @@ -0,0 +1,6 @@ +{ + "maxReviewers": 3, + "fileBlacklist": ["CHANGELOG.md"], + "userBlacklist": ["pauldix", "toddboom", "aviau", "mark-rushakoff"], + "requiredOrgs": ["influxdata"] +} diff --git a/vendor/github.com/influxdata/influxdb/BUILD b/vendor/github.com/influxdata/influxdb/BUILD new file mode 100644 index 000000000000..ab152276eb13 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/BUILD @@ -0,0 +1,31 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "errors.go", + "influxdb.go", + "node.go", + ], + importpath = "github.com/influxdata/influxdb", + visibility = ["//visibility:public"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//vendor/github.com/influxdata/influxdb/client:all-srcs", + "//vendor/github.com/influxdata/influxdb/models:all-srcs", + "//vendor/github.com/influxdata/influxdb/pkg/escape:all-srcs", + ], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/influxdata/influxdb/CHANGELOG.md b/vendor/github.com/influxdata/influxdb/CHANGELOG.md new file mode 100644 index 000000000000..fbf510f47b58 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/CHANGELOG.md @@ -0,0 +1,2446 @@ +## v1.2.0 [unreleased] + +### Release Notes + +### Deprecations + +The stress tool `influx_stress` will be removed in a subsequent release. We recommend using [`influx-stress`](https://github.com/influxdata/influx-stress) as a replacement. + +### Features + +- [#7066](https://github.com/influxdata/influxdb/issues/7066): Add support for secure transmission via collectd. +- [#7554](https://github.com/influxdata/influxdb/pull/7554): update latest dependencies with Godeps. +- [#7368](https://github.com/influxdata/influxdb/pull/7368): Introduce syntax for marking a partial response with chunking. +- [#7356](https://github.com/influxdata/influxdb/issues/7356): Use X-Forwarded-For IP address in HTTP logger if present. +- [#7601](https://github.com/influxdata/influxdb/issues/7601): Prune data in meta store for deleted shards. +- [#7669](https://github.com/influxdata/influxdb/issues/7669): Uncomment section headers from the default configuration file. +- [#7684](https://github.com/influxdata/influxdb/issues/7684): Update Go version to 1.7.4. +- [#7036](https://github.com/influxdata/influxdb/issues/7036): Switch logging to use structured logging everywhere.
+- [#7723](https://github.com/influxdata/influxdb/pull/7723): Remove the override of GOMAXPROCS. +- [#7633](https://github.com/influxdata/influxdb/pull/7633): improve write performance significantly. + +### Bugfixes + +- [#7621](https://github.com/influxdata/influxdb/issues/7621): Expand string and boolean fields when using a wildcard with `sample()`. +- [#7616](https://github.com/influxdata/influxdb/pull/7616): Fix chuid argument order in init script @ccasey +- [#7656](https://github.com/influxdata/influxdb/issues/7656): Fix cross-platform backup/restore @allenpetersen +- [#7650](https://github.com/influxdata/influxdb/issues/7650): Ensures that all user privileges associated with a database are removed when the database is dropped. +- [#7659](https://github.com/influxdata/influxdb/issues/7659): Fix CLI import bug when using self-signed SSL certificates. +- [#7698](https://github.com/influxdata/influxdb/pull/7698): CLI was caching db/rp for insert into statements. +- [#6527](https://github.com/influxdata/influxdb/issues/6527): 0.12.2 Influx CLI client PRECISION returns "Unknown precision.... + +- [#7396](https://github.com/influxdata/influxdb/issues/7396): CLI should use spaces for alignment, not tabs. +- [#7615](https://github.com/influxdata/influxdb/issues/7615): Reject invalid subscription urls @allenpetersen + +## v1.1.1 [unreleased] + +### Bugfixes + +- [#7625](https://github.com/influxdata/influxdb/issues/7625): Fix incorrect tag value in error message. +- [#7634](https://github.com/influxdata/influxdb/issues/7634): Return the time from a percentile call on an integer. + +## v1.1.0 [2016-11-14] + +### Release Notes + +This release is built with go 1.7.3 and provides many performance optimizations, stability changes and a few new query capabilities. If upgrading from a prior version, please read the configuration changes section below before upgrading. + +### Deprecations + +The admin interface is deprecated and will be removed in a subsequent release. The configuration setting to enable the admin UI is now disabled by default, but can be enabled if necessary. We recommend using [Chronograf](https://github.com/influxdata/chronograf) or [Grafana](https://github.com/grafana/grafana) as a replacement. + +### Configuration Changes + +The following configuration changes may need to be changed before upgrading to `1.1.0` from prior versions. + +#### `[admin]` Section + +* `enabled` now defaults to false. If you are currently using the admin interface, you will need to change this value to `true` to re-enable it. The admin interface is currently deprecated and will be removed in a subsequent release. + +#### `[data]` Section + +* `max-values-per-tag` was added with a default of 100,000, but can be disabled by setting it to `0`. Existing measurements with tags that exceed this limit will continue to load, but writes that would cause the tag cardinality to increase will be dropped and a `partial write` error will be returned to the caller. This limit can be used to prevent high cardinality tag values from being written to a measurement. +* `cache-max-memory-size` has been increased from `524288000` to `1048576000`. This setting is the maximum amount of RAM, in bytes, a shard cache can use before it rejects writes with an error. Setting this value to `0` disables the limit. +* `cache-snapshot-write-cold-duration` has been decreased from `1h` to `10m`. This setting determines how long values will stay in the shard cache while the shard is cold for writes.
+* `compact-full-write-cold-duration` has been decreased from `24h` to `4h`. The shorter duration allows cold shards to be compacted to an optimal state more quickly. + +### Features + +The query language has been extended with a few new features: + +* New `cumulative_sum` function - [#7388](https://github.com/influxdata/influxdb/pull/7388) +* New `linear` fill option - [#7408](https://github.com/influxdata/influxdb/pull/7408) +* Support `ON` for `SHOW` commands - [#7295](https://github.com/influxdata/influxdb/pull/7295) +* Support regex on field keys in select clause - [#7442](https://github.com/influxdata/influxdb/pull/7442) + +All Changes: + +- [#7415](https://github.com/influxdata/influxdb/pull/7415): Add sample function to query language. +- [#7403](https://github.com/influxdata/influxdb/pull/7403): Add `fill(linear)` to query language. +- [#7120](https://github.com/influxdata/influxdb/issues/7120): Add additional statistics to query executor. +- [#7135](https://github.com/influxdata/influxdb/pull/7135): Support enabling the HTTP service over a unix domain socket. Thanks @oiooj +- [#3634](https://github.com/influxdata/influxdb/issues/3634): Support mixed duration units. +- [#7099](https://github.com/influxdata/influxdb/pull/7099): Implement text/csv content encoding for the response writer. +- [#6992](https://github.com/influxdata/influxdb/issues/6992): Support tools for running async queries. +- [#7136](https://github.com/influxdata/influxdb/pull/7136): Update jwt-go dependency to version 3. +- [#6962](https://github.com/influxdata/influxdb/issues/6962): Support ON and use default database for SHOW commands. +- [#7268](https://github.com/influxdata/influxdb/pull/7268): More man pages for the other tools we package and compress man pages fully. +- [#7305](https://github.com/influxdata/influxdb/pull/7305): UDP Client: Split large points. Thanks @vlasad +- [#7115](https://github.com/influxdata/influxdb/issues/7115): Feature request: `influx inspect -export` should dump WAL files. +- [#7388](https://github.com/influxdata/influxdb/pull/7388): Implement cumulative_sum() function. +- [#7441](https://github.com/influxdata/influxdb/pull/7441): Speed up shutdown by closing shards concurrently. +- [#7146](https://github.com/influxdata/influxdb/issues/7146): Add max-values-per-tag to limit high tag cardinality data +- [#5955](https://github.com/influxdata/influxdb/issues/5955): Make regex work on field and dimension keys in SELECT clause. +- [#7470](https://github.com/influxdata/influxdb/pull/7470): Reduce map allocations when computing the TagSet of a measurement. +- [#6894](https://github.com/influxdata/influxdb/issues/6894): Support `INFLUX_USERNAME` and `INFLUX_PASSWORD` for setting username/password in the CLI. +- [#6896](https://github.com/influxdata/influxdb/issues/6896): Correctly read in input from a non-interactive stream for the CLI. +- [#7463](https://github.com/influxdata/influxdb/pull/7463): Make input plugin services open/close idempotent. +- [#7473](https://github.com/influxdata/influxdb/pull/7473): Align binary math expression streams by time. +- [#7281](https://github.com/influxdata/influxdb/pull/7281): Add stats for active compactions, compaction errors. +- [#7496](https://github.com/influxdata/influxdb/pull/7496): Filter out series within shards that do not have data for that series. +- [#7480](https://github.com/influxdata/influxdb/pull/7480): Improve compaction planning performance by caching tsm file stats.
+- [#7320](https://github.com/influxdata/influxdb/issues/7320): Update defaults in config for latest best practices +- [#7495](https://github.com/influxdata/influxdb/pull/7495): Rewrite regexes of the form host = /^server-a$/ to host = 'server-a', to take advantage of the tsdb index. +- [#6704](https://github.com/influxdata/influxdb/issues/6704): Optimize first/last when no group by interval is present. +- [#4461](https://github.com/influxdata/influxdb/issues/4461): Change default time boundaries for raw queries. + +### Bugfixes + +- [#7392](https://github.com/influxdata/influxdb/pull/7392): Enable https subscriptions to work with custom CA certificates. +- [#1834](https://github.com/influxdata/influxdb/issues/1834): Drop time when used as a tag or field key. +- [#7152](https://github.com/influxdata/influxdb/issues/7152): Decrement number of measurements only once when deleting the last series from a measurement. +- [#7177](https://github.com/influxdata/influxdb/issues/7177): Fix base64 encoding issue with /debug/vars stats. +- [#7196](https://github.com/influxdata/influxdb/issues/7196): Fix mmap dereferencing, fixes #7183, #7180 +- [#7013](https://github.com/influxdata/influxdb/issues/7013): Fix the dollar sign so it properly handles reserved keywords. +- [#7297](https://github.com/influxdata/influxdb/issues/7297): Use consistent column output from the CLI for column formatted responses. +- [#7231](https://github.com/influxdata/influxdb/issues/7231): Duplicate parsing bug in ALTER RETENTION POLICY. +- [#7285](https://github.com/influxdata/influxdb/issues/7285): Correctly use password-type field in Admin UI. Thanks @dandv! +- [#2792](https://github.com/influxdata/influxdb/issues/2792): Exceeding max retention policy duration gives incorrect error message +- [#7226](https://github.com/influxdata/influxdb/issues/7226): Fix database locked up when deleting shards +- [#7382](https://github.com/influxdata/influxdb/issues/7382): Shard stats include wal path tag so disk bytes make more sense. +- [#7385](https://github.com/influxdata/influxdb/pull/7385): Reduce query planning allocations +- [#7436](https://github.com/influxdata/influxdb/issues/7436): Remove accidentally added string support for the stddev call. +- [#7161](https://github.com/influxdata/influxdb/issues/7161): Drop measurement causes cache max memory exceeded error. +- [#7334](https://github.com/influxdata/influxdb/issues/7334): Panic with unread show series iterators during drop database +- [#7482](https://github.com/influxdata/influxdb/issues/7482): Fix issue where point would be written to wrong shard. +- [#7431](https://github.com/influxdata/influxdb/issues/7431): Remove /data/process_continuous_queries endpoint. +- [#7053](https://github.com/influxdata/influxdb/issues/7053): Delete statement returns an error when retention policy or database is specified +- [#7494](https://github.com/influxdata/influxdb/issues/7494): influx_inspect: export does not escape field keys. +- [#7526](https://github.com/influxdata/influxdb/issues/7526): Truncate the version string when linking to the documentation. +- [#7548](https://github.com/influxdata/influxdb/issues/7548): Fix output duration units for SHOW QUERIES. +- [#7564](https://github.com/influxdata/influxdb/issues/7564): Fix incorrect grouping when multiple aggregates are used with sparse data. 
+- [#7448](https://github.com/influxdata/influxdb/pull/7448): Fix Retention Policy Inconsistencies +- [#7606](https://github.com/influxdata/influxdb/pull/7606): Avoid deadlock when `max-row-limit` is hit. + +## v1.0.2 [2016-10-05] + +### Bugfixes + +- [#7150](https://github.com/influxdata/influxdb/issues/7150): Do not automatically reset the shard duration when using ALTER RETENTION POLICY +- [#5878](https://github.com/influxdata/influxdb/issues/5878): Ensure correct shard groups created when retention policy has been altered. +- [#7391](https://github.com/influxdata/influxdb/issues/7391): Fix RLE integer decoding producing negative numbers +- [#7335](https://github.com/influxdata/influxdb/pull/7335): Avoid stat syscall when planning compactions +- [#7330](https://github.com/influxdata/influxdb/issues/7330): Subscription data loss under high write load + +## v1.0.1 [2016-09-26] + +### Bugfixes + +- [#7271](https://github.com/influxdata/influxdb/issues/7271): Fixing typo within example configuration file. Thanks @andyfeller! +- [#7270](https://github.com/influxdata/influxdb/issues/7270): Implement time math for lazy time literals. +- [#7272](https://github.com/influxdata/influxdb/issues/7272): Report cmdline and memstats in /debug/vars. +- [#7299](https://github.com/influxdata/influxdb/issues/7299): Ensure fieldsCreated stat available in shard measurement. +- [#6846](https://github.com/influxdata/influxdb/issues/6846): Read an invalid JSON response as an error in the influx client. +- [#7110](https://github.com/influxdata/influxdb/issues/7110): Skip past points at the same time in derivative call within a merged series. +- [#7226](https://github.com/influxdata/influxdb/issues/7226): Fix database locked up when deleting shards +- [#7315](https://github.com/influxdata/influxdb/issues/7315): Prevent users from manually using system queries since incorrect use would result in a panic. + +## v1.0.0 [2016-09-08] + +### Release Notes + +### Breaking changes + +* `max-series-per-database` was added with a default of 1M but can be disabled by setting it to `0`. Existing databases with series that exceed this limit will continue to load, but writes that would create new series will fail. +* Config option `[cluster]` has been replaced with `[coordinator]` +* Support for config options `[collectd]` and `[opentsdb]` has been removed; use `[[collectd]]` and `[[opentsdb]]` instead. +* Config option `data-logging-enabled` within the `[data]` section has been renamed to `trace-logging-enabled`, and defaults to `false`. +* The keywords `IF`, `EXISTS`, and `NOT` were removed for this release. This means you no longer need to specify `IF EXISTS` for `DROP DATABASE` or `IF NOT EXISTS` for `CREATE DATABASE`. If these are specified, a query parse error is returned. +* The Shard `writePointsFail` stat has been renamed to `writePointsErr` for consistency with other stats. + +With this release the systemd configuration files for InfluxDB will use the system configured default for logging and will no longer write files to `/var/log/influxdb` by default. On most systems, the logs will be directed to the systemd journal and can be accessed by `journalctl -u influxdb.service`. Consult the systemd journald documentation for configuring journald. + +### Features + +- [#3541](https://github.com/influxdata/influxdb/issues/3451): Update SHOW FIELD KEYS to return the field type with the field key. +- [#6609](https://github.com/influxdata/influxdb/pull/6609): Add support for JWT token authentication.
+- [#6559](https://github.com/influxdata/influxdb/issues/6559): Teach the http service how to enforce connection limits. +- [#6623](https://github.com/influxdata/influxdb/pull/6623): Speed up drop database +- [#6519](https://github.com/influxdata/influxdb/issues/6519): Support cast syntax for selecting a specific type. +- [#6654](https://github.com/influxdata/influxdb/pull/6654): Add new HTTP statistics to monitoring +- [#6664](https://github.com/influxdata/influxdb/pull/6664): Adds monitoring statistic for on-disk shard size. +- [#2926](https://github.com/influxdata/influxdb/issues/2926): Support bound parameters in the parser. +- [#1310](https://github.com/influxdata/influxdb/issues/1310): Add https-private-key option to httpd config. +- [#6621](https://github.com/influxdata/influxdb/pull/6621): Add Holt-Winter forecasting function. +- [#6655](https://github.com/influxdata/influxdb/issues/6655): Add HTTP(s) based subscriptions. +- [#5906](https://github.com/influxdata/influxdb/issues/5906): Dynamically update the documentation link in the admin UI. +- [#6686](https://github.com/influxdata/influxdb/pull/6686): Optimize timestamp run-length decoding +- [#6713](https://github.com/influxdata/influxdb/pull/6713): Reduce allocations during query parsing. +- [#3733](https://github.com/influxdata/influxdb/issues/3733): Modify the default retention policy name and make it configurable. +- [#6812](https://github.com/influxdata/influxdb/pull/6812): Make httpd logger closer to Common (& combined) Log Format. +- [#5655](https://github.com/influxdata/influxdb/issues/5655): Support specifying a retention policy for the graphite service. +- [#6820](https://github.com/influxdata/influxdb/issues/6820): Add NodeID to execution options +- [#4532](https://github.com/influxdata/influxdb/issues/4532): Support regex selection in SHOW TAG VALUES for the key. +- [#6889](https://github.com/influxdata/influxdb/pull/6889): Update help and remove unused config options from the configuration file. +- [#6900](https://github.com/influxdata/influxdb/pull/6900): Trim BOM from Windows Notepad-saved config files. +- [#6938](https://github.com/influxdata/influxdb/issues/6938): Added favicon +- [#6507](https://github.com/influxdata/influxdb/issues/6507): Refactor monitor service to avoid expvar and write monitor statistics on a truncated time interval. +- [#6805](https://github.com/influxdata/influxdb/issues/6805): Allow any variant of the help option to trigger the help. +- [#5499](https://github.com/influxdata/influxdb/issues/5499): Add stats and diagnostics to the TSM engine. +- [#6959](https://github.com/influxdata/influxdb/issues/6959): Return 403 Forbidden when authentication succeeds but authorization fails. +- [#1110](https://github.com/influxdata/influxdb/issues/1110): Support loading a folder for collectd typesdb files. +- [#6928](https://github.com/influxdata/influxdb/issues/6928): Run continuous query for multiple buckets rather than one per bucket. +- [#5500](https://github.com/influxdata/influxdb/issues/5500): Add extra trace logging to tsm engine. +- [#6909](https://github.com/influxdata/influxdb/issues/6909): Log the CQ execution time when continuous query logging is enabled. +- [#7046](https://github.com/influxdata/influxdb/pull/7046): Add tsm file export to influx_inspect tool. +- [#7011](https://github.com/influxdata/influxdb/issues/7011): Create man pages for commands. +- [#7050](https://github.com/influxdata/influxdb/pull/7050): Update go package library dependencies. 
+- [#5750](https://github.com/influxdata/influxdb/issues/5750): Support wildcards in aggregate functions. +- [#7065](https://github.com/influxdata/influxdb/issues/7065): Remove IF EXISTS/IF NOT EXISTS from influxql language. +- [#7095](https://github.com/influxdata/influxdb/pull/7095): Add MaxSeriesPerDatabase config setting. +- [#7199](https://github.com/influxdata/influxdb/pull/7199): Add mode function. Thanks @agaurav. +- [#7194](https://github.com/influxdata/influxdb/issues/7194): Support negative timestamps for the query engine. +- [#7172](https://github.com/influxdata/influxdb/pull/7172): Write path stats + +### Bugfixes + +- [#6604](https://github.com/influxdata/influxdb/pull/6604): Remove old cluster code +- [#6618](https://github.com/influxdata/influxdb/pull/6618): Optimize shard loading +- [#6629](https://github.com/influxdata/influxdb/issues/6629): query-log-enabled in config not ignored anymore. +- [#6607](https://github.com/influxdata/influxdb/issues/6607): SHOW TAG VALUES accepts != and !~ in WHERE clause. +- [#6649](https://github.com/influxdata/influxdb/issues/6649): Make sure admin exists before authenticating query. +- [#6644](https://github.com/influxdata/influxdb/issues/6644): Print the query executor's stack trace on a panic to the log. +- [#6650](https://github.com/influxdata/influxdb/issues/6650): Data race when dropping a database immediately after writing to it +- [#6235](https://github.com/influxdata/influxdb/issues/6235): Fix measurement field panic in tsm1 engine. +- [#6663](https://github.com/influxdata/influxdb/issues/6663): Fixing panic in SHOW FIELD KEYS. +- [#6624](https://github.com/influxdata/influxdb/issues/6624): Ensure clients requesting gzip encoded bodies don't receive empty body +- [#6652](https://github.com/influxdata/influxdb/issues/6652): Fix panic: interface conversion: tsm1.Value is \*tsm1.StringValue, not \*tsm1.FloatValue +- [#6406](https://github.com/influxdata/influxdb/issues/6406): Max index entries exceeded +- [#6557](https://github.com/influxdata/influxdb/issues/6557): Overwriting points on large series can cause memory spikes during compactions +- [#6611](https://github.com/influxdata/influxdb/issues/6611): Queries slow down hundreds times after overwriting points +- [#6641](https://github.com/influxdata/influxdb/issues/6641): Fix read tombstones: EOF +- [#6661](https://github.com/influxdata/influxdb/issues/6661): Disable limit optimization when using an aggregate. +- [#6676](https://github.com/influxdata/influxdb/issues/6676): Ensures client sends correct precision when inserting points. +- [#2048](https://github.com/influxdata/influxdb/issues/2048): Check that retention policies exist before creating CQ +- [#6702](https://github.com/influxdata/influxdb/issues/6702): Fix SELECT statement required privileges. +- [#6701](https://github.com/influxdata/influxdb/issues/6701): Filter out sources that do not match the shard database/retention policy. +- [#6683](https://github.com/influxdata/influxdb/issues/6683): Fix compaction planning re-compacting large TSM files +- [#6693](https://github.com/influxdata/influxdb/pull/6693): Truncate the shard group end time if it exceeds MaxNanoTime. +- [#6672](https://github.com/influxdata/influxdb/issues/6672): Accept points with trailing whitespace. +- [#6599](https://github.com/influxdata/influxdb/issues/6599): Ensure that future points considered in SHOW queries. +- [#6720](https://github.com/influxdata/influxdb/issues/6720): Concurrent map read write panic. 
Thanks @arussellsaw +- [#6727](https://github.com/influxdata/influxdb/issues/6727): queries with strings that look like dates end up with date types, not string types +- [#6250](https://github.com/influxdata/influxdb/issues/6250): Slow startup time +- [#6753](https://github.com/influxdata/influxdb/issues/6753): Prevent panic if there are no values. +- [#6685](https://github.com/influxdata/influxdb/issues/6685): Batch SELECT INTO / CQ writes +- [#6756](https://github.com/influxdata/influxdb/issues/6756): Set X-Influxdb-Version header on every request (even 404 requests). +- [#6760](https://github.com/influxdata/influxdb/issues/6760): Prevent panic in concurrent auth cache write +- [#6771](https://github.com/influxdata/influxdb/issues/6771): Fix the point validation parser to identify and sort tags correctly. +- [#6835](https://github.com/influxdata/influxdb/pull/6835): Include sysvinit-tools as an rpm dependency. +- [#6834](https://github.com/influxdata/influxdb/pull/6834): Add port to all graphite log output to help with debugging multiple endpoints +- [#6850](https://github.com/influxdata/influxdb/pull/6850): Modify the max nanosecond time to be one nanosecond less. +- [#6824](https://github.com/influxdata/influxdb/issues/6824): Remove systemd output redirection. +- [#6859](https://github.com/influxdata/influxdb/issues/6859): Set the condition cursor instead of aux iterator when creating a nil condition cursor. +- [#6869](https://github.com/influxdata/influxdb/issues/6869): Remove FieldCodec from tsdb package. +- [#6882](https://github.com/influxdata/influxdb/pull/6882): Remove a double lock in the tsm1 index writer. +- [#6883](https://github.com/influxdata/influxdb/pull/6883): Rename dumptsmdev to dumptsm in influx_inspect. +- [#6864](https://github.com/influxdata/influxdb/pull/6864): Allow a non-admin to call "use" for the influx cli. +- [#6855](https://github.com/influxdata/influxdb/pull/6855): Update `stress/v2` to work with clusters, ssl, and username/password auth. Code cleanup +- [#6738](https://github.com/influxdata/influxdb/issues/6738): Time sorting broken with overwritten points +- [#6829](https://github.com/influxdata/influxdb/issues/6829): Fix panic: runtime error: index out of range +- [#6911](https://github.com/influxdata/influxdb/issues/6911): Fix fill(previous) when used with math operators. +- [#6934](https://github.com/influxdata/influxdb/pull/6934): Fix regex binary encoding for a measurement. +- [#6942](https://github.com/influxdata/influxdb/pull/6942): Fix panic: truncate the slice when merging the caches. +- [#6708](https://github.com/influxdata/influxdb/issues/6708): Drop writes from before the retention policy time window. +- [#6968](https://github.com/influxdata/influxdb/issues/6968): Always use the demo config when outputting a new config. +- [#6986](https://github.com/influxdata/influxdb/pull/6986): update connection settings when changing hosts in cli. +- [#6965](https://github.com/influxdata/influxdb/pull/6965): Minor improvements to init script. Removes sysvinit-utils as package dependency. 
+- [#6952](https://github.com/influxdata/influxdb/pull/6952): Fix compaction planning with large TSM files +- [#6819](https://github.com/influxdata/influxdb/issues/6819): Database unresponsive after DROP MEASUREMENT +- [#6796](https://github.com/influxdata/influxdb/issues/6796): Out of Memory Error when Dropping Measurement +- [#6946](https://github.com/influxdata/influxdb/issues/6946): Duplicate data for the same timestamp +- [#7043](https://github.com/influxdata/influxdb/pull/7043): Remove limiter from walkShards +- [#5501](https://github.com/influxdata/influxdb/issues/5501): Queries against files that have just been compacted need to point to new files +- [#6595](https://github.com/influxdata/influxdb/issues/6595): Fix full compactions conflicting with level compactions +- [#7081](https://github.com/influxdata/influxdb/issues/7081): Hardcode auto generated RP names to autogen +- [#7088](https://github.com/influxdata/influxdb/pull/7088): Fix UDP pointsRx being incremented twice. +- [#7080](https://github.com/influxdata/influxdb/pull/7080): Ensure IDs can't clash when managing Continuous Queries. +- [#6990](https://github.com/influxdata/influxdb/issues/6990): Fix panic parsing empty key +- [#7084](https://github.com/influxdata/influxdb/pull/7084): Tombstone memory improvements +- [#6543](https://github.com/influxdata/influxdb/issues/6543): Fix parseFill to check for fill ident before attempting to parse an expression. +- [#7032](https://github.com/influxdata/influxdb/pull/7032): Copy tags in influx_stress to avoid a concurrent write panic on a map. +- [#7028](https://github.com/influxdata/influxdb/pull/7028): Do not run continuous queries that have no time span. +- [#7025](https://github.com/influxdata/influxdb/issues/7025): Move the CQ interval by the group by offset. +- [#7125](https://github.com/influxdata/influxdb/pull/7125): Ensure gzip writer is closed in influx_inspect export +- [#7127](https://github.com/influxdata/influxdb/pull/7127): Concurrent series limit +- [#7119](https://github.com/influxdata/influxdb/pull/7119): Fix CREATE DATABASE when dealing with default values. +- [#7218](https://github.com/influxdata/influxdb/issues/7218): Fix alter retention policy when all options are used. +- [#7225](https://github.com/influxdata/influxdb/issues/7225): runtime: goroutine stack exceeds 1000000000-byte limit +- [#7240](https://github.com/influxdata/influxdb/issues/7240): Allow blank lines in the line protocol input. +- [#7243](https://github.com/influxdata/influxdb/issues/7243): Optimize queries that compare a tag value to an empty string. +- [#7074](https://github.com/influxdata/influxdb/issues/7074): Continuous full compactions + +## v0.13.0 [2016-05-12] + +### Release Notes + +With this release InfluxDB is moving to Go v1.6. + +### Features + +- [#6213](https://github.com/influxdata/influxdb/pull/6213): Make logging output location more programmatically configurable. +- [#6237](https://github.com/influxdata/influxdb/issues/6237): Enable continuous integration testing on Windows platform via AppVeyor. Thanks @mvadu +- [#6263](https://github.com/influxdata/influxdb/pull/6263): Reduce UDP Service allocation size. +- [#6228](https://github.com/influxdata/influxdb/pull/6228): Support for multiple listeners for collectd and OpenTSDB inputs. +- [#6292](https://github.com/influxdata/influxdb/issues/6292): Allow percentile to be used as a selector.
+- [#5707](https://github.com/influxdata/influxdb/issues/5707): Return a deprecated message when IF NOT EXISTS is used. +- [#6334](https://github.com/influxdata/influxdb/pull/6334): Allow environment variables to be set per input type. +- [#6394](https://github.com/influxdata/influxdb/pull/6394): Allow time math with integer timestamps. +- [#3247](https://github.com/influxdata/influxdb/issues/3247): Implement derivatives across intervals for aggregate queries. +- [#3166](https://github.com/influxdata/influxdb/issues/3166): Sort the series keys inside of a tag set so output is deterministic. +- [#1856](https://github.com/influxdata/influxdb/issues/1856): Add `elapsed` function that returns the time delta between subsequent points. +- [#5502](https://github.com/influxdata/influxdb/issues/5502): Add checksum verification to TSM inspect tool +- [#6444](https://github.com/influxdata/influxdb/pull/6444): Allow setting the config path through an environment variable and default config path. +- [#3558](https://github.com/influxdata/influxdb/issues/3558): Support field math inside a WHERE clause. +- [#6429](https://github.com/influxdata/influxdb/issues/6429): Log slow queries if they pass a configurable threshold. +- [#4675](https://github.com/influxdata/influxdb/issues/4675): Allow derivative() function to be used with ORDER BY desc. +- [#6483](https://github.com/influxdata/influxdb/pull/6483): Delete series support for TSM +- [#6484](https://github.com/influxdata/influxdb/pull/6484): Query language support for DELETE +- [#6290](https://github.com/influxdata/influxdb/issues/6290): Add POST /query endpoint and warning messages for using GET with write operations. +- [#6494](https://github.com/influxdata/influxdb/issues/6494): Support booleans for min() and max(). +- [#2074](https://github.com/influxdata/influxdb/issues/2074): Support offset argument in the GROUP BY time(...) call. +- [#6533](https://github.com/influxdata/influxdb/issues/6533): Optimize SHOW SERIES +- [#6534](https://github.com/influxdata/influxdb/pull/6534): Move to Go v1.6.2 (over Go v1.4.3) +- [#6522](https://github.com/influxdata/influxdb/pull/6522): Dump TSM files to line protocol +- [#6585](https://github.com/influxdata/influxdb/pull/6585): Parallelize iterators +- [#6502](https://github.com/influxdata/influxdb/pull/6502): Add ability to copy shard via rpc calls. Remove deprecated copier service. +- [#6593](https://github.com/influxdata/influxdb/pull/6593): Add ability to create snapshots of shards. + +### Bugfixes + +- [#6283](https://github.com/influxdata/influxdb/pull/6283): Fix GROUP BY tag to produce consistent results when a series has no tags. +- [#3773](https://github.com/influxdata/influxdb/issues/3773): Support empty tags for all WHERE equality operations. +- [#6270](https://github.com/influxdata/influxdb/issues/6270): tsm1 query engine alloc reduction +- [#6287](https://github.com/influxdata/influxdb/issues/6287): Fix data race in Influx Client. +- [#6252](https://github.com/influxdata/influxdb/pull/6252): Remove TSDB listener accept message @simnv +- [#6202](https://github.com/influxdata/influxdb/pull/6202): Check default SHARD DURATION when recreating the same database. +- [#6296](https://github.com/influxdata/influxdb/issues/6296): Allow the implicit time field to be renamed again. +- [#6294](https://github.com/influxdata/influxdb/issues/6294): Fix panic running influx_inspect info. +- [#6382](https://github.com/influxdata/influxdb/pull/6382): Removed dead code from the old query engine. 
+- [#3369](https://github.com/influxdata/influxdb/issues/3369): Detect when a timer literal will overflow or underflow the query engine. +- [#6398](https://github.com/influxdata/influxdb/issues/6398): Fix CREATE RETENTION POLICY parsing so it doesn't consume tokens it shouldn't. +- [#6425](https://github.com/influxdata/influxdb/pull/6425): Close idle tcp connections in HTTP client to prevent tcp conn leak. +- [#6109](https://github.com/influxdata/influxdb/issues/6109): Cache maximum memory size exceeded on startup +- [#6427](https://github.com/influxdata/influxdb/pull/6427): Fix setting uint config options via env vars +- [#6458](https://github.com/influxdata/influxdb/pull/6458): Make it clear when the CLI version is unknown. +- [#3883](https://github.com/influxdata/influxdb/issues/3883): Improve query sanitization to prevent a password leak in the logs. +- [#6462](https://github.com/influxdata/influxdb/pull/6462): Add safer locking to CreateFieldIfNotExists +- [#6361](https://github.com/influxdata/influxdb/pull/6361): Fix cluster/pool release of connection +- [#6470](https://github.com/influxdata/influxdb/pull/6470): Remove SHOW SERVERS & DROP SERVER support +- [#6477](https://github.com/influxdata/influxdb/pull/6477): Don't catch SIGQUIT or SIGHUP signals. +- [#6468](https://github.com/influxdata/influxdb/issues/6468): Panic with truncated wal segments +- [#6491](https://github.com/influxdata/influxdb/pull/6491): Fix the CLI not to enter an infinite loop when the liner has an error. +- [#6457](https://github.com/influxdata/influxdb/issues/6457): Retention policy cleanup does not remove series +- [#6480](https://github.com/influxdata/influxdb/issues/6480): Fix SHOW statements' rewriting bug +- [#6505](https://github.com/influxdata/influxdb/issues/6505): Add regex literal to InfluxQL spec for FROM clause. +- [#5890](https://github.com/influxdata/influxdb/issues/5890): Return the time with a selector when there is no group by interval. +- [#6496](https://github.com/influxdata/influxdb/issues/6496): Fix parsing escaped series key when loading database index +- [#6495](https://github.com/influxdata/influxdb/issues/6495): Fix aggregate returns when data is missing from some shards. +- [#6439](https://github.com/influxdata/influxdb/issues/6439): Overwriting points returning old values +- [#6261](https://github.com/influxdata/influxdb/issues/6261): High CPU usage and slow query with DISTINCT + +## v0.12.2 [2016-04-20] + +### Bugfixes + +- [#6271](https://github.com/influxdata/influxdb/issues/6271): Fixed deadlock in tsm1 file store. +- [#6413](https://github.com/influxdata/influxdb/pull/6413): Prevent goroutine leak from persistent http connections. Thanks @aaronknister. +- [#6414](https://github.com/influxdata/influxdb/pull/6414): Send "Connection: close" header for queries. +- [#6419](https://github.com/influxdata/influxdb/issues/6419): Fix panic in transform iterator on division. @thbourlove +- [#6379](https://github.com/influxdata/influxdb/issues/6379): Validate the first argument to percentile() is a variable. +- [#6383](https://github.com/influxdata/influxdb/pull/6383): Recover from a panic during query execution. + +## v0.12.1 [2016-04-08] + +### Bugfixes + +- [#6225](https://github.com/influxdata/influxdb/pull/6225): Refresh admin assets.
+- [#6206](https://github.com/influxdata/influxdb/issues/6206): Handle nil values from the tsm1 cursor correctly. +- [#6190](https://github.com/influxdata/influxdb/pull/6190): Fix race on measurementFields. +- [#6248](https://github.com/influxdata/influxdb/issues/6248): Panic using incorrectly quoted "queries" field key. +- [#6257](https://github.com/influxdata/influxdb/issues/6257): CreateShardGroup was incrementing meta data index even when it was idempotent. +- [#6223](https://github.com/influxdata/influxdb/issues/6223): Failure to start/run on Windows. Thanks @mvadu +- [#6229](https://github.com/influxdata/influxdb/issues/6229): Fixed aggregate queries with no GROUP BY to include the end time. + + +## v0.12.0 [2016-04-05] +### Release Notes +Upgrading to this release requires a little more than just installing the new binary and starting it up. The upgrade process is very quick and should only require a minute of downtime or less. Details on [upgrading to 0.12 are here](https://docs.influxdata.com/influxdb/v0.12/administration/upgrading/). + +This release removes all of the old clustering code. It operates as a standalone server. For a free open source HA setup see the [InfluxDB Relay](https://github.com/influxdata/influxdb-relay). + +### Features + +- [#6012](https://github.com/influxdata/influxdb/pull/6012): Add DROP SHARD support. +- [#6025](https://github.com/influxdata/influxdb/pull/6025): Remove deprecated JSON write path. +- [#5744](https://github.com/influxdata/influxdb/issues/5744): Add integer literal support to the query language. +- [#5939](https://github.com/influxdata/influxdb/issues/5939): Support viewing and killing running queries. +- [#6073](https://github.com/influxdata/influxdb/pull/6073): Iterator stats +- [#6079](https://github.com/influxdata/influxdb/issues/6079): Limit the maximum number of concurrent queries. +- [#6075](https://github.com/influxdata/influxdb/issues/6075): Limit the maximum running time of a query. +- [#6102](https://github.com/influxdata/influxdb/issues/6102): Limit series count in selection +- [#6077](https://github.com/influxdata/influxdb/issues/6077): Limit point count in selection. +- [#6078](https://github.com/influxdata/influxdb/issues/6078): Limit bucket count in selection. +- [#6060](https://github.com/influxdata/influxdb/pull/6060): Add configurable shard duration to retention policies +- [#6116](https://github.com/influxdata/influxdb/pull/6116): Allow `httpd` service to be extensible for routes +- [#6111](https://github.com/influxdata/influxdb/pull/6111): Add ability to build static assets. Improved handling of TAR and ZIP package outputs. +- [#1825](https://github.com/influxdata/influxdb/issues/1825): Implement difference function. +- [#6112](https://github.com/influxdata/influxdb/issues/6112): Implement simple moving average function. +- [#6149](https://github.com/influxdata/influxdb/pull/6149): Kill running queries when server is shut down. +- [#5372](https://github.com/influxdata/influxdb/pull/5372): Faster shard loading +- [#6148](https://github.com/influxdata/influxdb/pull/6148): Build script is now compatible with Python 3. Added ability to create detached signatures for packages. Build script now uses Python logging facility for messages. +- [#6115](https://github.com/influxdata/influxdb/issues/6115): Support chunking query results mid-series. Limit non-chunked output. +- [#6166](https://github.com/influxdata/influxdb/pull/6166): Teach influxdb client how to use chunked queries and use in the CLI.
+- [#6158](https://github.com/influxdata/influxdb/pull/6158): Update influxd to detect an upgrade from `0.11` to `0.12`. Minor restore bug fixes. +- [#6193](https://github.com/influxdata/influxdb/pull/6193): Fix TypeError when processing empty results in admin UI. Thanks @jonseymour! + +### Bugfixes + +- [#5152](https://github.com/influxdata/influxdb/issues/5152): Fix where filters when a tag and a filter are combined with OR. +- [#5728](https://github.com/influxdata/influxdb/issues/5728): Properly handle semi-colons as part of the main query loop. +- [#6065](https://github.com/influxdata/influxdb/pull/6065): Wait for a process termination on influxdb restart @simnv +- [#5252](https://github.com/influxdata/influxdb/issues/5252): Release tarballs contain specific attributes on '.' +- [#5554](https://github.com/influxdata/influxdb/issues/5554): Can't run in alpine linux +- [#6094](https://github.com/influxdata/influxdb/issues/6094): Ensure CREATE RETENTION POLICY and CREATE CONTINUOUS QUERY are idempotent in the correct way. +- [#6061](https://github.com/influxdata/influxdb/issues/6061): [0.12 / master] POST to /write does not write points if request has header 'Content-Type: application/x-www-form-urlencoded' +- [#6140](https://github.com/influxdata/influxdb/issues/6140): Ensure Shard engine not accessed when closed. +- [#6110](https://github.com/influxdata/influxdb/issues/6110): Fix for 0.9 upgrade path when using RPM +- [#6131](https://github.com/influxdata/influxdb/issues/6061): Fix write throughput regression with large number of measurements +- [#6152](https://github.com/influxdata/influxdb/issues/6152): Allow SHARD DURATION to be specified in isolation when creating a database +- [#6153](https://github.com/influxdata/influxdb/issues/6153): Check SHARD DURATION when recreating the same database +- [#6178](https://github.com/influxdata/influxdb/issues/6178): Ensure SHARD DURATION is checked when recreating a retention policy + +## v0.11.1 [2016-03-31] + +### Bugfixes + +- [#6092](https://github.com/influxdata/influxdb/issues/6092): Upgrading directly from 0.9.6.1 to 0.11.0 fails +- [#6129](https://github.com/influxdata/influxdb/pull/6129): Fix default continuous query lease host +- [#6121](https://github.com/influxdata/influxdb/issues/6121): Fix panic: slice index out of bounds in TSM index +- [#6168](https://github.com/influxdata/influxdb/pull/6168): Remove per measurement statistics +- [#3932](https://github.com/influxdata/influxdb/issues/3932): Invalid timestamp format should throw an error. + +## v0.11.0 [2016-03-22] + +### Release Notes + +There were some important breaking changes in this release. Here's a list of the important things to know before upgrading: + +* [SHOW SERIES output has changed](https://github.com/influxdata/influxdb/pull/5937). See [new output in this test diff](https://github.com/influxdata/influxdb/pull/5937/files#diff-0cb24c2b7420b4db507ee3496c371845L263). +* [SHOW TAG VALUES output has changed](https://github.com/influxdata/influxdb/pull/5853) +* JSON write endpoint is disabled by default and will be removed in the next release. You can [turn it back on](https://github.com/influxdata/influxdb/pull/5512) in this release. +* b1/bz1 shards are no longer supported. You must migrate all old shards to TSM using [the migration tool](https://github.com/influxdata/influxdb/blob/master/cmd/influx_tsm/README.md). +* On queries to create databases, retention policies, and users, the default behavior has changed to create `IF NOT EXISTS`.
If they already exist, no error will be returned. +* On queries with a selector like `min`, `max`, `first`, and `last` the time returned will be the time for the bucket of the group by window. [Selectors for the time for the specific point](https://github.com/influxdata/influxdb/issues/5926) will be added later. + +### Features + +- [#5596](https://github.com/influxdata/influxdb/pull/5596): Build improvements for ARM architectures. Also removed `--goarm` and `--pkgarch` build flags. +- [#5541](https://github.com/influxdata/influxdb/pull/5541): Client: Support for adding custom TLS Config for HTTP client. +- [#4299](https://github.com/influxdata/influxdb/pull/4299): Client: Reject uint64 Client.Point.Field values. Thanks @arussellsaw +- [#5550](https://github.com/influxdata/influxdb/pull/5550): Enabled golint for tsdb/engine/wal. @gabelev +- [#5419](https://github.com/influxdata/influxdb/pull/5419): Graphite: Support matching tags multiple times Thanks @m4ce +- [#5598](https://github.com/influxdata/influxdb/pull/5598): Client: Add Ping to v2 client @PSUdaemon +- [#4125](https://github.com/influxdata/influxdb/pull/4125): Admin UI: Fetch and display server version on connect. Thanks @alexiri! +- [#5681](https://github.com/influxdata/influxdb/pull/5681): Stats: Add durations, number currently active to httpd and query executor +- [#5602](https://github.com/influxdata/influxdb/pull/5602): Simplify cluster startup for scripting and deployment +- [#5562](https://github.com/influxdata/influxdb/pull/5562): Graphite: Support matching fields multiple times (@chrusty) +- [#5666](https://github.com/influxdata/influxdb/pull/5666): Manage dependencies with gdm +- [#5512](https://github.com/influxdata/influxdb/pull/5512): HTTP: Add config option to enable HTTP JSON write path which is now disabled by default. +- [#5336](https://github.com/influxdata/influxdb/pull/5366): Enabled golint for influxql. @gabelev +- [#5706](https://github.com/influxdata/influxdb/pull/5706): Cluster setup cleanup +- [#5691](https://github.com/influxdata/influxdb/pull/5691): Remove associated shard data when retention policies are dropped. +- [#5758](https://github.com/influxdata/influxdb/pull/5758): TSM engine stats for cache, WAL, and filestore. Thanks @jonseymour +- [#5844](https://github.com/influxdata/influxdb/pull/5844): Tag TSM engine stats with database and retention policy +- [#5593](https://github.com/influxdata/influxdb/issues/5593): Modify `SHOW TAG VALUES` output for the new query engine to normalize the output. +- [#5862](https://github.com/influxdata/influxdb/pull/5862): Make Admin UI dynamically fetch both client and server versions +- [#2715](https://github.com/influxdata/influxdb/issues/2715): Support using field regex comparisons in the WHERE clause +- [#5994](https://github.com/influxdata/influxdb/issues/5994): Single server +- [#5737](https://github.com/influxdata/influxdb/pull/5737): Admin UI: Display results of multiple queries, not just the first query. Thanks @Vidhuran! +- [#5720](https://github.com/influxdata/influxdb/pull/5720): Admin UI: New button to generate permalink to queries + +### Bugfixes + +- [#5182](https://github.com/influxdata/influxdb/pull/5182): Graphite: Fix an issue where the default template would be used instead of a more specific one. Thanks @flisky +- [#5489](https://github.com/influxdata/influxdb/pull/5489): Fixes multiple issues causing tests to fail on windows. 
Thanks @runner-mei +- [#5594](https://github.com/influxdata/influxdb/pull/5594): Fix missing url params on lease redirect - @oldmantaiter +- [#5376](https://github.com/influxdata/influxdb/pull/5376): Fix golint issues in models package. @nuss-justin +- [#5535](https://github.com/influxdata/influxdb/pull/5535): Update README for referring to Collectd +- [#5590](https://github.com/influxdata/influxdb/pull/5590): Fix panic when dropping subscription for unknown retention policy. +- [#5375](https://github.com/influxdata/influxdb/pull/5375): Lint tsdb and tsdb/engine package @nuss-justin +- [#5624](https://github.com/influxdata/influxdb/pull/5624): Fix golint issues in client v2 package @PSUDaemon +- [#5510](https://github.com/influxdata/influxdb/pull/5510): Optimize ReducePercentile @bsideup +- [#5557](https://github.com/influxdata/influxdb/issues/5630): Fixes panic when surrounding the select statement arguments in brackets +- [#5628](https://github.com/influxdata/influxdb/issues/5628): Crashed the server with a bad derivative query +- [#5532](https://github.com/influxdata/influxdb/issues/5532): user passwords not changeable in cluster +- [#5695](https://github.com/influxdata/influxdb/pull/5695): Remove meta servers from node.json +- [#5606](https://github.com/influxdata/influxdb/issues/5606): TSM conversion reproducibly drops data silently +- [#5656](https://github.com/influxdata/influxdb/issues/5656): influx\_tsm: panic during conversion +- [#5696](https://github.com/influxdata/influxdb/issues/5696): Do not drop the database when creating with a retention policy +- [#5724](https://github.com/influxdata/influxdb/issues/5724): influx\_tsm doesn't close file handles properly +- [#5664](https://github.com/influxdata/influxdb/issues/5664): panic in model.Points.scanTo #5664 +- [#5716](https://github.com/influxdata/influxdb/pull/5716): models: improve handling of points with empty field names or with no fields. +- [#5719](https://github.com/influxdata/influxdb/issues/5719): Fix cache not deduplicating points +- [#5754](https://github.com/influxdata/influxdb/issues/5754): Adding a node as meta only results in a data node also being registered +- [#5787](https://github.com/influxdata/influxdb/pull/5787): HTTP: Add QueryAuthorizer instance to httpd service’s handler. 
@chris-ramon +- [#5753](https://github.com/influxdata/influxdb/pull/5753): Ensures that drop-type commands work correctly in a cluster +- [#5814](https://github.com/influxdata/influxdb/issues/5814): Run CQs with the same name from different databases +- [#5699](https://github.com/influxdata/influxdb/issues/5699): Fix potential thread safety issue in cache @jonseymour +- [#5832](https://github.com/influxdata/influxdb/issues/5832): tsm: cache: need to check that snapshot has been sorted @jonseymour +- [#5841](https://github.com/influxdata/influxdb/pull/5841): Reduce tsm allocations by converting time.Time to int64 +- [#5842](https://github.com/influxdata/influxdb/issues/5842): Add SeriesList binary marshaling +- [#5854](https://github.com/influxdata/influxdb/issues/5854): failures of tests in tsdb/engine/tsm1 when compiled with go master +- [#5610](https://github.com/influxdata/influxdb/issues/5610): Write into fully-replicated cluster is not replicated across all shards +- [#5880](https://github.com/influxdata/influxdb/issues/5880): TCP connection closed after write (regression/change from 0.9.6) +- [#5865](https://github.com/influxdata/influxdb/issues/5865): Conversion to tsm fails with exceeds max index value +- [#5924](https://github.com/influxdata/influxdb/issues/5924): Missing data after using influx\_tsm +- [#5937](https://github.com/influxdata/influxdb/pull/5937): Rewrite SHOW SERIES to use query engine +- [#5949](https://github.com/influxdata/influxdb/issues/5949): Return error message when improper types are used in SELECT +- [#5963](https://github.com/influxdata/influxdb/pull/5963): Fix possible deadlock +- [#4688](https://github.com/influxdata/influxdb/issues/4688): admin UI doesn't display results for some SHOW queries +- [#6006](https://github.com/influxdata/influxdb/pull/6006): Fix deadlock while running backups +- [#5965](https://github.com/influxdata/influxdb/issues/5965): InfluxDB panic crashes while parsing "-" as Float +- [#5835](https://github.com/influxdata/influxdb/issues/5835): Make CREATE USER default to IF NOT EXISTS +- [#6042](https://github.com/influxdata/influxdb/issues/6042): CreateDatabase failure on Windows, regression from v0.11.0 RC @mvadu +- [#5889](https://github.com/influxdata/influxdb/issues/5889): Fix writing partial TSM index when flush file fails + +## v0.10.3 [2016-03-09] + +### Bugfixes + +- [#5924](https://github.com/influxdata/influxdb/issues/5924): Missing data after using influx\_tsm +- [#5594](https://github.com/influxdata/influxdb/pull/5594): Fix missing url params on lease redirect - @oldmantaiter +- [#5716](https://github.com/influxdata/influxdb/pull/5716): models: improve handling of points with empty field names or with no fields. + +## v0.10.2 [2016-03-03] + +### Bugfixes + +- [#5719](https://github.com/influxdata/influxdb/issues/5719): Fix cache not deduplicating points +- [#5699](https://github.com/influxdata/influxdb/issues/5699): Fix potential thread safety issue in cache @jonseymour +- [#5832](https://github.com/influxdata/influxdb/issues/5832): tsm: cache: need to check that snapshot has been sorted @jonseymour +- [#5857](https://github.com/influxdata/influxdb/issues/5857): panic in tsm1.Values.Deduplicate +- [#5861](https://github.com/influxdata/influxdb/pull/5861): Fix panic when dropping subscription for unknown retention policy. 
+- [#5880](https://github.com/influxdata/influxdb/issues/5880): TCP connection closed after write (regression/change from 0.9.6) +- [#5865](https://github.com/influxdata/influxdb/issues/5865): Conversion to tsm fails with exceeds max index value + +## v0.10.1 [2016-02-18] + +### Bugfixes +- [#5696](https://github.com/influxdata/influxdb/issues/5696): Do not drop the database when creating with a retention policy +- [#5724](https://github.com/influxdata/influxdb/issues/5724): influx\_tsm doesn't close file handles properly +- [#5606](https://github.com/influxdata/influxdb/issues/5606): TSM conversion reproducibly drops data silently +- [#5656](https://github.com/influxdata/influxdb/issues/5656): influx\_tsm: panic during conversion +- [#5303](https://github.com/influxdata/influxdb/issues/5303): Protect against stateful mappers returning nothing in the raw executor + +## v0.10.0 [2016-02-04] + +### Release Notes + +This release now uses the TSM storage engine. Old bz1 and b1 shards can still be read, but in a future release you will be required to migrate old shards to TSM. For newly created shards and new installations, the TSM storage engine will be used. + +This release also changes how clusters are set up. The config file has changed, so have a look at the new example. Also, upgrading a single node works, but for upgrading clusters, you'll need help from us. Send us a note at contact@influxdb.com if you need assistance upgrading a cluster. + +### Features +- [#5183](https://github.com/influxdata/influxdb/pull/5183): CLI confirms database exists when USE executed. Thanks @pires +- [#5201](https://github.com/influxdata/influxdb/pull/5201): Allow max UDP buffer size to be configurable. Thanks @sebito91 +- [#5194](https://github.com/influxdata/influxdb/pull/5194): Custom continuous query options per query rather than per node. +- [#5224](https://github.com/influxdata/influxdb/pull/5224): Online backup/incremental backup. Restore (for TSM). +- [#5226](https://github.com/influxdata/influxdb/pull/5226): b\*1 to tsm1 shard conversion tool. +- [#5459](https://github.com/influxdata/influxdb/pull/5459): Create `/status` endpoint for health checks. +- [#5460](https://github.com/influxdata/influxdb/pull/5460): Prevent exponential growth in CLI history. Thanks @sczk! +- [#5522](https://github.com/influxdata/influxdb/pull/5522): Optimize tsm1 cache to reduce memory consumption and GC scan time. +- [#5565](https://github.com/influxdata/influxdb/pull/5565): Add configuration for time precision with UDP services. - @tpitale + +### Bugfixes +- [#5129](https://github.com/influxdata/influxdb/pull/5129): Ensure precision flag is respected by CLI. Thanks @e-dard +- [#5042](https://github.com/influxdata/influxdb/issues/5042): Count with fill(none) will drop 0 valued intervals. +- [#4735](https://github.com/influxdata/influxdb/issues/4735): Fix panic when merging empty results. +- [#5016](https://github.com/influxdata/influxdb/pull/5016): Don't panic if Meta data directory not writable. Thanks @oiooj +- [#5059](https://github.com/influxdata/influxdb/pull/5059): Fix unmarshal of database error by client code. Thanks @farshidtz +- [#4940](https://github.com/influxdata/influxdb/pull/4940): Fix distributed aggregate query error. Thanks @li-ang +- [#4622](https://github.com/influxdata/influxdb/issues/4622): Fix panic when passing too large of timestamps to OpenTSDB input.
+- [#5064](https://github.com/influxdata/influxdb/pull/5064): Full support for parentheses in SELECT clause, fixes [#5054](https://github.com/influxdata/influxdb/issues/5054). Thanks @mengjinglei
+- [#5079](https://github.com/influxdata/influxdb/pull/5079): Ensure tsm WAL encoding buffer can handle large batches.
+- [#4303](https://github.com/influxdata/influxdb/issues/4303): Don't drop measurements or series from multiple databases.
+- [#5078](https://github.com/influxdata/influxdb/issues/5078): influx non-interactive mode - INSERT must be handled. Thanks @grange74
+- [#5178](https://github.com/influxdata/influxdb/pull/5178): SHOW FIELD shouldn't consider VALUES to be valid. Thanks @pires
+- [#5158](https://github.com/influxdata/influxdb/pull/5158): Fix panic when writing invalid input to the line protocol.
+- [#5264](https://github.com/influxdata/influxdb/pull/5264): Fix panic: runtime error: slice bounds out of range
+- [#5186](https://github.com/influxdata/influxdb/pull/5186): Fix database creation with retention statement parsing. Fixes [#5077](https://github.com/influxdata/influxdb/issues/5077). Thanks @pires
+- [#5193](https://github.com/influxdata/influxdb/issues/5193): Missing data a minute before current time. Comes back later.
+- [#5350](https://github.com/influxdata/influxdb/issues/5350): 'influxd backup' should create backup directory
+- [#5262](https://github.com/influxdata/influxdb/issues/5262): Fix a panic when a tag value was empty.
+- [#5382](https://github.com/influxdata/influxdb/pull/5382): Fixes some escaping bugs with tag keys and values.
+- [#5349](https://github.com/influxdata/influxdb/issues/5349): Validate metadata blob for 'influxd backup'
+- [#5469](https://github.com/influxdata/influxdb/issues/5469): Conversion from bz1 to tsm doesn't work as described
+- [#5449](https://github.com/influxdata/influxdb/issues/5449): panic when dropping collectd points
+- [#5455](https://github.com/influxdata/influxdb/issues/5455): panic: runtime error: slice bounds out of range when loading corrupted wal segment
+- [#5478](https://github.com/influxdata/influxdb/issues/5478): panic: interface conversion: interface is float64, not int64
+- [#5475](https://github.com/influxdata/influxdb/issues/5475): Ensure appropriate exit code returned for non-interactive use of CLI.
+- [#5479](https://github.com/influxdata/influxdb/issues/5479): Bringing up a node as a meta only node causes panic
+- [#5504](https://github.com/influxdata/influxdb/issues/5504): Creating a retention policy on a nonexistent DB crashes InfluxDB
+- [#5505](https://github.com/influxdata/influxdb/issues/5505): Clear authCache in meta.Client when password changes.
+- [#5244](https://github.com/influxdata/influxdb/issues/5244): panic: ensure it's safe to close engine multiple times.
+
+## v0.9.6 [2015-12-09]
+
+### Release Notes
+This release has an updated design and implementation of the TSM storage engine. If you had been using tsm1 as your storage engine prior to this release (either 0.9.5.x or 0.9.6 nightly builds) you will have to start with a fresh database.
+
+If you had TSM configuration options set, those have been updated. See the updated sample configuration for more details: https://github.com/influxdata/influxdb/blob/master/etc/config.sample.toml#L98-L125
+
+### Features
+- [#4790](https://github.com/influxdata/influxdb/pull/4790): Allow openTSDB point-level error logging to be disabled
+- [#4728](https://github.com/influxdata/influxdb/pull/4728): SHOW SHARD GROUPS. By @mateuszdyminski
+- [#4841](https://github.com/influxdata/influxdb/pull/4841): Improve point parsing speed. Lint models package. Thanks @e-dard!
+- [#4889](https://github.com/influxdata/influxdb/pull/4889): Implement close notifier and timeout on executors
+- [#2676](https://github.com/influxdata/influxdb/issues/2676), [#4866](https://github.com/influxdata/influxdb/pull/4866): Add support for specifying default retention policy in database create. Thanks @pires!
+- [#4848](https://github.com/influxdata/influxdb/pull/4848): Added framework for cluster integration testing.
+- [#4872](https://github.com/influxdata/influxdb/pull/4872): Add option to disable logging for meta service.
+- [#4787](https://github.com/influxdata/influxdb/issues/4787): Now builds on Solaris
+
+### Bugfixes
+- [#4849](https://github.com/influxdata/influxdb/issues/4849): Derivative works with count, mean, median, sum, first, last, max, min, and percentile (see the query sketch at the end of this section).
+- [#4984](https://github.com/influxdata/influxdb/pull/4984): Allow math on fields, fixes regression. Thanks @mengjinglei
+- [#4666](https://github.com/influxdata/influxdb/issues/4666): Fix panic in derivative with invalid values.
+- [#4404](https://github.com/influxdata/influxdb/issues/4404): Return better error for currently unsupported DELETE queries.
+- [#4858](https://github.com/influxdata/influxdb/pull/4858): Validate nested aggregations in queries. Thanks @viru
+- [#4921](https://github.com/influxdata/influxdb/pull/4921): Error responses should be JSON-formatted. Thanks @pires
+- [#4974](https://github.com/influxdata/influxdb/issues/4974): Fix Data Race in TSDB when setting measurement field name
+- [#4876](https://github.com/influxdata/influxdb/pull/4876): Complete lint for monitor and services packages. Thanks @e-dard!
+- [#4833](https://github.com/influxdata/influxdb/pull/4833), [#4927](https://github.com/influxdata/influxdb/pull/4927): Fix SHOW MEASUREMENTS for clusters. Thanks @li-ang!
+- [#4918](https://github.com/influxdata/influxdb/pull/4918): Restore can hang, fixes [issue #4806](https://github.com/influxdata/influxdb/issues/4806). Thanks @oiooj
+- [#4855](https://github.com/influxdata/influxdb/pull/4855): Fix race in TCP proxy shutdown. Thanks @runner-mei!
+- [#4411](https://github.com/influxdata/influxdb/pull/4411): Add Access-Control-Expose-Headers to HTTP responses
+- [#4768](https://github.com/influxdata/influxdb/pull/4768): CLI history skips blank lines. Thanks @pires
+- [#4766](https://github.com/influxdata/influxdb/pull/4766): Update CLI usage output. Thanks @aneshas
+- [#4804](https://github.com/influxdata/influxdb/pull/4804): Complete lint for services/admin. Thanks @nii236
+- [#4796](https://github.com/influxdata/influxdb/pull/4796): Check point without fields. Thanks @CrazyJvm
+- [#4815](https://github.com/influxdata/influxdb/pull/4815): Added `Time` field into aggregate output across the cluster. Thanks @li-ang
+- [#4817](https://github.com/influxdata/influxdb/pull/4817): Fix Min, Max, Top, Bottom functions when querying a distributed node. Thanks @mengjinglei
+- [#4878](https://github.com/influxdata/influxdb/pull/4878): Fix String() function for several InfluxQL statement types
+- [#4913](https://github.com/influxdata/influxdb/pull/4913): Fix b1 flush deadlock
+- [#3170](https://github.com/influxdata/influxdb/issues/3170), [#4921](https://github.com/influxdata/influxdb/pull/4921): Database does not exist error is now JSON. Thanks @pires!
+- [#5029](https://github.com/influxdata/influxdb/pull/5029): Drop UDP point on bad parse.
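+
+For illustration, a minimal sketch of the derivative-over-aggregate query that [#4849](https://github.com/influxdata/influxdb/issues/4849) covers, issued through the Go `client/v2` package; the address, database, and measurement names are assumptions, not taken from this changelog:
+
+```go
+package main
+
+import (
+	"fmt"
+	"log"
+
+	client "github.com/influxdata/influxdb/client/v2"
+)
+
+func main() {
+	// Connect to a local instance (address is an assumption).
+	c, err := client.NewHTTPClient(client.HTTPConfig{Addr: "http://localhost:8086"})
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer c.Close()
+
+	// DERIVATIVE wrapped around an aggregate, the combination exercised by #4849.
+	q := client.NewQuery("SELECT DERIVATIVE(MEAN(value)) FROM cpu WHERE time > now() - 1h GROUP BY time(10m)", "mydb", "s")
+	resp, err := c.Query(q)
+	if err != nil {
+		log.Fatal(err)
+	}
+	if resp.Error() != nil {
+		log.Fatal(resp.Error())
+	}
+	fmt.Println(resp.Results)
+}
+```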
+
+## v0.9.5 [2015-11-20]
+
+### Release Notes
+
+- Field names for the internal stats have been changed to be more in line with Go style.
+- 0.9.5 is reverting to Go 1.4.2 due to unresolved issues with Go 1.5.1.
+
+There are breaking changes in this release:
+- The filesystem hierarchy for packages has been changed, namely:
+ - Binaries are now located in `/usr/bin` (previously `/opt/influxdb`)
+ - Configuration files are now located in `/etc/influxdb` (previously `/etc/opt/influxdb`)
+ - Data directories are now located in `/var/lib/influxdb` (previously `/var/opt/influxdb`)
+ - Scripts are now located in `/usr/lib/influxdb/scripts` (previously `/opt/influxdb`)
+
+### Features
+- [#4702](https://github.com/influxdata/influxdb/pull/4702): Support 'history' command at CLI
+- [#4098](https://github.com/influxdata/influxdb/issues/4098): Enable `golint` on the code base - uuid subpackage
+- [#4141](https://github.com/influxdata/influxdb/pull/4141): Control whether each query should be logged
+- [#4065](https://github.com/influxdata/influxdb/pull/4065): Added precision support in cmd client. Thanks @sbouchex
+- [#4140](https://github.com/influxdata/influxdb/pull/4140): Make storage engine configurable
+- [#4161](https://github.com/influxdata/influxdb/pull/4161): Implement bottom selector function
+- [#4204](https://github.com/influxdata/influxdb/pull/4204): Allow module-level selection for SHOW STATS
+- [#4208](https://github.com/influxdata/influxdb/pull/4208): Allow module-level selection for SHOW DIAGNOSTICS
+- [#4196](https://github.com/influxdata/influxdb/pull/4196): Export tsdb.Iterator
+- [#4198](https://github.com/influxdata/influxdb/pull/4198): Add basic cluster-service stats
+- [#4262](https://github.com/influxdata/influxdb/pull/4262): Allow configuration of UDP retention policy
+- [#4265](https://github.com/influxdata/influxdb/pull/4265): Add statistics for Hinted-Handoff
+- [#4284](https://github.com/influxdata/influxdb/pull/4284): Add exponential backoff for hinted-handoff failures
+- [#4310](https://github.com/influxdata/influxdb/pull/4310): Support dropping non-Raft nodes. Work mostly by @corylanou
+- [#4348](https://github.com/influxdata/influxdb/pull/4348): Public ApplyTemplate function for graphite parser.
+- [#4178](https://github.com/influxdata/influxdb/pull/4178): Support fields in graphite parser. Thanks @roobert!
+- [#4409](https://github.com/influxdata/influxdb/pull/4409): wire up INTO queries.
+- [#4379](https://github.com/influxdata/influxdb/pull/4379): Auto-create database for UDP input.
+- [#4375](https://github.com/influxdata/influxdb/pull/4375): Add Subscriptions so data can be 'forked' out of InfluxDB to another third party.
+- [#4506](https://github.com/influxdata/influxdb/pull/4506): Register with Enterprise service and upload stats, if token is available.
+- [#4516](https://github.com/influxdata/influxdb/pull/4516): Hinted-handoff refactor, with new statistics and diagnostics
+- [#4501](https://github.com/influxdata/influxdb/pull/4501): Allow filtering SHOW MEASUREMENTS by regex.
+- [#4547](https://github.com/influxdata/influxdb/pull/4547): Allow any node to be dropped, even a raft node (even the leader).
+- [#4600](https://github.com/influxdata/influxdb/pull/4600): ping endpoint can wait for leader
+- [#4648](https://github.com/influxdata/influxdb/pull/4648): UDP Client (v2 client)
+- [#4690](https://github.com/influxdata/influxdb/pull/4690): SHOW SHARDS now includes database and policy. Thanks @pires
+- [#4676](https://github.com/influxdata/influxdb/pull/4676): UDP service listener performance enhancements
+- [#4659](https://github.com/influxdata/influxdb/pull/4659): Support IF EXISTS for DROP DATABASE. Thanks @ch33hau
+- [#4721](https://github.com/influxdata/influxdb/pull/4721): Export tsdb.InterfaceValues
+- [#4681](https://github.com/influxdata/influxdb/pull/4681): Increase default buffer size for collectd and graphite listeners
+- [#4685](https://github.com/influxdata/influxdb/pull/4685): Automatically promote node to raft peer if drop server results in removing a raft peer.
+- [#4846](https://github.com/influxdata/influxdb/pull/4846): Allow NaN as a valid value on the graphite service; discard these points silently (graphite compatibility). Thanks @jsternberg!
+
+### Bugfixes
+- [#4193](https://github.com/influxdata/influxdb/issues/4193): Less than or equal to inequality is not inclusive for time in where clause
+- [#4235](https://github.com/influxdata/influxdb/issues/4235): "ORDER BY DESC" doesn't properly order
+- [#4789](https://github.com/influxdata/influxdb/pull/4789): Decode WHERE fields during aggregates. Fix [issue #4701](https://github.com/influxdata/influxdb/issues/4701).
+- [#4778](https://github.com/influxdata/influxdb/pull/4778): If there are no points to count, count is 0.
+- [#4715](https://github.com/influxdata/influxdb/pull/4715): Fix panic during Raft-close. Fix [issue #4707](https://github.com/influxdata/influxdb/issues/4707). Thanks @oiooj
+- [#4643](https://github.com/influxdata/influxdb/pull/4643): Fix panic during backup restoration. Thanks @oiooj
+- [#4632](https://github.com/influxdata/influxdb/pull/4632): Fix parsing of IPv6 hosts in client package. Thanks @miguelxpn
+- [#4389](https://github.com/influxdata/influxdb/pull/4389): Don't add a new segment file on each hinted-handoff purge cycle.
+- [#4166](https://github.com/influxdata/influxdb/pull/4166): Fix parser error on invalid SHOW
+- [#3457](https://github.com/influxdata/influxdb/issues/3457): [0.9.3] cannot select field names with prefix + "." that match the measurement name
+- [#4704](https://github.com/influxdata/influxdb/pull/4704): Tighten up command parsing within CLI. Thanks @pires
+- [#4225](https://github.com/influxdata/influxdb/pull/4225): Always display diags in name-sorted order
+- [#4111](https://github.com/influxdata/influxdb/pull/4111): Update pre-commit hook for go vet composites
+- [#4136](https://github.com/influxdata/influxdb/pull/4136): Return an error-on-write if target retention policy does not exist. Thanks for the report @ymettier
+- [#4228](https://github.com/influxdata/influxdb/pull/4228): Add build timestamp to version information.
+- [#4124](https://github.com/influxdata/influxdb/issues/4124): Missing defer/recover/panic idiom in HTTPD service
+- [#4238](https://github.com/influxdata/influxdb/pull/4238): Fully disable hinted-handoff service if so requested.
+- [#4165](https://github.com/influxdata/influxdb/pull/4165): Tag all Go runtime stats when writing to internal database.
+- [#4586](https://github.com/influxdata/influxdb/pull/4586): Exit when invalid engine is selected
+- [#4118](https://github.com/influxdata/influxdb/issues/4118): Return consistent, correct result for SHOW MEASUREMENTS with multiple AND conditions
+- [#4191](https://github.com/influxdata/influxdb/pull/4191): Correctly marshal remote mapper responses. Fixes [#4170](https://github.com/influxdata/influxdb/issues/4170)
+- [#4222](https://github.com/influxdata/influxdb/pull/4222): Graphite TCP connections should not block shutdown
+- [#4180](https://github.com/influxdata/influxdb/pull/4180): Cursor & SelectMapper Refactor
+- [#1577](https://github.com/influxdata/influxdb/issues/1577): selectors (e.g. min, max, first, last) should have equivalents to return the actual point
+- [#4264](https://github.com/influxdata/influxdb/issues/4264): Refactor map functions to use list of values
+- [#4278](https://github.com/influxdata/influxdb/pull/4278): Fix error marshalling across the cluster
+- [#4149](https://github.com/influxdata/influxdb/pull/4149): Fix derivative unnecessarily requires aggregate function. Thanks @peekeri!
+- [#4674](https://github.com/influxdata/influxdb/pull/4674): Fix panic during restore. Thanks @simcap.
+- [#4725](https://github.com/influxdata/influxdb/pull/4725): Don't list deleted shards during SHOW SHARDS.
+- [#4237](https://github.com/influxdata/influxdb/issues/4237): DERIVATIVE() edge conditions
+- [#4263](https://github.com/influxdata/influxdb/issues/4263): derivative does not work when data is missing
+- [#4293](https://github.com/influxdata/influxdb/pull/4293): Ensure shell is invoked when touching PID file. Thanks @christopherjdickson
+- [#4296](https://github.com/influxdata/influxdb/pull/4296): Reject line protocol ending with '-'. Fixes [#4272](https://github.com/influxdata/influxdb/issues/4272)
+- [#4333](https://github.com/influxdata/influxdb/pull/4333): Retry monitor storage creation and storage only on Leader.
+- [#4276](https://github.com/influxdata/influxdb/issues/4276): Walk DropSeriesStatement & check for empty sources
+- [#4465](https://github.com/influxdata/influxdb/pull/4465): Actually display a message if the CLI can't connect to the database.
+- [#4342](https://github.com/influxdata/influxdb/pull/4342): Fix mixing aggregates and math with non-aggregates. Thanks @kostya-sh.
+- [#4349](https://github.com/influxdata/influxdb/issues/4349): If HH can't unmarshal a block, skip that block.
+- [#4502](https://github.com/influxdata/influxdb/pull/4502): Don't crash on Graphite close, if Graphite not fully open. Thanks for the report @ranjib
+- [#4354](https://github.com/influxdata/influxdb/pull/4353): Fully lock node queues during hinted handoff. Fixes one cause of missing data on clusters.
+- [#4357](https://github.com/influxdata/influxdb/issues/4357): Fix similar float values encoding overflow. Thanks @dgryski!
+- [#4344](https://github.com/influxdata/influxdb/issues/4344): Make client.Write default to client.precision if none is given.
+- [#3429](https://github.com/influxdata/influxdb/issues/3429): Incorrect parsing of regex containing '/'
+- [#4374](https://github.com/influxdata/influxdb/issues/4374): Add tsm1 quickcheck tests
+- [#4644](https://github.com/influxdata/influxdb/pull/4644): Check for response errors during token check, fixes issue [#4641](https://github.com/influxdata/influxdb/issues/4641)
+- [#4377](https://github.com/influxdata/influxdb/pull/4377): Hinted handoff should not process dropped nodes
+- [#4365](https://github.com/influxdata/influxdb/issues/4365): Prevent panic in DecodeSameTypeBlock
+- [#4280](https://github.com/influxdata/influxdb/issues/4280): Only drop points matching WHERE clause
+- [#4443](https://github.com/influxdata/influxdb/pull/4443): Fix race condition while listing store's shards. Fixes [#4442](https://github.com/influxdata/influxdb/issues/4442)
+- [#4410](https://github.com/influxdata/influxdb/pull/4410): Fix infinite recursion in statement string(). Thanks @kostya-sh
+- [#4360](https://github.com/influxdata/influxdb/issues/4360): Aggregate Selectors overwrite values during post-processing
+- [#4421](https://github.com/influxdata/influxdb/issues/4421): Fix line protocol accepting tags with no values
+- [#4434](https://github.com/influxdata/influxdb/pull/4434): Allow 'E' for scientific values. Fixes [#4433](https://github.com/influxdata/influxdb/issues/4433)
+- [#4431](https://github.com/influxdata/influxdb/issues/4431): Add tsm1 WAL QuickCheck
+- [#4438](https://github.com/influxdata/influxdb/pull/4438): openTSDB service shutdown fixes
+- [#4447](https://github.com/influxdata/influxdb/pull/4447): Fixes to logrotate file. Thanks @linsomniac.
+- [#3820](https://github.com/influxdata/influxdb/issues/3820): Fix js error in admin UI.
+- [#4460](https://github.com/influxdata/influxdb/issues/4460): tsm1 meta lint
+- [#4415](https://github.com/influxdata/influxdb/issues/4415): Selector (like max, min, first, etc) return a string instead of timestamp
+- [#4472](https://github.com/influxdata/influxdb/issues/4472): Fix 'too many points in GROUP BY interval' error
+- [#4475](https://github.com/influxdata/influxdb/issues/4475): Fix SHOW TAG VALUES error message.
+- [#4486](https://github.com/influxdata/influxdb/pull/4486): Fix missing comments for runner package
+- [#4497](https://github.com/influxdata/influxdb/pull/4497): Fix sequence in meta proto
+- [#3367](https://github.com/influxdata/influxdb/issues/3367): Negative timestamps are parsed correctly by the line protocol.
+- [#4563](https://github.com/influxdata/influxdb/pull/4536): Fix broken subscriptions updates.
+- [#4538](https://github.com/influxdata/influxdb/issues/4538): Dropping database under a write load causes panics
+- [#4582](https://github.com/influxdata/influxdb/pull/4582): Correct logging tags in cluster and TCP package. Thanks @oiooj
+- [#4513](https://github.com/influxdata/influxdb/issues/4513): TSM1: panic: runtime error: index out of range
+- [#4521](https://github.com/influxdata/influxdb/issues/4521): TSM1: panic: decode of short block: got 1, exp 9
+- [#4587](https://github.com/influxdata/influxdb/pull/4587): Prevent NaN float values from being stored
+- [#4596](https://github.com/influxdata/influxdb/pull/4596): Skip empty string for start position when parsing line protocol. Thanks @ch33hau
+- [#4610](https://github.com/influxdata/influxdb/pull/4610): Make internal stats names consistent with Go style.
+- [#4625](https://github.com/influxdata/influxdb/pull/4625): Correctly handle bad write requests. Thanks @oiooj.
+- [#4650](https://github.com/influxdata/influxdb/issues/4650): Importer should skip empty lines
+- [#4651](https://github.com/influxdata/influxdb/issues/4651): Importer doesn't flush out last batch
+- [#4602](https://github.com/influxdata/influxdb/issues/4602): Fixes data race between PointsWriter and Subscriber services.
+- [#4691](https://github.com/influxdata/influxdb/issues/4691): Enable toml test `TestConfig_Encode`.
+- [#4283](https://github.com/influxdata/influxdb/pull/4283): Disable HintedHandoff if configuration is not set.
+- [#4703](https://github.com/influxdata/influxdb/pull/4703): Complete lint for cmd/influx. Thanks @pablolmiranda
+
+## v0.9.4 [2015-09-14]
+
+### Release Notes
+With this release InfluxDB is moving to Go 1.5.
+
+### Features
+- [#4050](https://github.com/influxdata/influxdb/pull/4050): Add stats to collectd
+- [#3771](https://github.com/influxdata/influxdb/pull/3771): Close idle Graphite TCP connections
+- [#3755](https://github.com/influxdata/influxdb/issues/3755): Add option to build script. Thanks @fg2it
+- [#3863](https://github.com/influxdata/influxdb/pull/3863): Move to Go 1.5
+- [#3892](https://github.com/influxdata/influxdb/pull/3892): Support IF NOT EXISTS for CREATE DATABASE
+- [#3916](https://github.com/influxdata/influxdb/pull/3916): New statistics and diagnostics support. Graphite first to be instrumented.
+- [#3901](https://github.com/influxdata/influxdb/pull/3901): Add consistency level option to influx cli. Thanks @takayuki
+- [#4048](https://github.com/influxdata/influxdb/pull/4048): Add statistics to Continuous Query service
+- [#4049](https://github.com/influxdata/influxdb/pull/4049): Add stats to the UDP input
+- [#3876](https://github.com/influxdata/influxdb/pull/3876): Allow the following syntax in CQs: INTO "1hPolicy".:MEASUREMENT
+- [#3975](https://github.com/influxdata/influxdb/pull/3975): Add shard copy service
+- [#3986](https://github.com/influxdata/influxdb/pull/3986): Support sorting by time desc
+- [#3930](https://github.com/influxdata/influxdb/pull/3930): Wire up TOP aggregate function - fixes [#1821](https://github.com/influxdata/influxdb/issues/1821)
+- [#4045](https://github.com/influxdata/influxdb/pull/4045): Instrument cluster-level points writer
+- [#3996](https://github.com/influxdata/influxdb/pull/3996): Add statistics to httpd package
+- [#4003](https://github.com/influxdata/influxdb/pull/4033): Add logrotate configuration.
+- [#4043](https://github.com/influxdata/influxdb/pull/4043): Add stats and batching to openTSDB input
+- [#4042](https://github.com/influxdata/influxdb/pull/4042): Add pending batches control to batcher
+- [#4006](https://github.com/influxdata/influxdb/pull/4006): Add basic statistics for shards
+- [#4072](https://github.com/influxdata/influxdb/pull/4072): Add statistics for the WAL.
+
+### Bugfixes
+- [#4042](https://github.com/influxdata/influxdb/pull/4042): Set UDP input batching defaults as needed.
+- [#3785](https://github.com/influxdata/influxdb/issues/3785): Invalid time stamp in graphite metric causes panic
+- [#3804](https://github.com/influxdata/influxdb/pull/3804): init.d script fixes, fixes issue 3803.
+- [#3823](https://github.com/influxdata/influxdb/pull/3823): Deterministic ordering for first() and last()
+- [#3869](https://github.com/influxdata/influxdb/issues/3869): Seemingly deadlocked when ingesting metrics via graphite plugin
+- [#3856](https://github.com/influxdata/influxdb/pull/3856): Minor changes to retention enforcement.
+- [#3884](https://github.com/influxdata/influxdb/pull/3884): Fix two panics in WAL that can happen at server startup
+- [#3868](https://github.com/influxdata/influxdb/pull/3868): Add shell option to start the daemon on CentOS. Thanks @SwannCroiset.
+- [#3886](https://github.com/influxdata/influxdb/pull/3886): Prevent write timeouts due to lock contention in WAL
+- [#3574](https://github.com/influxdata/influxdb/issues/3574): Querying data node causes panic
+- [#3913](https://github.com/influxdata/influxdb/issues/3913): Convert meta shard owners to objects
+- [#4026](https://github.com/influxdata/influxdb/pull/4026): Support multiple Graphite inputs. Fixes issue [#3636](https://github.com/influxdata/influxdb/issues/3636)
+- [#3927](https://github.com/influxdata/influxdb/issues/3927): Add WAL lock to prevent timing lock contention
+- [#3928](https://github.com/influxdata/influxdb/issues/3928): Write fails for multiple points when tag starts with quote
+- [#3901](https://github.com/influxdata/influxdb/pull/3901): Unblock relaxed write consistency level. Thanks @takayuki!
+- [#3950](https://github.com/influxdata/influxdb/pull/3950): Limit bz1 quickcheck tests to 10 iterations on CI
+- [#3977](https://github.com/influxdata/influxdb/pull/3977): Silence wal logging during testing
+- [#3931](https://github.com/influxdata/influxdb/pull/3931): Don't precreate shard groups entirely in the past
+- [#3960](https://github.com/influxdata/influxdb/issues/3960): possible "catch up" bug with nodes down in a cluster
+- [#3980](https://github.com/influxdata/influxdb/pull/3980): 'service stop' waits until service actually stops. Fixes issue #3548.
+- [#4016](https://github.com/influxdata/influxdb/pull/4016): Shutdown Graphite UDP on SIGTERM.
+- [#4034](https://github.com/influxdata/influxdb/pull/4034): Rollback bolt tx on mapper open error
+- [#3848](https://github.com/influxdata/influxdb/issues/3848): restart influxdb causing panic
+- [#3881](https://github.com/influxdata/influxdb/issues/3881): panic: runtime error: invalid memory address or nil pointer dereference
+- [#3926](https://github.com/influxdata/influxdb/issues/3926): First or last value of `GROUP BY time(x)` is often null. Fixed by [#4038](https://github.com/influxdata/influxdb/pull/4038)
+- [#4053](https://github.com/influxdata/influxdb/pull/4053): Prohibit dropping default retention policy.
+- [#4060](https://github.com/influxdata/influxdb/pull/4060): Don't log EOF error in openTSDB input.
+- [#3978](https://github.com/influxdata/influxdb/issues/3978): [0.9.3] (regression) cannot use GROUP BY * with more than a single field in SELECT clause
+- [#4058](https://github.com/influxdata/influxdb/pull/4058): Disable bz1 recompression
+- [#3902](https://github.com/influxdata/influxdb/issues/3902): [0.9.3] DB should not crash when using invalid expression "GROUP BY time"
+- [#3718](https://github.com/influxdata/influxdb/issues/3718): Derivative query with group by time but no aggregate function should fail parse
+
+## v0.9.3 [2015-08-26]
+
+### Release Notes
+
+There are breaking changes in this release.
+ - To store data points as integers you must now append `i` to the number if using the line protocol.
+ - If you have a UDP input configured, you should check the UDP section of [the new sample configuration file](https://github.com/influxdata/influxdb/blob/master/etc/config.sample.toml) to learn how to modify existing configuration files, as 0.9.3 now expects multiple UDP inputs.
+ - Configuration files must now have an entry for `wal-dir` in the `[data]` section. Check [new sample configuration file](https://github.com/influxdata/influxdb/blob/master/etc/config.sample.toml) for more details.
+ - The implicit `GROUP BY *` that was added to every `SELECT *` has been removed. Instead any tags in the data are now part of the columns in the returned query.
+
+Please see the *Features* section below for full details.
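+
+For illustration, a minimal write sketch for the first breaking change above: the trailing `i` marks a field as an integer in the line protocol, while a number without it is stored as a float. The host, database, and point are assumptions:
+
+```go
+package main
+
+import (
+	"fmt"
+	"log"
+	"net/http"
+	"strings"
+)
+
+func main() {
+	// One point in line protocol; `total=42i` stores an integer,
+	// `total=42` would store a float.
+	body := strings.NewReader("requests,host=server01 total=42i 1435362189575692182")
+	resp, err := http.Post("http://localhost:8086/write?db=mydb", "text/plain", body)
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer resp.Body.Close()
+	fmt.Println(resp.Status) // a successful write returns 204 No Content
+}
+```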
+
+### Features
+- [#3376](https://github.com/influxdata/influxdb/pull/3376): Support for remote shard query mapping
+- [#3372](https://github.com/influxdata/influxdb/pull/3372): Support joining nodes to existing cluster
+- [#3426](https://github.com/influxdata/influxdb/pull/3426): Additional logging for continuous queries. Thanks @jhorwit2
+- [#3478](https://github.com/influxdata/influxdb/pull/3478): Support incremental cluster joins
+- [#3519](https://github.com/influxdata/influxdb/pull/3519): **--BREAKING CHANGE--** Update line protocol to require trailing i for field values that are integers
+- [#3529](https://github.com/influxdata/influxdb/pull/3529): Add TLS support for OpenTSDB plugin. Thanks @nathanielc
+- [#3421](https://github.com/influxdata/influxdb/issues/3421): Should update metastore and cluster if IP or hostname changes
+- [#3502](https://github.com/influxdata/influxdb/pull/3502): Importer for 0.8.9 data via the CLI
+- [#3564](https://github.com/influxdata/influxdb/pull/3564): Fix alias, maintain column sort order
+- [#3585](https://github.com/influxdata/influxdb/pull/3585): Additional test coverage for non-existent fields
+- [#3246](https://github.com/influxdata/influxdb/issues/3246): Allow overriding of configuration parameters using environment variables
+- [#3599](https://github.com/influxdata/influxdb/pull/3599): **--BREAKING CHANGE--** Support multiple UDP inputs. Thanks @tpitale
+- [#3636](https://github.com/influxdata/influxdb/pull/3639): Cap auto-created retention policy replica count at 3
+- [#3641](https://github.com/influxdata/influxdb/pull/3641): Logging enhancements and single-node rename
+- [#3635](https://github.com/influxdata/influxdb/pull/3635): Add build branch to version output.
+- [#3115](https://github.com/influxdata/influxdb/pull/3115): Various init.d script improvements. Thanks @KoeSystems.
+- [#3628](https://github.com/influxdata/influxdb/pull/3628): Wildcard expansion of tags and fields for raw queries
+- [#3721](https://github.com/influxdata/influxdb/pull/3721): interpret number literals compared against time as nanoseconds from epoch
+- [#3514](https://github.com/influxdata/influxdb/issues/3514): Implement WAL outside BoltDB with compaction
+- [#3544](https://github.com/influxdata/influxdb/pull/3544): Implement compression on top of BoltDB
+- [#3795](https://github.com/influxdata/influxdb/pull/3795): Throttle import
+- [#3584](https://github.com/influxdata/influxdb/pull/3584): Import/export documentation
+
+### Bugfixes
+- [#3405](https://github.com/influxdata/influxdb/pull/3405): Prevent database panic when fields are missing. Thanks @jhorwit2
+- [#3411](https://github.com/influxdata/influxdb/issues/3411): 500 timeout on write
+- [#3420](https://github.com/influxdata/influxdb/pull/3420): Catch opentsdb malformed tags. Thanks @nathanielc.
+- [#3404](https://github.com/influxdata/influxdb/pull/3404): Added support for escaped single quotes in query string. Thanks @jhorwit2
+- [#3414](https://github.com/influxdata/influxdb/issues/3414): Shard mappers perform query re-writing
+- [#3525](https://github.com/influxdata/influxdb/pull/3525): Check if fields are valid during parse time.
+- [#3511](https://github.com/influxdata/influxdb/issues/3511): Sending a large number of tags causes panic
+- [#3288](https://github.com/influxdata/influxdb/issues/3288): Run go fuzz on the line-protocol input
+- [#3545](https://github.com/influxdata/influxdb/issues/3545): Fix parsing string fields with newlines
+- [#3579](https://github.com/influxdata/influxdb/issues/3579): Revert breaking change to `client.NewClient` function
+- [#3580](https://github.com/influxdata/influxdb/issues/3580): Do not allow wildcards with fields in select statements
+- [#3530](https://github.com/influxdata/influxdb/pull/3530): Aliasing a column no longer works
+- [#3436](https://github.com/influxdata/influxdb/issues/3436): Fix panic in hinted handoff queue processor
+- [#3401](https://github.com/influxdata/influxdb/issues/3401): Derivative on non-numeric fields panics db
+- [#3583](https://github.com/influxdata/influxdb/issues/3583): Inserting value in scientific notation with a trailing i causes panic
+- [#3611](https://github.com/influxdata/influxdb/pull/3611): Fix query arithmetic with integers
+- [#3326](https://github.com/influxdata/influxdb/issues/3326): simple regex query fails with cryptic error
+- [#3618](https://github.com/influxdata/influxdb/pull/3618): Fix collectd stats panic on i386. Thanks @richterger
+- [#3625](https://github.com/influxdata/influxdb/pull/3625): Don't panic when aggregate and raw queries are in a single statement
+- [#3629](https://github.com/influxdata/influxdb/pull/3629): Use sensible batching defaults for Graphite.
+- [#3638](https://github.com/influxdata/influxdb/pull/3638): Cluster config fixes and removal of meta.peers config field
+- [#3640](https://github.com/influxdata/influxdb/pull/3640): Shutdown Graphite service when signal received.
+- [#3632](https://github.com/influxdata/influxdb/issues/3632): Make single-node host renames more seamless
+- [#3656](https://github.com/influxdata/influxdb/issues/3656): Silence snapshotter logger for testing
+- [#3651](https://github.com/influxdata/influxdb/pull/3651): Fully remove series when dropped.
+- [#3517](https://github.com/influxdata/influxdb/pull/3517): Batch CQ writes to avoid timeouts. Thanks @dim.
+- [#3522](https://github.com/influxdata/influxdb/pull/3522): Consume CQ results on request timeouts. Thanks @dim.
+- [#3646](https://github.com/influxdata/influxdb/pull/3646): Fix nil FieldCodec panic.
+- [#3672](https://github.com/influxdata/influxdb/pull/3672): Reduce in-memory index by 20%-30%
+- [#3673](https://github.com/influxdata/influxdb/pull/3673): Improve query performance by removing unnecessary tagset sorting.
+- [#3676](https://github.com/influxdata/influxdb/pull/3676): Improve query performance by memoizing mapper output keys.
+- [#3686](https://github.com/influxdata/influxdb/pull/3686): Ensure 'p' parameter is not logged, even on OPTIONS requests.
+- [#3687](https://github.com/influxdata/influxdb/issues/3687): Fix panic: runtime error: makeslice: len out of range in hinted handoff
+- [#3697](https://github.com/influxdata/influxdb/issues/3697): Correctly merge non-chunked results for same series. Fix issue #3242.
+- [#3708](https://github.com/influxdata/influxdb/issues/3708): Fix double escaping measurement name during cluster replication
+- [#3704](https://github.com/influxdata/influxdb/issues/3704): cluster replication issue for measurement name containing backslash
+- [#3681](https://github.com/influxdata/influxdb/issues/3681): Quoted measurement names fail
+- [#3681](https://github.com/influxdata/influxdb/issues/3682): Fix inserting string value with backslashes
+- [#3735](https://github.com/influxdata/influxdb/issues/3735): Append to small bz1 blocks
+- [#3736](https://github.com/influxdata/influxdb/pull/3736): Update shard group duration with retention policy changes. Thanks for the report @papylhomme
+- [#3539](https://github.com/influxdata/influxdb/issues/3539): parser incorrectly accepts NaN as numerical value, but not always
+- [#3790](https://github.com/influxdata/influxdb/pull/3790): Fix line protocol parsing equals in measurements and NaN values
+- [#3778](https://github.com/influxdata/influxdb/pull/3778): Don't panic if SELECT on time.
+- [#3824](https://github.com/influxdata/influxdb/issues/3824): tsdb.Point.MarshalBinary needs to support all number types
+- [#3828](https://github.com/influxdata/influxdb/pull/3828): Support all number types when decoding a point
+- [#3853](https://github.com/influxdata/influxdb/pull/3853): Use 4KB default block size for bz1
+- [#3607](https://github.com/influxdata/influxdb/issues/3607): Fix unable to query influxdb due to deadlock in metastore. Thanks @ccutrer!
+
+## v0.9.2 [2015-07-24]
+
+### Features
+- [#3177](https://github.com/influxdata/influxdb/pull/3177): Client supports making HTTPS requests. Thanks @jipperinbham
+- [#3299](https://github.com/influxdata/influxdb/pull/3299): Refactor query engine for distributed query support.
+- [#3334](https://github.com/influxdata/influxdb/pull/3334): Clean shutdown of influxd. Thanks @mcastilho
+
+### Bugfixes
+
+- [#3180](https://github.com/influxdata/influxdb/pull/3180): Log GOMAXPROCS, version, and commit on startup.
+- [#3218](https://github.com/influxdata/influxdb/pull/3218): Allow write timeouts to be configurable.
+- [#3184](https://github.com/influxdata/influxdb/pull/3184): Support basic auth in admin interface. Thanks @jipperinbham!
+- [#3236](https://github.com/influxdata/influxdb/pull/3236): Fix display issues in admin interface.
+- [#3232](https://github.com/influxdata/influxdb/pull/3232): Set logging prefix for metastore.
+- [#3230](https://github.com/influxdata/influxdb/issues/3230): panic: unable to parse bool value
+- [#3245](https://github.com/influxdata/influxdb/issues/3245): Error using graphite plugin with multiple filters
+- [#3223](https://github.com/influxdata/influxdb/issues/323): default graphite template cannot have extra tags
+- [#3255](https://github.com/influxdata/influxdb/pull/3255): Flush WAL on start-up as soon as possible.
+- [#3289](https://github.com/influxdata/influxdb/issues/3289): InfluxDB crashes on floats without decimal
+- [#3298](https://github.com/influxdata/influxdb/pull/3298): Corrected WAL & flush parameters in default config. Thanks @jhorwit2
+- [#3152](https://github.com/influxdata/influxdb/issues/3159): High CPU Usage with unsorted writes
+- [#3307](https://github.com/influxdata/influxdb/pull/3307): Fix regression parsing boolean values True/False
+- [#3304](https://github.com/influxdata/influxdb/pull/3304): Fixed httpd logger to log user from query params. Thanks @jhorwit2
+- [#3332](https://github.com/influxdata/influxdb/pull/3332): Add SLIMIT and SOFFSET to string version of AST.
+- [#3335](https://github.com/influxdata/influxdb/pull/3335): Don't drop all data on DROP DATABASE. Thanks to @PierreF for the report
+- [#2761](https://github.com/influxdata/influxdb/issues/2761): Make SHOW RETENTION POLICIES consistent with other queries.
+- [#3356](https://github.com/influxdata/influxdb/pull/3356): Disregard semicolons after database name in use command. Thanks @timraymond.
+- [#3351](https://github.com/influxdata/influxdb/pull/3351): Handle malformed regex comparisons during parsing. Thanks @rnubel
+- [#3244](https://github.com/influxdata/influxdb/pull/3244): Wire up admin privilege grant and revoke.
+- [#3259](https://github.com/influxdata/influxdb/issues/3259): Respect privileges for queries.
+- [#3256](https://github.com/influxdata/influxdb/pull/3256): Remove unnecessary timeout in WaitForLeader(). Thanks @cannium.
+- [#3380](https://github.com/influxdata/influxdb/issues/3380): Parser fix, only allow ORDER BY ASC and ORDER BY time ASC.
+- [#3319](https://github.com/influxdata/influxdb/issues/3319): restarting process irrevocably BREAKS measurements with spaces
+- [#3453](https://github.com/influxdata/influxdb/issues/3453): Remove outdated `dump` command from CLI.
+- [#3463](https://github.com/influxdata/influxdb/issues/3463): Fix aggregate queries and time precision on where clauses.
+
+## v0.9.1 [2015-07-02]
+
+### Features
+
+- [#2650](https://github.com/influxdata/influxdb/pull/2650): Add SHOW GRANTS FOR USER statement. Thanks @n1tr0g.
+- [#3125](https://github.com/influxdata/influxdb/pull/3125): Graphite Input Protocol Parsing
+- [#2746](https://github.com/influxdata/influxdb/pull/2746): New Admin UI/interface
+- [#3036](https://github.com/influxdata/influxdb/pull/3036): Write Ahead Log (WAL)
+- [#3014](https://github.com/influxdata/influxdb/issues/3014): Implement Raft snapshots
+
+### Bugfixes
+
+- [#3013](https://github.com/influxdata/influxdb/issues/3013): Panic error with inserting values with commas (see the sketch at the end of this section)
+- [#2956](https://github.com/influxdata/influxdb/issues/2956): Type mismatch in derivative
+- [#2908](https://github.com/influxdata/influxdb/issues/2908): Field mismatch error messages need to be updated
+- [#2931](https://github.com/influxdata/influxdb/pull/2931): Services and reporting should wait until cluster has leader.
+- [#2943](https://github.com/influxdata/influxdb/issues/2943): Ensure default retention policies are fully replicated
+- [#2948](https://github.com/influxdata/influxdb/issues/2948): Field mismatch error message to include measurement name
+- [#2919](https://github.com/influxdata/influxdb/issues/2919): Unable to insert negative floats
+- [#2935](https://github.com/influxdata/influxdb/issues/2935): Hook CPU and memory profiling back up.
+- [#2960](https://github.com/influxdata/influxdb/issues/2960): Cluster Write Errors.
+- [#2928](https://github.com/influxdata/influxdb/pull/2928): Start work to set InfluxDB version in HTTP response headers. Thanks @neonstalwart.
+- [#2969](https://github.com/influxdata/influxdb/pull/2969): Actually set HTTP version in responses.
+- [#2993](https://github.com/influxdata/influxdb/pull/2993): Don't log each UDP batch.
+- [#2994](https://github.com/influxdata/influxdb/pull/2994): Don't panic during wildcard expansion if no default database specified.
+- [#3002](https://github.com/influxdata/influxdb/pull/3002): Remove measurement from shard's index on DROP MEASUREMENT.
+- [#3021](https://github.com/influxdata/influxdb/pull/3021): Correctly set HTTP write trace logging. Thanks @vladlopes.
+- [#3027](https://github.com/influxdata/influxdb/pull/3027): Enforce minimum retention policy duration of 1 hour.
+- [#3030](https://github.com/influxdata/influxdb/pull/3030): Fix excessive logging of shard creation.
+- [#3038](https://github.com/influxdata/influxdb/pull/3038): Don't check deleted shards for precreation. Thanks @vladlopes.
+- [#3033](https://github.com/influxdata/influxdb/pull/3033): Add support for marshaling `uint64` in client.
+- [#3090](https://github.com/influxdata/influxdb/pull/3090): Remove database from TSDB index on DROP DATABASE.
+- [#2944](https://github.com/influxdata/influxdb/issues/2944): Don't require "WHERE time" when creating continuous queries.
+- [#3075](https://github.com/influxdata/influxdb/pull/3075): GROUP BY correctly when different tags have same value.
+- [#3078](https://github.com/influxdata/influxdb/pull/3078): Fix CLI panic on malformed INSERT.
+- [#2102](https://github.com/influxdata/influxdb/issues/2102): Re-work Graphite input and metric processing
+- [#2996](https://github.com/influxdata/influxdb/issues/2996): Graphite Input Parsing
+- [#3136](https://github.com/influxdata/influxdb/pull/3136): Fix various issues with init.d script. Thanks @miguelcnf.
+- [#3127](https://github.com/influxdata/influxdb/issues/3127): Trying to insert a number larger than the largest signed 64-bit number kills influxd
+- [#3131](https://github.com/influxdata/influxdb/pull/3131): Copy batch tags to each point before marshalling
+- [#3155](https://github.com/influxdata/influxdb/pull/3155): Instantiate UDP batcher before listening for UDP traffic, otherwise a panic may result.
+- [#2678](https://github.com/influxdata/influxdb/issues/2678): Server allows tags with an empty string for the key and/or value
+- [#3061](https://github.com/influxdata/influxdb/issues/3061): syntactically incorrect line protocol insert panics the database
+- [#2608](https://github.com/influxdata/influxdb/issues/2608): drop measurement while writing points to that measurement has race condition that can panic
+- [#3183](https://github.com/influxdata/influxdb/issues/3183): using line protocol measurement names cannot contain commas
+- [#3193](https://github.com/influxdata/influxdb/pull/3193): Fix panic for SHOW STATS and in collectd
+- [#3102](https://github.com/influxdata/influxdb/issues/3102): Add authentication cache
+- [#3209](https://github.com/influxdata/influxdb/pull/3209): Dump Run() errors to stderr
+- [#3217](https://github.com/influxdata/influxdb/pull/3217): Allow WAL partition flush delay to be configurable.
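+
+Several of the fixes above ([#3013](https://github.com/influxdata/influxdb/issues/3013), [#3061](https://github.com/influxdata/influxdb/issues/3061)) involve unescaped commas in line-protocol input. For illustration, a sketch using the later Go `client/v2` API, which quotes and escapes string fields for you; all names are hypothetical:
+
+```go
+package main
+
+import (
+	"log"
+	"time"
+
+	client "github.com/influxdata/influxdb/client/v2"
+)
+
+func main() {
+	// NewPoint encodes the point, so a string field containing commas
+	// is emitted as a quoted value rather than tripping the parser.
+	pt, err := client.NewPoint(
+		"events",
+		map[string]string{"host": "server01"},
+		map[string]interface{}{"message": "disk full, write failed"},
+		time.Now(),
+	)
+	if err != nil {
+		log.Fatal(err)
+	}
+	// Prints: events,host=server01 message="disk full, write failed" <timestamp>
+	log.Println(pt.String())
+}
+```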
+
+## v0.9.0 [2015-06-11]
+
+### Bugfixes
+
+- [#2869](https://github.com/influxdata/influxdb/issues/2869): Adding field to existing measurement causes panic
+- [#2849](https://github.com/influxdata/influxdb/issues/2849): RC32: Frequent write errors
+- [#2700](https://github.com/influxdata/influxdb/issues/2700): Incorrect error message in database EncodeFields
+- [#2897](https://github.com/influxdata/influxdb/pull/2897): Ensure target Graphite database exists
+- [#2898](https://github.com/influxdata/influxdb/pull/2898): Ensure target openTSDB database exists
+- [#2895](https://github.com/influxdata/influxdb/pull/2895): Use Graphite input defaults where necessary
+- [#2900](https://github.com/influxdata/influxdb/pull/2900): Use openTSDB input defaults where necessary
+- [#2886](https://github.com/influxdata/influxdb/issues/2886): Refactor backup & restore
+- [#2804](https://github.com/influxdata/influxdb/pull/2804): BREAKING: change time literals to be single quoted in InfluxQL. Thanks @nvcook42!
+- [#2906](https://github.com/influxdata/influxdb/pull/2906): Restrict replication factor to the cluster size
+- [#2905](https://github.com/influxdata/influxdb/pull/2905): Restrict clusters to 3 peers
+- [#2904](https://github.com/influxdata/influxdb/pull/2904): Re-enable server reporting.
+- [#2917](https://github.com/influxdata/influxdb/pull/2917): Fix int64 field values.
+- [#2920](https://github.com/influxdata/influxdb/issues/2920): Ensure collectd database exists
+
+## v0.9.0-rc33 [2015-06-09]
+
+### Bugfixes
+
+- [#2816](https://github.com/influxdata/influxdb/pull/2816): Enable UDP service. Thanks @renan-
+- [#2824](https://github.com/influxdata/influxdb/pull/2824): Add missing call to WaitGroup.Done in execConn. Thanks @liyichao
+- [#2823](https://github.com/influxdata/influxdb/pull/2823): Convert OpenTSDB to a service.
+- [#2838](https://github.com/influxdata/influxdb/pull/2838): Set auto-created retention policy period to infinite.
+- [#2829](https://github.com/influxdata/influxdb/pull/2829): Re-enable Graphite support as a new Service-style component.
+- [#2814](https://github.com/influxdata/influxdb/issues/2814): Convert collectd to a service.
+- [#2852](https://github.com/influxdata/influxdb/pull/2852): Don't panic when altering retention policies. Thanks for the report @huhongbo
+- [#2857](https://github.com/influxdata/influxdb/issues/2857): Fix parsing commas in string field values.
+- [#2833](https://github.com/influxdata/influxdb/pull/2833): Make the default config valid.
+- [#2859](https://github.com/influxdata/influxdb/pull/2859): Fix panic on aggregate functions.
+- [#2878](https://github.com/influxdata/influxdb/pull/2878): Re-enable shard precreation.
+- [#2865](https://github.com/influxdata/influxdb/pull/2865): Return an empty set of results if database does not exist in shard metadata.
+
+### Features
+- [#2858](https://github.com/influxdata/influxdb/pull/2858): Support setting openTSDB write consistency.
+
+## v0.9.0-rc32 [2015-06-07]
+
+### Release Notes
+
+This release introduced an updated write path and clustering design. The data format has also changed, so you'll need to wipe out your data to upgrade from RC31. There should be no other data changes before v0.9.0 is released.
+
+### Features
+- [#1997](https://github.com/influxdata/influxdb/pull/1997): Update SELECT * to return tag values.
+- [#2599](https://github.com/influxdata/influxdb/issues/2599): Add "epoch" URL param and return JSON time values as epoch instead of date strings.
+- [#2682](https://github.com/influxdata/influxdb/issues/2682): Adding pr checklist to CONTRIBUTING.md
+- [#2683](https://github.com/influxdata/influxdb/issues/2683): Add batching support to Graphite inputs.
+- [#2687](https://github.com/influxdata/influxdb/issues/2687): Add batching support to Collectd inputs.
+- [#2696](https://github.com/influxdata/influxdb/pull/2696): Add line protocol. This is now the preferred way to write data.
+- [#2751](https://github.com/influxdata/influxdb/pull/2751): Add UDP input. UDP only supports the line protocol now. (A UDP write sketch follows the v0.9.0-rc31 notes below.)
+- [#2684](https://github.com/influxdata/influxdb/pull/2684): Include client timeout configuration. Thanks @vladlopes!
+
+### Bugfixes
+- [#2776](https://github.com/influxdata/influxdb/issues/2776): Re-implement retention policy enforcement.
+- [#2635](https://github.com/influxdata/influxdb/issues/2635): Fix querying against boolean field in WHERE clause.
+- [#2644](https://github.com/influxdata/influxdb/issues/2644): Make SHOW queries work with FROM //.
+- [#2501](https://github.com/influxdata/influxdb/issues/2501): Name the FlagSet for the shell and add a version flag. Thanks @neonstalwart
+- [#2647](https://github.com/influxdata/influxdb/issues/2647): Fixes typos in sample config file - thanks @claws!
+
+## v0.9.0-rc31 [2015-05-21]
+
+### Features
+- [#1822](https://github.com/influxdata/influxdb/issues/1822): Wire up DERIVATIVE aggregate
+- [#1477](https://github.com/influxdata/influxdb/issues/1477): Wire up non_negative_derivative function
+- [#2557](https://github.com/influxdata/influxdb/issues/2557): Fix false positive error with `GROUP BY time`
+- [#1891](https://github.com/influxdata/influxdb/issues/1891): Wire up COUNT DISTINCT aggregate
+- [#1989](https://github.com/influxdata/influxdb/issues/1989): Implement `SELECT tagName FROM m`
+
+### Bugfixes
+- [#2545](https://github.com/influxdata/influxdb/pull/2545): Use "value" as the field name for graphite input. Thanks @cannium.
+- [#2558](https://github.com/influxdata/influxdb/pull/2558): Fix client response check - thanks @vladlopes!
+- [#2566](https://github.com/influxdata/influxdb/pull/2566): Wait until each data write has been committed by the Raft cluster.
+- [#2602](https://github.com/influxdata/influxdb/pull/2602): CLI execute command exits without cleaning up liner package.
+- [#2610](https://github.com/influxdata/influxdb/pull/2610): Fix shard group creation
+- [#2596](https://github.com/influxdata/influxdb/pull/2596): RC30: `panic: runtime error: index out of range` when insert data points.
+- [#2592](https://github.com/influxdata/influxdb/pull/2592): Should return an error if user attempts to group by a field.
+- [#2499](https://github.com/influxdata/influxdb/pull/2499): Issuing a select query with a tag as a value causes panic.
+- [#2612](https://github.com/influxdata/influxdb/pull/2612): Query planner should validate distinct is passed a field.
+- [#2531](https://github.com/influxdata/influxdb/issues/2531): Fix select with 3 or more terms in where clause.
+- [#2564](https://github.com/influxdata/influxdb/issues/2564): Change "name" to "measurement" in JSON for writes.
+
+## PRs
+- [#2569](https://github.com/influxdata/influxdb/pull/2569): Add derivative functions
+- [#2598](https://github.com/influxdata/influxdb/pull/2598): Implement tag support in SELECT statements
+- [#2624](https://github.com/influxdata/influxdb/pull/2624): Remove references to SeriesID in `DROP SERIES` handlers.
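+
+As noted under v0.9.0-rc32 above, the UDP input accepts only line protocol. For illustration, a minimal write sketch; the port (a common UDP listener default) and the point are assumptions:
+
+```go
+package main
+
+import (
+	"fmt"
+	"log"
+	"net"
+)
+
+func main() {
+	// Fire-and-forget: UDP gives no acknowledgement that the point was accepted.
+	conn, err := net.Dial("udp", "localhost:8089")
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer conn.Close()
+	fmt.Fprint(conn, "cpu,host=server01 value=0.64\n")
+}
+```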
+
+## v0.9.0-rc30 [2015-05-12]
+
+### Release Notes
+
+This release has a breaking API change for writes -- the field previously called `timestamp` has been renamed to `time` (see the sketch at the end of this section).
+
+### Features
+- [#2254](https://github.com/influxdata/influxdb/pull/2254): Add Support for OpenTSDB HTTP interface. Thanks @tcolgate
+- [#2525](https://github.com/influxdata/influxdb/pull/2525): Serve broker diagnostics over HTTP
+- [#2186](https://github.com/influxdata/influxdb/pull/2186): The default status code for queries is now `200 OK`
+- [#2298](https://github.com/influxdata/influxdb/pull/2298): Successful writes now return a status code of `204 No Content` - thanks @neonstalwart!
+- [#2549](https://github.com/influxdata/influxdb/pull/2549): Raft election timeout increased to 5 seconds, so the system is more forgiving of CPU load.
+- [#2568](https://github.com/influxdata/influxdb/pull/2568): Wire up SELECT DISTINCT.
+
+### Bugfixes
+- [#2535](https://github.com/influxdata/influxdb/pull/2535): Return exit status 0 if influxd already running. Thanks @haim0n.
+- [#2521](https://github.com/influxdata/influxdb/pull/2521): Don't truncate topic data until fully replicated.
+- [#2509](https://github.com/influxdata/influxdb/pull/2509): Parse config file correctly during restore. Thanks @neonstalwart
+- [#2536](https://github.com/influxdata/influxdb/issues/2532): Set leader ID on restart of single-node cluster.
+- [#2448](https://github.com/influxdata/influxdb/pull/2448): Fix inconsistent data type - thanks @cannium!
+- [#2108](https://github.com/influxdata/influxdb/issues/2108): Change `timestamp` to `time` - thanks @neonstalwart!
+- [#2539](https://github.com/influxdata/influxdb/issues/2539): Add additional vote request logging.
+- [#2541](https://github.com/influxdata/influxdb/issues/2541): Update messaging client connection index with every message.
+- [#2542](https://github.com/influxdata/influxdb/issues/2542): Throw parser error for invalid aggregate without where time.
+- [#2548](https://github.com/influxdata/influxdb/issues/2548): Return an error when numeric aggregate applied to non-numeric data.
+- [#2487](https://github.com/influxdata/influxdb/issues/2487): Aggregate query with exact timestamp causes panic. Thanks @neonstalwart!
+- [#2552](https://github.com/influxdata/influxdb/issues/2552): Run CQ that is actually passed into go-routine.
+- [#2553](https://github.com/influxdata/influxdb/issues/2553): Fix race condition during CQ execution.
+- [#2557](https://github.com/influxdata/influxdb/issues/2557): RC30 WHERE time filter Regression.
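+
+For illustration, a hypothetical fragment of the JSON write body showing only the renamed key; the surrounding payload shape is an assumption, not a spec:
+
+```go
+package main
+
+import (
+	"encoding/json"
+	"fmt"
+	"time"
+)
+
+// point sketches one entry of the JSON write payload.
+type point struct {
+	Name   string                 `json:"name"`
+	Time   time.Time              `json:"time"` // pre-rc30 this key was "timestamp"
+	Fields map[string]interface{} `json:"fields"`
+}
+
+func main() {
+	b, err := json.Marshal(point{
+		Name:   "cpu",
+		Time:   time.Now(),
+		Fields: map[string]interface{}{"value": 0.64},
+	})
+	if err != nil {
+		panic(err)
+	}
+	fmt.Println(string(b))
+}
+```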
+
+## v0.9.0-rc29 [2015-05-05]
+
+### Features
+- [#2410](https://github.com/influxdata/influxdb/pull/2410): If needed, brokers respond with data nodes for peer shard replication.
+- [#2469](https://github.com/influxdata/influxdb/pull/2469): Reduce default max topic size from 1GB to 50MB.
+- [#1824](https://github.com/influxdata/influxdb/pull/1824): Wire up MEDIAN aggregate. Thanks @neonstalwart!
+
+### Bugfixes
+- [#2446](https://github.com/influxdata/influxdb/pull/2446): Correctly count number of queries executed. Thanks @neonstalwart
+- [#2452](https://github.com/influxdata/influxdb/issues/2452): Fix panic with shard stats on multiple clusters
+- [#2453](https://github.com/influxdata/influxdb/pull/2453): Do not require snapshot on Log.WriteEntriesTo().
+- [#2460](https://github.com/influxdata/influxdb/issues/2460): Collectd input should use "value" for fields values. Fixes 2412. Thanks @josh-padnick
+- [#2465](https://github.com/influxdata/influxdb/pull/2465): HTTP response logging panicked with chunked requests. Thanks @Jackkoz
+- [#2475](https://github.com/influxdata/influxdb/pull/2475): RLock server when checking if shards groups are required during write.
+- [#2471](https://github.com/influxdata/influxdb/issues/2471): Function calls normalized to be lower case. Fixes percentile not working when called uppercase. Thanks @neonstalwart
+- [#2281](https://github.com/influxdata/influxdb/issues/2281): Fix Bad Escape error when parsing regex
+
+## v0.9.0-rc28 [2015-04-27]
+
+### Features
+- [#2410](https://github.com/influxdata/influxdb/pull/2410): Allow configuration of Raft timers
+- [#2354](https://github.com/influxdata/influxdb/pull/2354): Wire up STDDEV. Thanks @neonstalwart!
+
+### Bugfixes
+- [#2374](https://github.com/influxdata/influxdb/issues/2374): Two different panics during SELECT percentile
+- [#2404](https://github.com/influxdata/influxdb/pull/2404): Mean and percentile function fixes
+- [#2408](https://github.com/influxdata/influxdb/pull/2408): Fix snapshot 500 error
+- [#1896](https://github.com/influxdata/influxdb/issues/1896): Excessive heartbeater logging of "connection refused" on cluster node stop
+- [#2418](https://github.com/influxdata/influxdb/pull/2418): Fix raft node getting stuck in candidate state
+- [#2415](https://github.com/influxdata/influxdb/pull/2415): Raft leader ID now set on election after failover. Thanks @xiaost
+- [#2426](https://github.com/influxdata/influxdb/pull/2426): Fix race condition around listener address in openTSDB server.
+- [#2426](https://github.com/influxdata/influxdb/pull/2426): Fix race condition around listener address in Graphite server.
+- [#2429](https://github.com/influxdata/influxdb/pull/2429): Ensure no field value is null.
+- [#2431](https://github.com/influxdata/influxdb/pull/2431): Always append shard path in diags. Thanks @marcosnils
+- [#2441](https://github.com/influxdata/influxdb/pull/2441): Correctly release server RLock during "drop series".
+- [#2445](https://github.com/influxdata/influxdb/pull/2445): Read locks and data race fixes
+
+## v0.9.0-rc27 [2015-04-23]
+
+### Features
+- [#2398](https://github.com/influxdata/influxdb/pull/2398): Track more stats and report errors for shards.
+
+### Bugfixes
+- [#2370](https://github.com/influxdata/influxdb/pull/2370): Fix data race in openTSDB endpoint.
+- [#2371](https://github.com/influxdata/influxdb/pull/2371): Don't set client to nil when closing broker. Fixes #2352
+- [#2372](https://github.com/influxdata/influxdb/pull/2372): Fix data race in graphite endpoint.
+- [#2373](https://github.com/influxdata/influxdb/pull/2373): Actually allow HTTP logging to be controlled.
+- [#2376](https://github.com/influxdata/influxdb/pull/2376): Encode all types of integers. Thanks @jtakkala.
+- [#2376](https://github.com/influxdata/influxdb/pull/2376): Add shard path to existing diags value. Fix issue #2369.
+- [#2386](https://github.com/influxdata/influxdb/pull/2386): Fix shard datanodes stats getting appended too many times
+- [#2393](https://github.com/influxdata/influxdb/pull/2393): Fix default hostname for connecting to cluster.
+- [#2390](https://github.com/influxdata/influxdb/pull/2390): Handle large sums when calculating means - thanks @neonstalwart!
+- [#2391](https://github.com/influxdata/influxdb/pull/2391): Unable to write points through Go client when authentication enabled +- [#2400](https://github.com/influxdata/influxdb/pull/2400): Always send auth headers for client requests if present + +## v0.9.0-rc26 [2015-04-21] + +### Features +- [#2301](https://github.com/influxdata/influxdb/pull/2301): Distributed query load balancing and failover +- [#2336](https://github.com/influxdata/influxdb/pull/2336): Handle distributed queries when shards != data nodes +- [#2353](https://github.com/influxdata/influxdb/pull/2353): Distributed Query/Clustering Fixes + +### Bugfixes +- [#2297](https://github.com/influxdata/influxdb/pull/2297): create /var/run during startup. Thanks @neonstalwart. +- [#2312](https://github.com/influxdata/influxdb/pull/2312): Re-use httpclient for continuous queries +- [#2318](https://github.com/influxdata/influxdb/pull/2318): Remove pointless use of 'done' channel for collectd. +- [#2242](https://github.com/influxdata/influxdb/pull/2242): Distributed Query should balance requests +- [#2243](https://github.com/influxdata/influxdb/pull/2243): Use Limit Reader instead of fixed 1MB/1GB slice for DQ +- [#2190](https://github.com/influxdata/influxdb/pull/2190): Implement failover to other data nodes for distributed queries +- [#2324](https://github.com/influxdata/influxdb/issues/2324): Race in Broker.Close()/Broker.RunContinousQueryProcessing() +- [#2325](https://github.com/influxdata/influxdb/pull/2325): Cluster open fixes +- [#2326](https://github.com/influxdata/influxdb/pull/2326): Fix parse error in CREATE CONTINUOUS QUERY +- [#2300](https://github.com/influxdata/influxdb/pull/2300): Refactor integration tests. Properly close Graphite/OpenTSDB listeners. +- [#2338](https://github.com/influxdata/influxdb/pull/2338): Fix panic if tag key isn't double quoted when it should have been +- [#2340](https://github.com/influxdata/influxdb/pull/2340): Fix SHOW DIAGNOSTICS panic if any shard was non-local. +- [#2351](https://github.com/influxdata/influxdb/pull/2351): Fix data race by rlocking shard during diagnostics. +- [#2348](https://github.com/influxdata/influxdb/pull/2348): Data node fails to join cluster in 0.9.0rc25 +- [#2343](https://github.com/influxdata/influxdb/pull/2343): Node falls behind Metastore updates +- [#2334](https://github.com/influxdata/influxdb/pull/2334): Test Partial replication is very problematic +- [#2272](https://github.com/influxdata/influxdb/pull/2272): clustering: influxdb 0.9.0-rc23 panics when doing a GET with merge_metrics in a +- [#2350](https://github.com/influxdata/influxdb/pull/2350): Fix for `influxd -hostname localhost`. +- [#2367](https://github.com/influxdata/influxdb/pull/2367): PR for issue #2350 - Always use localhost, not host name. + +## v0.9.0-rc25 [2015-04-15] + +### Bugfixes +- [#2282](https://github.com/influxdata/influxdb/pull/2282): Use "value" as field name for OpenTSDB input. +- [#2283](https://github.com/influxdata/influxdb/pull/2283): Fix bug when restarting an entire existing cluster. +- [#2293](https://github.com/influxdata/influxdb/pull/2293): Open cluster listener before starting broker. +- [#2287](https://github.com/influxdata/influxdb/pull/2287): Fix data race during SHOW RETENTION POLICIES. +- [#2288](https://github.com/influxdata/influxdb/pull/2288): Fix expression parsing bug. +- [#2294](https://github.com/influxdata/influxdb/pull/2294): Fix async response flushing (invalid chunked response error).
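+ +To make the v0.9.0-rc30 breaking change above concrete, here is an illustrative (not authoritative) JSON write body in the shape used by the 0.9.0 release candidates; the database, measurement, tag, and field names are hypothetical: + +```json +{ +  "database": "mydb", +  "retentionPolicy": "default", +  "points": [ +    { +      "name": "cpu_load", +      "time": "2015-05-12T14:02:02Z", +      "tags": {"host": "server01"}, +      "fields": {"value": 0.64} +    } +  ] +} +``` + +The `time` key on each point is the field that was previously called `timestamp`.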
+ +### Features +- [#2276](https://github.com/influxdata/influxdb/pull/2276): Broker topic truncation. +- [#2292](https://github.com/influxdata/influxdb/pull/2292): Wire up drop CQ statement - thanks @neonstalwart! +- [#2290](https://github.com/influxdata/influxdb/pull/2290): Allow hostname argument to override default config - thanks @neonstalwart! +- [#2295](https://github.com/influxdata/influxdb/pull/2295): Use nil as default return value for MapCount - thanks @neonstalwart! +- [#2246](https://github.com/influxdata/influxdb/pull/2246): Allow HTTP logging to be controlled. + +## v0.9.0-rc24 [2015-04-13] + +### Bugfixes +- [#2255](https://github.com/influxdata/influxdb/pull/2255): Fix panic when changing default retention policy. +- [#2257](https://github.com/influxdata/influxdb/pull/2257): Add "snapshotting" pseudo state & log entry cache. +- [#2261](https://github.com/influxdata/influxdb/pull/2261): Support int64 value types. +- [#2191](https://github.com/influxdata/influxdb/pull/2191): Case-insensitive check for "fill" +- [#2274](https://github.com/influxdata/influxdb/pull/2274): Snapshot and HTTP API endpoints +- [#2265](https://github.com/influxdata/influxdb/pull/2265): Fix auth for CLI. + +## v0.9.0-rc23 [2015-04-11] + +### Features +- [#2202](https://github.com/influxdata/influxdb/pull/2202): Initial implementation of Distributed Queries +- [#2202](https://github.com/influxdata/influxdb/pull/2202): 64-bit Series IDs. INCOMPATIBLE WITH PREVIOUS DATASTORES. + +### Bugfixes +- [#2225](https://github.com/influxdata/influxdb/pull/2225): Make keywords completely case insensitive +- [#2228](https://github.com/influxdata/influxdb/pull/2228): Accept keyword default unquoted in ALTER RETENTION POLICY statement +- [#2236](https://github.com/influxdata/influxdb/pull/2236): Immediate term changes, fix stale write issue, net/http/pprof +- [#2213](https://github.com/influxdata/influxdb/pull/2213): Seed random number generator for election timeout. Thanks @cannium. + +## v0.9.0-rc22 [2015-04-09] + +### Features +- [#2214](https://github.com/influxdata/influxdb/pull/2214): Added the option to influx CLI to execute a single command and exit. Thanks @n1tr0g + +### Bugfixes +- [#2223](https://github.com/influxdata/influxdb/pull/2223): Always notify term change on RequestVote + +## v0.9.0-rc21 [2015-04-09] + +### Features +- [#870](https://github.com/influxdata/influxdb/pull/870): Add support for OpenTSDB telnet input protocol. Thanks @tcolgate +- [#2180](https://github.com/influxdata/influxdb/pull/2180): Allow http write handler to decode gzipped body +- [#2175](https://github.com/influxdata/influxdb/pull/2175): Separate broker and data nodes +- [#2158](https://github.com/influxdata/influxdb/pull/2158): Allow user password to be changed. Thanks @n1tr0g +- [#2201](https://github.com/influxdata/influxdb/pull/2201): Bring back config join URLs +- [#2121](https://github.com/influxdata/influxdb/pull/2121): Parser refactor + +### Bugfixes +- [#2181](https://github.com/influxdata/influxdb/pull/2181): Fix panic on "SHOW DIAGNOSTICS". +- [#2170](https://github.com/influxdata/influxdb/pull/2170): Make sure queries on missing tags return 200 status. +- [#2197](https://github.com/influxdata/influxdb/pull/2197): Lock server during Open(). +- [#2200](https://github.com/influxdata/influxdb/pull/2200): Re-enable Continuous Queries. +- [#2203](https://github.com/influxdata/influxdb/pull/2203): Fix race condition on continuous queries.
+- [#2217](https://github.com/influxdata/influxdb/pull/2217): Only revert to follower if new term is greater. +- [#2219](https://github.com/influxdata/influxdb/pull/2219): Persist term change to disk when candidate. Thanks @cannium + +## v0.9.0-rc20 [2015-04-04] + +### Features +- [#2128](https://github.com/influxdata/influxdb/pull/2128): Data node discovery from brokers +- [#2142](https://github.com/influxdata/influxdb/pull/2142): Support chunked queries +- [#2154](https://github.com/influxdata/influxdb/pull/2154): Node redirection +- [#2168](https://github.com/influxdata/influxdb/pull/2168): Return raft term from vote, add term logging + +### Bugfixes +- [#2147](https://github.com/influxdata/influxdb/pull/2147): Set Go Max procs in a better location +- [#2137](https://github.com/influxdata/influxdb/pull/2137): Refactor `results` to `response`. Breaking Go Client change. +- [#2151](https://github.com/influxdata/influxdb/pull/2151): Ignore replay commands on the metastore. +- [#2152](https://github.com/influxdata/influxdb/issues/2152): Influxd process with stats enabled crashing with 'Unsuported protocol scheme for ""' +- [#2156](https://github.com/influxdata/influxdb/pull/2156): Propagate error when resolving UDP address in Graphite UDP server. +- [#2163](https://github.com/influxdata/influxdb/pull/2163): Fix up paths for default data and run storage. +- [#2164](https://github.com/influxdata/influxdb/pull/2164): Append STDOUT/STDERR in initscript. +- [#2165](https://github.com/influxdata/influxdb/pull/2165): Better name for config section for stats and diags. +- [#2165](https://github.com/influxdata/influxdb/pull/2165): Monitoring database and retention policy are not configurable. +- [#2167](https://github.com/influxdata/influxdb/pull/2167): Add broker log recovery. +- [#2166](https://github.com/influxdata/influxdb/pull/2166): Don't panic if presented with a field of unknown type. +- [#2149](https://github.com/influxdata/influxdb/pull/2149): Fix unit tests for win32 when directory doesn't exist. +- [#2150](https://github.com/influxdata/influxdb/pull/2150): Fix unit tests for win32 when a connection is refused. + +## v0.9.0-rc19 [2015-04-01] + +### Features +- [#2143](https://github.com/influxdata/influxdb/pull/2143): Add raft term logging. + +### Bugfixes +- [#2145](https://github.com/influxdata/influxdb/pull/2145): Encode TOML durations correctly, which fixes default configuration generation via `influxd config`. + +## v0.9.0-rc18 [2015-03-31] + +### Bugfixes +- [#2100](https://github.com/influxdata/influxdb/pull/2100): Use channel to synchronize collectd shutdown. +- [#2100](https://github.com/influxdata/influxdb/pull/2100): Synchronize access to shard index. +- [#2131](https://github.com/influxdata/influxdb/pull/2131): Optimize marshalTags(). +- [#2130](https://github.com/influxdata/influxdb/pull/2130): Make fewer calls to marshalTags(). +- [#2105](https://github.com/influxdata/influxdb/pull/2105): Support != for tag values. Fix issue #2097, thanks to @smonkewitz for bug report. +- [#2105](https://github.com/influxdata/influxdb/pull/2105): Support !~ tag values. +- [#2138](https://github.com/influxdata/influxdb/pull/2136): Use map for marshaledTags cache. + +## v0.9.0-rc17 [2015-03-29] + +### Features +- [#2076](https://github.com/influxdata/influxdb/pull/2076): Separate stdout and stderr output in init.d script +- [#2091](https://github.com/influxdata/influxdb/pull/2091): Support disabling snapshot endpoint.
+- [#2081](https://github.com/influxdata/influxdb/pull/2081): Support writing diagnostic data into the internal database. +- [#2095](https://github.com/influxdata/influxdb/pull/2095): Improved InfluxDB client docs. Thanks @derailed + +### Bugfixes +- [#2093](https://github.com/influxdata/influxdb/pull/2093): Point precision not marshalled correctly. Thanks @derailed +- [#2084](https://github.com/influxdata/influxdb/pull/2084): Allow leading underscores in identifiers. +- [#2080](https://github.com/influxdata/influxdb/pull/2080): Graphite logs in seconds, not milliseconds. +- [#2101](https://github.com/influxdata/influxdb/pull/2101): SHOW DATABASES should name returned series "databases". +- [#2104](https://github.com/influxdata/influxdb/pull/2104): Include NEQ when calculating field filters. +- [#2112](https://github.com/influxdata/influxdb/pull/2112): Set GOMAXPROCS on startup. This may have been causing extra leader elections, which would cause a number of other bugs or instability. +- [#2111](https://github.com/influxdata/influxdb/pull/2111) and [#2025](https://github.com/influxdata/influxdb/issues/2025): Raft stability fixes. Non-contiguous log error and others. +- [#2114](https://github.com/influxdata/influxdb/pull/2114): Correctly start influxd on platforms without start-stop-daemon. + +## v0.9.0-rc16 [2015-03-24] + +### Features +- [#2058](https://github.com/influxdata/influxdb/pull/2058): Track number of queries executed in stats. +- [#2059](https://github.com/influxdata/influxdb/pull/2059): Retention policies sorted by name on return to client. +- [#2061](https://github.com/influxdata/influxdb/pull/2061): Implement SHOW DIAGNOSTICS. +- [#2064](https://github.com/influxdata/influxdb/pull/2064): Allow init.d script to return influxd version. +- [#2053](https://github.com/influxdata/influxdb/pull/2053): Implement backup and restore. +- [#1631](https://github.com/influxdata/influxdb/pull/1631): Wire up DROP CONTINUOUS QUERY. + +### Bugfixes +- [#2037](https://github.com/influxdata/influxdb/pull/2037): Don't check 'configExists' at Run() level. +- [#2039](https://github.com/influxdata/influxdb/pull/2039): Don't panic if getting current user fails. +- [#2034](https://github.com/influxdata/influxdb/pull/2034): GROUP BY should require an aggregate. +- [#2040](https://github.com/influxdata/influxdb/pull/2040): Add missing top-level help for config command. +- [#2057](https://github.com/influxdata/influxdb/pull/2057): Move racy "in order" test to integration test suite. +- [#2060](https://github.com/influxdata/influxdb/pull/2060): Reload server shard map on restart. +- [#2068](https://github.com/influxdata/influxdb/pull/2068): Fix misspelled JSON field. +- [#2067](https://github.com/influxdata/influxdb/pull/2067): Fixed issue where some queries didn't properly pull back data (introduced in RC15); fixes intervals for GROUP BY. + +## v0.9.0-rc15 [2015-03-19] + +### Features +- [#2000](https://github.com/influxdata/influxdb/pull/2000): Log broker path when broker fails to start. Thanks @gst. +- [#2007](https://github.com/influxdata/influxdb/pull/2007): Track shard-level stats. + +### Bugfixes +- [#2001](https://github.com/influxdata/influxdb/pull/2001): Ensure measurement not found returns status code 200. +- [#1985](https://github.com/influxdata/influxdb/pull/1985): Set content-type JSON header before actually writing header. Thanks @dstrek. +- [#2003](https://github.com/influxdata/influxdb/pull/2003): Set timestamp when writing monitoring stats.
+- [#2004](https://github.com/influxdata/influxdb/pull/2004): Limit group by to MaxGroupByPoints (currently 100,000). +- [#2016](https://github.com/influxdata/influxdb/pull/2016): Fix bucket alignment for group by. Thanks @jnutzmann +- [#2021](https://github.com/influxdata/influxdb/pull/2021): Remove unnecessary formatting from log message. Thanks @simonkern + +## v0.9.0-rc14 [2015-03-18] + +### Bugfixes +- [#1999](https://github.com/influxdata/influxdb/pull/1999): Return status code 200 for measurement not found errors on show series. + +## v0.9.0-rc13 [2015-03-17] + +### Features +- [#1974](https://github.com/influxdata/influxdb/pull/1974): Add time taken for request to the http server logs. + +### Bugfixes +- [#1971](https://github.com/influxdata/influxdb/pull/1971): Fix leader id initialization. +- [#1975](https://github.com/influxdata/influxdb/pull/1975): Require `q` parameter for query endpoint. +- [#1969](https://github.com/influxdata/influxdb/pull/1969): Print loaded config. +- [#1987](https://github.com/influxdata/influxdb/pull/1987): Fix config print startup statement for when no config is provided. +- [#1990](https://github.com/influxdata/influxdb/pull/1990): Drop measurement was taking too long due to transactions. + +## v0.9.0-rc12 [2015-03-15] + +### Bugfixes +- [#1942](https://github.com/influxdata/influxdb/pull/1942): Sort wildcard names. +- [#1957](https://github.com/influxdata/influxdb/pull/1957): Graphite numbers are always float64. +- [#1955](https://github.com/influxdata/influxdb/pull/1955): Prohibit creation of databases with no name. Thanks @dullgiulio +- [#1952](https://github.com/influxdata/influxdb/pull/1952): Handle delete statement with an error. Thanks again to @dullgiulio + +### Features +- [#1935](https://github.com/influxdata/influxdb/pull/1935): Implement stateless broker for Raft. +- [#1936](https://github.com/influxdata/influxdb/pull/1936): Implement "SHOW STATS" and self-monitoring +- [#1909](https://github.com/influxdata/influxdb/pull/1909): Implement a dump command. + +## v0.9.0-rc11 [2015-03-13] + +### Bugfixes +- [#1917](https://github.com/influxdata/influxdb/pull/1902): Creating Infinite Retention Policy Failed. +- [#1758](https://github.com/influxdata/influxdb/pull/1758): Add Graphite Integration Test. +- [#1929](https://github.com/influxdata/influxdb/pull/1929): Default Retention Policy incorrectly auto created. +- [#1930](https://github.com/influxdata/influxdb/pull/1930): Auto create database for graphite if not specified. +- [#1908](https://github.com/influxdata/influxdb/pull/1908): Cosmetic CLI output fixes. +- [#1931](https://github.com/influxdata/influxdb/pull/1931): Add default column to SHOW RETENTION POLICIES. +- [#1937](https://github.com/influxdata/influxdb/pull/1937): OFFSET should be allowed to be 0. + +### Features +- [#1902](https://github.com/influxdata/influxdb/pull/1902): Enforce retention policies to have a minimum duration. +- [#1906](https://github.com/influxdata/influxdb/pull/1906): Add show servers to query language. +- [#1925](https://github.com/influxdata/influxdb/pull/1925): Add `fill(none)`, `fill(previous)`, and `fill()` to queries (see the example query below).
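+ +For context on the `fill()` variants added in [#1925] above, a minimal illustrative query follows (the measurement and field names are hypothetical): + +```sql +SELECT mean(value) FROM cpu_load WHERE time > now() - 1h GROUP BY time(10m) fill(previous) +``` + +Here `fill(previous)` carries the last observed value forward into empty intervals, `fill(none)` suppresses empty intervals entirely, and the bare `fill()` form presumably takes a constant to substitute (e.g. `fill(0)`).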
+ +## v0.9.0-rc10 [2015-03-09] + +### Bugfixes +- [#1867](https://github.com/influxdata/influxdb/pull/1867): Fix race accessing topic replicas map +- [#1864](https://github.com/influxdata/influxdb/pull/1864): Fix race in startStateLoop +- [#1753](https://github.com/influxdata/influxdb/pull/1874): Do Not Panic on Missing Dirs +- [#1877](https://github.com/influxdata/influxdb/pull/1877): Broker clients track broker leader +- [#1862](https://github.com/influxdata/influxdb/pull/1862): Fix memory leak in `httpd.serveWait`. Thanks @mountkin +- [#1883](https://github.com/influxdata/influxdb/pull/1883): RLock server during retention policy enforcement. Thanks @grisha +- [#1868](https://github.com/influxdata/influxdb/pull/1868): Use `BatchPoints` for `client.Write` method. Thanks @vladlopes, @georgmu, @d2g, @evanphx, @akolosov. +- [#1881](https://github.com/influxdata/influxdb/pull/1881): Update documentation for `client` package. Misc library tweaks. +- Fix queries with multiple where clauses on tags, times and fields. Fix queries that have where clauses on fields not in the select. + +### Features +- [#1875](https://github.com/influxdata/influxdb/pull/1875): Support trace logging of Raft. +- [#1895](https://github.com/influxdata/influxdb/pull/1895): Auto-create a retention policy when a database is created. +- [#1897](https://github.com/influxdata/influxdb/pull/1897): Pre-create shard groups. +- [#1900](https://github.com/influxdata/influxdb/pull/1900): Change `LIMIT` to `SLIMIT` and implement `LIMIT` and `OFFSET` (see the example queries after the v0.9.0-rc6 entries below). + +## v0.9.0-rc9 [2015-03-06] + +### Bugfixes +- [#1872](https://github.com/influxdata/influxdb/pull/1872): Fix "stale term" errors with raft + +## v0.9.0-rc8 [2015-03-05] + +### Bugfixes +- [#1836](https://github.com/influxdata/influxdb/pull/1836): Store each parsed shell command in history file. +- [#1789](https://github.com/influxdata/influxdb/pull/1789): Add --config-files option to fpm command. Thanks @kylezh +- [#1859](https://github.com/influxdata/influxdb/pull/1859): Queries with a `GROUP BY *` clause were returning a 500 if done against a measurement that didn't exist + +### Features +- [#1755](https://github.com/influxdata/influxdb/pull/1848): Support JSON data ingest over UDP +- [#1857](https://github.com/influxdata/influxdb/pull/1857): Support retention policies with infinite duration +- [#1858](https://github.com/influxdata/influxdb/pull/1858): Enable detailed tracing of write path + +## v0.9.0-rc7 [2015-03-02] + +### Features +- [#1813](https://github.com/influxdata/influxdb/pull/1813): Queries for missing measurements or fields now return a 200 with an error message in the series JSON. +- [#1826](https://github.com/influxdata/influxdb/pull/1826), [#1827](https://github.com/influxdata/influxdb/pull/1827): Fixed queries with `WHERE` clauses against fields. + +### Bugfixes + +- [#1744](https://github.com/influxdata/influxdb/pull/1744): Allow retention policies to be modified without specifying replication factor. Thanks @kylezh +- [#1809](https://github.com/influxdata/influxdb/pull/1809): Packaging post-install script unconditionally removes init.d symlink. Thanks @sineos + +## v0.9.0-rc6 [2015-02-27] + +### Bugfixes + +- [#1780](https://github.com/influxdata/influxdb/pull/1780): Malformed identifiers get through the parser +- [#1775](https://github.com/influxdata/influxdb/pull/1775): Panic "index out of range" on some queries +- [#1744](https://github.com/influxdata/influxdb/pull/1744): Select shard groups which completely encompass time range. Thanks @kylezh.
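+ +As referenced in the v0.9.0-rc10 notes above, here are illustrative queries for the [#1900] `LIMIT`/`SLIMIT` split (a sketch only; the measurement and field names are hypothetical): + +```sql +-- LIMIT/OFFSET now bound the number of points returned +SELECT value FROM cpu_load LIMIT 100 OFFSET 10 +-- SLIMIT bounds the number of series returned +SELECT value FROM cpu_load GROUP BY * SLIMIT 5 +```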
+ +## v0.9.0-rc5 [2015-02-27] + +### Bugfixes + +- [#1752](https://github.com/influxdata/influxdb/pull/1752): Remove debug log output from collectd. +- [#1720](https://github.com/influxdata/influxdb/pull/1720): Parse Series IDs as unsigned 32-bits. +- [#1767](https://github.com/influxdata/influxdb/pull/1767): Drop Series was failing across shards. Issue #1761. +- [#1773](https://github.com/influxdata/influxdb/pull/1773): Fix bug when merging series together that have an unequal number of points in a group by interval +- [#1771](https://github.com/influxdata/influxdb/pull/1771): Make `SHOW SERIES` return IDs and support `LIMIT` and `OFFSET` + +### Features + +- [#1698](https://github.com/influxdata/influxdb/pull/1698): Wire up DROP MEASUREMENT + +## v0.9.0-rc4 [2015-02-24] + +### Bugfixes + +- Fix authentication issue with continuous queries +- Print version in the log on startup + +## v0.9.0-rc3 [2015-02-23] + +### Features + +- [#1659](https://github.com/influxdata/influxdb/pull/1659): WHERE against regexes: `WHERE =~ '.*asdf'` +- [#1580](https://github.com/influxdata/influxdb/pull/1580): Add support for fields with bool, int, or string data types +- [#1687](https://github.com/influxdata/influxdb/pull/1687): Change `Rows` to `Series` in results output. BREAKING API CHANGE +- [#1629](https://github.com/influxdata/influxdb/pull/1629): Add support for `DROP SERIES` queries +- [#1632](https://github.com/influxdata/influxdb/pull/1632): Add support for `GROUP BY *` to return all series within a measurement +- [#1689](https://github.com/influxdata/influxdb/pull/1689): Change `SHOW TAG VALUES WITH KEY="foo"` to use the key name in the result. BREAKING API CHANGE +- [#1699](https://github.com/influxdata/influxdb/pull/1699): Add CPU and memory profiling options to daemon +- [#1672](https://github.com/influxdata/influxdb/pull/1672): Add index tracking to metastore. Makes downed node recovery actually work +- [#1591](https://github.com/influxdata/influxdb/pull/1591): Add `spread` aggregate function +- [#1576](https://github.com/influxdata/influxdb/pull/1576): Add `first` and `last` aggregate functions +- [#1573](https://github.com/influxdata/influxdb/pull/1573): Add `stddev` aggregate function +- [#1565](https://github.com/influxdata/influxdb/pull/1565): Add the admin interface back into the server and update for new API +- [#1562](https://github.com/influxdata/influxdb/pull/1562): Enforce retention policies +- [#1700](https://github.com/influxdata/influxdb/pull/1700): Change `Values` to `Fields` on writes. BREAKING API CHANGE +- [#1706](https://github.com/influxdata/influxdb/pull/1706): Add support for `LIMIT` and `OFFSET`, which work on the number of series returned in a query. To limit the number of data points use a `WHERE time` clause + +### Bugfixes + +- [#1636](https://github.com/influxdata/influxdb/issues/1636): Don't store number of fields in raw data. THIS IS A BREAKING DATA CHANGE. YOU MUST START WITH A FRESH DATABASE +- [#1701](https://github.com/influxdata/influxdb/pull/1701), [#1667](https://github.com/influxdata/influxdb/pull/1667), [#1663](https://github.com/influxdata/influxdb/pull/1663), [#1615](https://github.com/influxdata/influxdb/pull/1615): Raft fixes +- [#1644](https://github.com/influxdata/influxdb/pull/1644): Add batching support for significantly improved write performance +- [#1704](https://github.com/influxdata/influxdb/pull/1704): Fix queries that pull back raw data (i.e.
ones without aggregate functions) +- [#1718](https://github.com/influxdata/influxdb/pull/1718): Return an error on write if any of the points don't have at least one field +- [#1806](https://github.com/influxdata/influxdb/pull/1806): Fix regex parsing. Change regex syntax to use / delimiters. + +## v0.9.0-rc1,2 [no public release] + +### Features + +- Support for tags added +- New queries for showing measurement names, tag keys, and tag values +- Renamed shard spaces to retention policies +- Deprecated matching against regex in favor of explicit writing and querying on retention policies +- Pure Go InfluxQL parser +- Switch to BoltDB as underlying datastore +- BoltDB backed metastore to store schema information +- Updated HTTP API to only have two endpoints `/query` and `/write` +- Added all administrative functions to the query language +- Change cluster architecture to have brokers and data nodes +- Switch to streaming Raft implementation +- In memory inverted index of the tag data +- Pure Go implementation! + +## v0.8.6 [2014-11-15] + +### Features + +- [Issue #973](https://github.com/influxdata/influxdb/issues/973). Support + joining using a regex or list of time series +- [Issue #1068](https://github.com/influxdata/influxdb/issues/1068). Print + the processor chain when the query is started + +### Bugfixes + +- [Issue #584](https://github.com/influxdata/influxdb/issues/584). Don't + panic if the process died while initializing +- [Issue #663](https://github.com/influxdata/influxdb/issues/663). Make + sure all sub services are closed when we are stopping InfluxDB +- [Issue #671](https://github.com/influxdata/influxdb/issues/671). Fix + the Makefile package target for Mac OSX +- [Issue #800](https://github.com/influxdata/influxdb/issues/800). Use + su instead of sudo in the init script. This fixes the startup problem + on RHEL 6. +- [Issue #925](https://github.com/influxdata/influxdb/issues/925). Don't + generate invalid query strings for single point queries +- [Issue #943](https://github.com/influxdata/influxdb/issues/943). Don't + take two snapshots at the same time +- [Issue #947](https://github.com/influxdata/influxdb/issues/947). Exit + nicely if the daemon doesn't have permission to write to the log. +- [Issue #959](https://github.com/influxdata/influxdb/issues/959). Stop using + closed connections in the protobuf client. +- [Issue #978](https://github.com/influxdata/influxdb/issues/978). Check + for valgrind and mercurial in the configure script +- [Issue #996](https://github.com/influxdata/influxdb/issues/996). Fill should + fill the time range even if no points exist in the given time range +- [Issue #1008](https://github.com/influxdata/influxdb/issues/1008). Return + an appropriate exit status code depending on whether the process exits + due to an error or exits gracefully. +- [Issue #1024](https://github.com/influxdata/influxdb/issues/1024). Hitting + open files limit causes influxdb to create shards in a loop. +- [Issue #1069](https://github.com/influxdata/influxdb/issues/1069). Fix + deprecated interface endpoint in Admin UI. +- [Issue #1076](https://github.com/influxdata/influxdb/issues/1076). Fix + the timestamps of data points written by the collectd plugin. (Thanks, + @renchap for reporting this bug) +- [Issue #1078](https://github.com/influxdata/influxdb/issues/1078). Make sure + we don't resurrect shard directories for shards that have already expired +- [Issue #1085](https://github.com/influxdata/influxdb/issues/1085).
Set + the connection string of the local raft node +- [Issue #1092](https://github.com/influxdata/influxdb/issues/1093). Set + the connection string of the local node in the raft snapshot. +- [Issue #1100](https://github.com/influxdata/influxdb/issues/1100). Removing + a non-existent shard space causes the cluster to panic. +- [Issue #1113](https://github.com/influxdata/influxdb/issues/1113). A nil + engine.ProcessorChain causes a panic. + +## v0.8.5 [2014-10-27] + +### Features + +- [Issue #1055](https://github.com/influxdata/influxdb/issues/1055). Allow + graphite and collectd input plugins to have separate binding addresses + +### Bugfixes + +- [Issue #1058](https://github.com/influxdata/influxdb/issues/1058). Use + the query language instead of the continuous query endpoints that + were removed in 0.8.4 +- [Issue #1022](https://github.com/influxdata/influxdb/issues/1022). Return + an +Inf or NaN instead of panicking when we encounter a divide by zero +- [Issue #821](https://github.com/influxdata/influxdb/issues/821). Don't + scan through points when we hit the limit +- [Issue #1051](https://github.com/influxdata/influxdb/issues/1051). Fix + timestamps when collectd is used and low resolution timestamps + are set. + +## v0.8.4 [2014-10-24] + +### Bugfixes + +- Remove the continuous query api endpoints since the query language + has all the features needed to list and delete continuous queries. +- [Issue #778](https://github.com/influxdata/influxdb/issues/778). Selecting + from a non-existent series should give a better error message indicating + that the series doesn't exist +- [Issue #988](https://github.com/influxdata/influxdb/issues/988). Check + the arguments of `top()` and `bottom()` +- [Issue #1021](https://github.com/influxdata/influxdb/issues/1021). Make + redirecting to standard output and standard error optional instead of + going to `/dev/null`. This can now be configured by setting `$STDOUT` + in `/etc/default/influxdb` +- [Issue #985](https://github.com/influxdata/influxdb/issues/985). Make + sure we drop a shard only when there's no one using it. Otherwise, the + shard can be closed when another goroutine is writing to it which will + cause random errors and possibly corruption of the database. + +### Features + +- [Issue #1047](https://github.com/influxdata/influxdb/issues/1047). Allow + merge() to take a list of series (as opposed to a regex in #72) + +## v0.8.4-rc.1 [2014-10-21] + +### Bugfixes + +- [Issue #1040](https://github.com/influxdata/influxdb/issues/1040). Revert + to older raft snapshot if the latest one is corrupted +- [Issue #1004](https://github.com/influxdata/influxdb/issues/1004). Querying + for data outside of existing shards returns an empty response instead of + throwing a `Couldn't lookup columns` error +- [Issue #1020](https://github.com/influxdata/influxdb/issues/1020). Change + init script exit codes to conform to the lsb standards. (Thanks, @spuder) +- [Issue #1011](https://github.com/influxdata/influxdb/issues/1011). Fix + the tarball for homebrew so that rocksdb is included and the directory + structure is clean +- [Issue #1007](https://github.com/influxdata/influxdb/issues/1007). Fix + the content type when an error occurs and the client requests + compression. +- [Issue #916](https://github.com/influxdata/influxdb/issues/916). Set + the ulimit in the init script with a way to override the limit +- [Issue #742](https://github.com/influxdata/influxdb/issues/742).
Fix + rocksdb for Mac OSX +- [Issue #387](https://github.com/influxdata/influxdb/issues/387). Aggregations + with group by time(1w), time(1m) and time(1y) (for week, month and + year respectively) will cause the start time and end time of the bucket + to fall on the logical boundaries of the week, month or year. +- [Issue #334](https://github.com/influxdata/influxdb/issues/334). Derivative + for queries with group by time() and fill(), will take the difference + between the first value in the bucket and the first value of the next + bucket. +- [Issue #972](https://github.com/influxdata/influxdb/issues/972). Don't + assign duplicate server ids + +### Features + +- [Issue #722](https://github.com/influxdata/influxdb/issues/722). Add + an install target to the Makefile +- [Issue #1032](https://github.com/influxdata/influxdb/issues/1032). Include + the admin ui static assets in the binary +- [Issue #1019](https://github.com/influxdata/influxdb/issues/1019). Upgrade + to rocksdb 3.5.1 +- [Issue #992](https://github.com/influxdata/influxdb/issues/992). Add + an input plugin for collectd. (Thanks, @kimor79) +- [Issue #72](https://github.com/influxdata/influxdb/issues/72). Support merge + for multiple series using regex syntax + +## v0.8.3 [2014-09-24] + +### Bugfixes + +- [Issue #885](https://github.com/influxdata/influxdb/issues/885). Multiple + queries separated by semicolons work as expected. Queries are processed + sequentially +- [Issue #652](https://github.com/influxdata/influxdb/issues/652). Return an + error if an invalid column is used in the where clause +- [Issue #794](https://github.com/influxdata/influxdb/issues/794). Fix case + insensitive regex matching +- [Issue #853](https://github.com/influxdata/influxdb/issues/853). Move + cluster config from raft to API. +- [Issue #714](https://github.com/influxdata/influxdb/issues/714). Don't + panic on invalid boolean operators. +- [Issue #843](https://github.com/influxdata/influxdb/issues/843). Prevent blank database names +- [Issue #780](https://github.com/influxdata/influxdb/issues/780). Fix + fill() for all aggregators +- [Issue #923](https://github.com/influxdata/influxdb/issues/923). Enclose + table names in double quotes in the result of GetQueryString() +- [Issue #967](https://github.com/influxdata/influxdb/issues/967). Return an + error if the storage engine can't be created +- [Issue #954](https://github.com/influxdata/influxdb/issues/954). Don't automatically + create shards which was causing too many shards to be created when used with + grafana +- [Issue #939](https://github.com/influxdata/influxdb/issues/939). Aggregation should + ignore null values and invalid values, e.g. strings with mean(). +- [Issue #964](https://github.com/influxdata/influxdb/issues/964). Parse + big int in queries properly. + +## v0.8.2 [2014-09-05] + +### Bugfixes + +- [Issue #886](https://github.com/influxdata/influxdb/issues/886). Update shard space to not set defaults + +- [Issue #867](https://github.com/influxdata/influxdb/issues/867). Add option to return shard space mappings in list series + +- [Issue #652](https://github.com/influxdata/influxdb/issues/652).
Return + a meaningful error if an invalid column is used in the where clause + after joining multiple series + +## v0.8.2 [2014-09-08] + +### Features + +- Added API endpoint to update shard space definitions + +### Bugfixes + +- [Issue #886](https://github.com/influxdata/influxdb/issues/886). Shard space regexes reset after restart of InfluxDB + +## v0.8.1 [2014-09-03] + +### Features + +- [Issue #896](https://github.com/influxdata/influxdb/issues/896). Allow logging to syslog. Thanks @malthe + +### Bugfixes + +- [Issue #868](https://github.com/influxdata/influxdb/issues/868). Don't panic when upgrading a snapshot from 0.7.x +- [Issue #887](https://github.com/influxdata/influxdb/issues/887). The first continuous query shouldn't trigger backfill if it had backfill disabled +- [Issue #674](https://github.com/influxdata/influxdb/issues/674). Graceful exit when config file is invalid. (Thanks, @DavidBord) +- [Issue #857](https://github.com/influxdata/influxdb/issues/857). More informative list servers api. (Thanks, @oliveagle) + +## v0.8.0 [2014-08-22] + +### Features + +- [Issue #850](https://github.com/influxdata/influxdb/issues/850). Makes the server listing more informative + +### Bugfixes + +- [Issue #779](https://github.com/influxdata/influxdb/issues/779). Deleting expired shards isn't thread safe. +- [Issue #860](https://github.com/influxdata/influxdb/issues/860). Load database config should validate shard spaces. +- [Issue #862](https://github.com/influxdata/influxdb/issues/862). Data migrator should have option to set delay time. + +## v0.8.0-rc.5 [2014-08-15] + +### Features + +- [Issue #376](https://github.com/influxdata/influxdb/issues/376). List series should support regex filtering +- [Issue #745](https://github.com/influxdata/influxdb/issues/745). Add continuous queries to the database config +- [Issue #746](https://github.com/influxdata/influxdb/issues/746). Add data migration tool for 0.8.0 + +### Bugfixes + +- [Issue #426](https://github.com/influxdata/influxdb/issues/426). Fill should fill the entire time range that is requested +- [Issue #740](https://github.com/influxdata/influxdb/issues/740). Don't emit non-existent fields when joining series with different fields +- [Issue #744](https://github.com/influxdata/influxdb/issues/744). Admin site should have all assets locally +- [Issue #767](https://github.com/influxdata/influxdb/issues/768). Remove shards whenever they expire +- [Issue #781](https://github.com/influxdata/influxdb/issues/781). Don't emit non-existent fields when joining series with different fields +- [Issue #791](https://github.com/influxdata/influxdb/issues/791). Move database config loader to be an API endpoint +- [Issue #809](https://github.com/influxdata/influxdb/issues/809). Migration path from 0.7 -> 0.8 +- [Issue #811](https://github.com/influxdata/influxdb/issues/811). Gogoprotobuf removed `ErrWrongType`, which is depended on by Raft +- [Issue #820](https://github.com/influxdata/influxdb/issues/820). Query non-local shard with time range to avoid getting back points not in time range +- [Issue #827](https://github.com/influxdata/influxdb/issues/827). Don't leak file descriptors in the WAL +- [Issue #830](https://github.com/influxdata/influxdb/issues/830). List series should return series in lexicographic sorted order +- [Issue #831](https://github.com/influxdata/influxdb/issues/831). Move create shard space to be db specific + +## v0.8.0-rc.4 [2014-07-29] + +### Bugfixes + +- [Issue #774](https://github.com/influxdata/influxdb/issues/774).
Don't try to parse "inf" shard retention policy +- [Issue #769](https://github.com/influxdata/influxdb/issues/769). Use retention duration when determining expired shards. (Thanks, @shugo) +- [Issue #736](https://github.com/influxdata/influxdb/issues/736). Only db admins should be able to drop a series +- [Issue #713](https://github.com/influxdata/influxdb/issues/713). Null should be a valid fill value +- [Issue #644](https://github.com/influxdata/influxdb/issues/644). Graphite api should write data in batches to the coordinator +- [Issue #740](https://github.com/influxdata/influxdb/issues/740). Panic when distinct fields are selected from an inner join +- [Issue #781](https://github.com/influxdata/influxdb/issues/781). Panic when distinct fields are added after an inner join + +## v0.8.0-rc.3 [2014-07-21] + +### Bugfixes + +- [Issue #752](https://github.com/influxdata/influxdb/issues/752). `./configure` should use goroot to find gofmt +- [Issue #758](https://github.com/influxdata/influxdb/issues/758). Clarify the reason behind graphite input plugin not starting. (Thanks, @otoolep) +- [Issue #759](https://github.com/influxdata/influxdb/issues/759). Don't revert the regex in the shard space. (Thanks, @shugo) +- [Issue #760](https://github.com/influxdata/influxdb/issues/760). Removing a server should remove it from the shard server ids. (Thanks, @shugo) +- [Issue #772](https://github.com/influxdata/influxdb/issues/772). Add sentinel values to all db. This caused the last key in the db to not be fetched properly. + +## v0.8.0-rc.2 [2014-07-15] + +- This release is to fix a build error in rc1 which caused rocksdb to not be available +- Bump up the `max-open-files` option to 1000 on all storage engines +- Lower the `write-buffer-size` to 1000 + +## v0.8.0-rc.1 [2014-07-15] + +### Features + +- [Issue #643](https://github.com/influxdata/influxdb/issues/643). Support pretty print json. (Thanks, @otoolep) +- [Issue #641](https://github.com/influxdata/influxdb/issues/641). Support multiple storage engines +- [Issue #665](https://github.com/influxdata/influxdb/issues/665). Make build tmp directory configurable in the make file. (Thanks, @dgnorton) +- [Issue #667](https://github.com/influxdata/influxdb/issues/667). Enable compression on all GET requests and when writing data +- [Issue #648](https://github.com/influxdata/influxdb/issues/648). Return permissions when listing db users. (Thanks, @nicolai86) +- [Issue #682](https://github.com/influxdata/influxdb/issues/682). Allow continuous queries to run without backfill (Thanks, @dhammika) +- [Issue #689](https://github.com/influxdata/influxdb/issues/689). **REQUIRES DATA MIGRATION** Move metadata into raft +- [Issue #255](https://github.com/influxdata/influxdb/issues/255). Support millisecond precision using `ms` suffix +- [Issue #95](https://github.com/influxdata/influxdb/issues/95). Drop database should not be synchronous +- [Issue #571](https://github.com/influxdata/influxdb/issues/571). Add support for an arbitrary number of shard spaces and retention policies +- Default storage engine changed to RocksDB + +### Bugfixes + +- [Issue #651](https://github.com/influxdata/influxdb/issues/651). Change permissions of symlink which fixes some installation issues. (Thanks, @Dieterbe) +- [Issue #670](https://github.com/influxdata/influxdb/issues/670). Don't warn on missing influxdb user on fresh installs +- [Issue #676](https://github.com/influxdata/influxdb/issues/676).
Allow storing high precision integer values without losing any information +- [Issue #695](https://github.com/influxdata/influxdb/issues/695). Prevent having duplicate field names in the write payload. (Thanks, @seunglee150) +- [Issue #731](https://github.com/influxdata/influxdb/issues/731). Don't enable the udp plugin if the `enabled` option is set to false +- [Issue #733](https://github.com/influxdata/influxdb/issues/733). Print an `INFO` message when the input plugin is disabled +- [Issue #707](https://github.com/influxdata/influxdb/issues/707). Graphite input plugin should work with payloads delimited by any whitespace character +- [Issue #734](https://github.com/influxdata/influxdb/issues/734). Don't buffer non replicated writes +- [Issue #465](https://github.com/influxdata/influxdb/issues/465). Recreating a currently deleting db or series doesn't bring back the old data anymore +- [Issue #358](https://github.com/influxdata/influxdb/issues/358). **BREAKING** List series should return as a single series +- [Issue #499](https://github.com/influxdata/influxdb/issues/499). **BREAKING** Querying non-existent database or series will return an error +- [Issue #570](https://github.com/influxdata/influxdb/issues/570). InfluxDB crashes during delete/drop of database +- [Issue #592](https://github.com/influxdata/influxdb/issues/592). Drop series is inefficient + +## v0.7.3 [2014-06-13] + +### Bugfixes + +- [Issue #637](https://github.com/influxdata/influxdb/issues/637). Truncate log files if the last request wasn't written properly +- [Issue #646](https://github.com/influxdata/influxdb/issues/646). CRITICAL: Duplicate shard ids for new shards if old shards are deleted. + +## v0.7.2 [2014-05-30] + +### Features + +- [Issue #521](https://github.com/influxdata/influxdb/issues/521). MODE works on all datatypes (Thanks, @richthegeek) + +### Bugfixes + +- [Issue #418](https://github.com/influxdata/influxdb/pull/418). Requests or responses larger than MAX_REQUEST_SIZE break things. +- [Issue #606](https://github.com/influxdata/influxdb/issues/606). InfluxDB will fail to start with invalid permission if log.txt didn't exist +- [Issue #602](https://github.com/influxdata/influxdb/issues/602). Merge will fail to work across shards + +## v0.7.1 [2014-05-29] + +### Bugfixes + +- [Issue #579](https://github.com/influxdata/influxdb/issues/579). Reject writes to nonexistent databases +- [Issue #597](https://github.com/influxdata/influxdb/issues/597). Force compaction after deleting data + +### Features + +- [Issue #476](https://github.com/influxdata/influxdb/issues/476). Support ARM architecture +- [Issue #578](https://github.com/influxdata/influxdb/issues/578). Support aliasing for expressions in parentheses +- [Issue #544](https://github.com/influxdata/influxdb/pull/544). Support forcing node removal from a cluster +- [Issue #591](https://github.com/influxdata/influxdb/pull/591). Support multiple udp input plugins (Thanks, @tpitale) +- [Issue #600](https://github.com/influxdata/influxdb/pull/600). Report version, os, arch, and raftName once per day. + +## v0.7.0 [2014-05-23] + +### Bugfixes + +- [Issue #557](https://github.com/influxdata/influxdb/issues/557). Group by time(1y) doesn't work while time(365d) works +- [Issue #547](https://github.com/influxdata/influxdb/issues/547). Add difference function (Thanks, @mboelstra) +- [Issue #550](https://github.com/influxdata/influxdb/issues/550). Fix tests on 32-bit ARM +- [Issue #524](https://github.com/influxdata/influxdb/issues/524).
Arithmetic operators and where conditions don't play nice together +- [Issue #561](https://github.com/influxdata/influxdb/issues/561). Fix missing query in parsing errors +- [Issue #563](https://github.com/influxdata/influxdb/issues/563). Add sample config for graphite over udp +- [Issue #537](https://github.com/influxdata/influxdb/issues/537). Incorrect query syntax causes internal error +- [Issue #565](https://github.com/influxdata/influxdb/issues/565). Empty series names shouldn't cause a panic +- [Issue #575](https://github.com/influxdata/influxdb/issues/575). Single point select doesn't interpret timestamps correctly +- [Issue #576](https://github.com/influxdata/influxdb/issues/576). We shouldn't set timestamps and sequence numbers when listing cq +- [Issue #560](https://github.com/influxdata/influxdb/issues/560). Use /dev/urandom instead of /dev/random +- [Issue #502](https://github.com/influxdata/influxdb/issues/502). Fix a + race condition in assigning id to db+series+field (Thanks @ohurvitz + for reporting this bug and providing a script to repro) + +### Features + +- [Issue #567](https://github.com/influxdata/influxdb/issues/567). Allow selecting from multiple series names by separating them with commas (Thanks, @peekeri) + +### Deprecated + +- [Issue #460](https://github.com/influxdata/influxdb/issues/460). Don't start automatically after installing +- [Issue #529](https://github.com/influxdata/influxdb/issues/529). Don't run influxdb as root +- [Issue #443](https://github.com/influxdata/influxdb/issues/443). Use `name` instead of `username` when returning cluster admins + +## v0.6.5 [2014-05-19] + +### Features + +- [Issue #551](https://github.com/influxdata/influxdb/issues/551). Add TOP and BOTTOM aggregate functions (Thanks, @chobie) + +### Bugfixes + +- [Issue #555](https://github.com/influxdata/influxdb/issues/555). Fix a regression introduced in the raft snapshot format + +## v0.6.4 [2014-05-16] + +### Features + +- Make the write batch size configurable (also applies to deletes) +- Optimize writing to multiple series +- [Issue #546](https://github.com/influxdata/influxdb/issues/546). Add UDP support for Graphite API (Thanks, @peekeri) + +### Bugfixes + +- Fix a bug in shard logic that caused short term shards to be clobbered with long term shards +- [Issue #489](https://github.com/influxdata/influxdb/issues/489). Remove replication factor from CreateDatabase command + +## v0.6.3 [2014-05-13] + +### Features + +- [Issue #505](https://github.com/influxdata/influxdb/issues/505). Return a version header with the http response (Thanks, @majst01) +- [Issue #520](https://github.com/influxdata/influxdb/issues/520). Print the version to the log file + +### Bugfixes + +- [Issue #516](https://github.com/influxdata/influxdb/issues/516). Close WAL log/index files when they aren't being used +- [Issue #532](https://github.com/influxdata/influxdb/issues/532). Don't log graphite connection EOF as an error +- [Issue #535](https://github.com/influxdata/influxdb/issues/535). WAL Replay hangs if response isn't received +- [Issue #538](https://github.com/influxdata/influxdb/issues/538). Don't panic if the same series existed twice in the request with different columns +- [Issue #536](https://github.com/influxdata/influxdb/issues/536). Joining the cluster after shards are created shouldn't cause new nodes to panic +- [Issue #539](https://github.com/influxdata/influxdb/issues/539).
count(distinct()) with fill shouldn't panic on empty groups +- [Issue #534](https://github.com/influxdata/influxdb/issues/534). Create a new series when interpolating + +## v0.6.2 [2014-05-09] + +### Bugfixes + +- [Issue #511](https://github.com/influxdata/influxdb/issues/511). Don't automatically create the database when a db user is created +- [Issue #512](https://github.com/influxdata/influxdb/issues/512). Group by should respect null values +- [Issue #518](https://github.com/influxdata/influxdb/issues/518). Filter Infinities and NaNs from the returned json +- [Issue #522](https://github.com/influxdata/influxdb/issues/522). Committing requests while replaying caused the WAL to skip some log files +- [Issue #369](https://github.com/influxdata/influxdb/issues/369). Fix some edge cases with WAL recovery + +## v0.6.1 [2014-05-06] + +### Bugfixes + +- [Issue #500](https://github.com/influxdata/influxdb/issues/500). Support `y` suffix in time durations +- [Issue #501](https://github.com/influxdata/influxdb/issues/501). Writes with invalid payload should be rejected +- [Issue #507](https://github.com/influxdata/influxdb/issues/507). New cluster admin passwords don't propagate properly to other nodes in a cluster +- [Issue #508](https://github.com/influxdata/influxdb/issues/508). Don't replay WAL entries for servers with no shards +- [Issue #464](https://github.com/influxdata/influxdb/issues/464). Admin UI shouldn't draw graphs for string columns +- [Issue #480](https://github.com/influxdata/influxdb/issues/480). Large values on the y-axis get cut off + +## v0.6.0 [2014-05-02] + +### Features + +- [Issue #477](https://github.com/influxdata/influxdb/issues/477). Add a udp json interface (Thanks, Julien Ammous) +- [Issue #491](https://github.com/influxdata/influxdb/issues/491). Make initial root password settable through env variable (Thanks, Edward Muller) + +### Bugfixes + +- [Issue #469](https://github.com/influxdata/influxdb/issues/469). Drop continuous queries when a database is dropped +- [Issue #431](https://github.com/influxdata/influxdb/issues/431). Don't log to standard output if a log file is specified in the config file +- [Issue #483](https://github.com/influxdata/influxdb/issues/483). Return 409 if a database already exists (Thanks, Edward Muller) +- [Issue #486](https://github.com/influxdata/influxdb/issues/486). Columns used in the target of a continuous query shouldn't be inserted in the time series +- [Issue #490](https://github.com/influxdata/influxdb/issues/490). Database user passwords cannot be changed (Thanks, Edward Muller) +- [Issue #495](https://github.com/influxdata/influxdb/issues/495). Enforce write permissions properly + +## v0.5.12 [2014-04-29] + +### Bugfixes + +- [Issue #419](https://github.com/influxdata/influxdb/issues/419), [Issue #478](https://github.com/influxdata/influxdb/issues/478). Allow hostname, raft and protobuf ports to be changed, without requiring manual intervention from the user + +## v0.5.11 [2014-04-25] + +### Features + +- [Issue #471](https://github.com/influxdata/influxdb/issues/471). Read and write permissions should be settable through the http api + +### Bugfixes + +- [Issue #323](https://github.com/influxdata/influxdb/issues/323). Continuous queries should guard against data loops +- [Issue #473](https://github.com/influxdata/influxdb/issues/473). Engine memory optimization + +## v0.5.10 [2014-04-22] + +### Features + +- [Issue #463](https://github.com/influxdata/influxdb/issues/463).
Allow series names to use any character (escape by wrapping in double quotes) +- [Issue #447](https://github.com/influxdata/influxdb/issues/447). Allow @ in usernames +- [Issue #466](https://github.com/influxdata/influxdb/issues/466). Allow column names to use any character (escape by wrapping in double quotes) + +### Bugfixes + +- [Issue #458](https://github.com/influxdata/influxdb/issues/458). Continuous queries with group by time() and a column should insert sequence numbers of 1 +- [Issue #457](https://github.com/influxdata/influxdb/issues/457). Deleting series that start with capital letters should work + +## v0.5.9 [2014-04-18] + +### Bugfixes + +- [Issue #446](https://github.com/influxdata/influxdb/issues/446). Check for (de)serialization errors +- [Issue #456](https://github.com/influxdata/influxdb/issues/456). Continuous queries failed if one of the group by columns had a null value +- [Issue #455](https://github.com/influxdata/influxdb/issues/455). Comparison operators should ignore null values + +## v0.5.8 [2014-04-17] + +- Renamed config.toml.sample to config.sample.toml + +### Bugfixes + +- [Issue #244](https://github.com/influxdata/influxdb/issues/244). Reconstruct the query from the ast +- [Issue #449](https://github.com/influxdata/influxdb/issues/449). Heartbeat timeouts can cause reading from connection to lock up +- [Issue #451](https://github.com/influxdata/influxdb/issues/451). Reduce the aggregation state that is kept in memory so that + aggregation queries over large periods of time don't take an insane amount of memory + +## v0.5.7 [2014-04-15] + +### Features + +- Queries are now logged as INFO in the log file before they run + +### Bugfixes + +- [Issue #328](https://github.com/influxdata/influxdb/issues/328). Join queries with math expressions don't work +- [Issue #440](https://github.com/influxdata/influxdb/issues/440). Heartbeat timeouts in logs +- [Issue #442](https://github.com/influxdata/influxdb/issues/442). shouldQuerySequentially didn't work as expected + causing count(*) queries on large time series to use + lots of memory +- [Issue #437](https://github.com/influxdata/influxdb/issues/437). Queries with negative constants don't parse properly +- [Issue #432](https://github.com/influxdata/influxdb/issues/432). Deleted data using a delete query is resurrected after a server restart +- [Issue #439](https://github.com/influxdata/influxdb/issues/439). Report the right location of the error in the query +- Fix some bugs with the WAL recovery on startup + +## v0.5.6 [2014-04-08] + +### Features + +- [Issue #310](https://github.com/influxdata/influxdb/issues/310). Request should support multiple timeseries +- [Issue #416](https://github.com/influxdata/influxdb/issues/416). Improve the time it takes to drop database + +### Bugfixes + +- [Issue #413](https://github.com/influxdata/influxdb/issues/413). Don't assume that group by interval is greater than a second +- [Issue #415](https://github.com/influxdata/influxdb/issues/415). Include the database when sending an auth error back to the user +- [Issue #421](https://github.com/influxdata/influxdb/issues/421). Make read timeout a config option +- [Issue #392](https://github.com/influxdata/influxdb/issues/392).
Different columns in different shards return invalid results when a query spans those shards + +## v0.5.5 [2014-04-04] + +- Upgrade leveldb 1.10 -> 1.15 + + This should be a backward compatible change, but is here for documentation only + +### Features + +- Add a command line option to repair corrupted leveldb databases on startup +- [Issue #401](https://github.com/influxdata/influxdb/issues/401). No limit on the number of columns in the group by clause + +### Bugfixes + +- [Issue #398](https://github.com/influxdata/influxdb/issues/398). Support now() and NOW() in the query lang +- [Issue #403](https://github.com/influxdata/influxdb/issues/403). Filtering should work with join queries +- [Issue #404](https://github.com/influxdata/influxdb/issues/404). Filtering with invalid condition shouldn't crash the server +- [Issue #405](https://github.com/influxdata/influxdb/issues/405). Percentile shouldn't crash for small number of values +- [Issue #408](https://github.com/influxdata/influxdb/issues/408). Make InfluxDB recover from internal bugs and panics +- [Issue #390](https://github.com/influxdata/influxdb/issues/390). Multiple response.WriteHeader when querying as admin +- [Issue #407](https://github.com/influxdata/influxdb/issues/407). Start processing continuous queries only after the WAL is initialized +- Close leveldb databases properly if we couldn't create a new Shard. See leveldb\_shard\_datastore\_test:131 + +## v0.5.4 [2014-04-02] + +### Bugfixes + +- [Issue #386](https://github.com/influxdata/influxdb/issues/386). Drop series should work with series containing dots +- [Issue #389](https://github.com/influxdata/influxdb/issues/389). Filtering shouldn't stop prematurely +- [Issue #341](https://github.com/influxdata/influxdb/issues/341). Make the number of shards that are queried in parallel configurable +- [Issue #394](https://github.com/influxdata/influxdb/issues/394). Support count(distinct) and count(DISTINCT) +- [Issue #362](https://github.com/influxdata/influxdb/issues/362). Limit should be enforced after aggregation + +## v0.5.3 [2014-03-31] + +### Bugfixes + +- [Issue #378](https://github.com/influxdata/influxdb/issues/378). Indexing should return if there are no requests added since the last index +- [Issue #370](https://github.com/influxdata/influxdb/issues/370). Filtering and limit should be enforced on the shards +- [Issue #379](https://github.com/influxdata/influxdb/issues/379). Boolean columns should be usable in where clauses +- [Issue #381](https://github.com/influxdata/influxdb/issues/381). Should be able to do deletes as a cluster admin + +## v0.5.2 [2014-03-28] + +### Bugfixes + +- [Issue #342](https://github.com/influxdata/influxdb/issues/342). Data resurrected after a server restart +- [Issue #367](https://github.com/influxdata/influxdb/issues/367). Influxdb won't start if the api port is commented out +- [Issue #355](https://github.com/influxdata/influxdb/issues/355). Return an error on wrong time strings +- [Issue #331](https://github.com/influxdata/influxdb/issues/331). Allow negative time values in the where clause +- [Issue #371](https://github.com/influxdata/influxdb/issues/371). Series index isn't deleted when the series is dropped +- [Issue #360](https://github.com/influxdata/influxdb/issues/360).
Store and recover continuous queries + +## v0.5.1 [2014-03-24] + +### Bugfixes + +- Revert the version of goraft due to a bug found in the latest version + +## v0.5.0 [2014-03-24] + +### Features + +- [Issue #293](https://github.com/influxdata/influxdb/pull/293). Implement a Graphite listener + +### Bugfixes + +- [Issue #340](https://github.com/influxdata/influxdb/issues/340). Writing many requests while replaying seems to cause commits out of order + +## v0.5.0-rc.6 [2014-03-20] + +### Bugfixes + +- Increase the raft election timeout to avoid unnecessary re-elections +- Sort points before writing them to avoid an explosion in the request + number when the points are written randomly +- [Issue #335](https://github.com/influxdata/influxdb/issues/335). Fix the regexp for interpolating more than one column value in continuous queries +- [Issue #318](https://github.com/influxdata/influxdb/pull/318). Support EXPLAIN queries +- [Issue #333](https://github.com/influxdata/influxdb/pull/333). Fail + when the password is too short or too long instead of passing it to + the crypto library + +## v0.5.0-rc.5 [2014-03-11] + +### Bugfixes + +- [Issue #312](https://github.com/influxdata/influxdb/issues/312). WAL should wait for the server ID to be set before recovering +- [Issue #301](https://github.com/influxdata/influxdb/issues/301). Use ref counting to guard against race conditions in the shard cache +- [Issue #319](https://github.com/influxdata/influxdb/issues/319). Propagate engine creation errors correctly to the user +- [Issue #316](https://github.com/influxdata/influxdb/issues/316). Make + sure we don't starve goroutines if we get an access denied error + from one of the shards +- [Issue #306](https://github.com/influxdata/influxdb/issues/306). Deleting/Dropping a database takes a lot of memory +- [Issue #302](https://github.com/influxdata/influxdb/issues/302). Should be able to set negative timestamps on points +- [Issue #327](https://github.com/influxdata/influxdb/issues/327). Make delete queries not use the WAL. This addresses #315, #317 and #314 +- [Issue #321](https://github.com/influxdata/influxdb/issues/321). Make sure we split points on shards properly + +## v0.5.0-rc.4 [2014-03-07] + +### Bugfixes + +- [Issue #298](https://github.com/influxdata/influxdb/issues/298). Fix limit when querying multiple shards +- [Issue #305](https://github.com/influxdata/influxdb/issues/305). Shard IDs not unique after restart +- [Issue #309](https://github.com/influxdata/influxdb/issues/309). Don't relog the requests on the remote server +- Fix a few bugs in the WAL and refactor the way it works (this requires purging the WAL from the previous rc) + +## v0.5.0-rc.3 [2014-03-03] + +### Bugfixes + +- [Issue #69](https://github.com/influxdata/influxdb/issues/69). Support column aliases +- [Issue #287](https://github.com/influxdata/influxdb/issues/287). Make the LRU cache size configurable +- [Issue #38](https://github.com/influxdata/influxdb/issues/38). Fix a memory leak discussed in this story +- [Issue #286](https://github.com/influxdata/influxdb/issues/286). Make the number of open shards configurable +- Make LevelDB use the max open files configuration option. + +## v0.5.0-rc.2 [2014-02-27] + +### Bugfixes + +- [Issue #274](https://github.com/influxdata/influxdb/issues/274). Crash after restart +- [Issue #277](https://github.com/influxdata/influxdb/issues/277). Ensure duplicate shards won't be created +- [Issue #279](https://github.com/influxdata/influxdb/issues/279).
Limits not working on regex queries +- [Issue #281](https://github.com/influxdata/influxdb/issues/281). `./influxdb -v` should print the SHA when building from source +- [Issue #283](https://github.com/influxdata/influxdb/issues/283). Dropping a shard and restarting in a cluster causes a panic +- [Issue #288](https://github.com/influxdata/influxdb/issues/288). Sequence numbers should be unique per server ID + +## v0.5.0-rc.1 [2014-02-25] + +### Bugfixes + +- Ensure large deletes don't take too much memory +- [Issue #240](https://github.com/influxdata/influxdb/pull/240). Unable to query against columns with `.` in the name +- [Issue #250](https://github.com/influxdata/influxdb/pull/250). Different results between a normal query and a continuous query with a "group by" clause +- [Issue #216](https://github.com/influxdata/influxdb/pull/216). Results with no points should exclude columns and points + +### Features + +- [Issue #243](https://github.com/influxdata/influxdb/issues/243). Should have an endpoint to GET a user's attributes +- [Issue #269](https://github.com/influxdata/influxdb/pull/269), [Issue #65](https://github.com/influxdata/influxdb/issues/65) New clustering architecture (see docs), with the side effect that queries can be distributed between multiple shards +- [Issue #164](https://github.com/influxdata/influxdb/pull/269), [Issue #103](https://github.com/influxdata/influxdb/pull/269), [Issue #166](https://github.com/influxdata/influxdb/pull/269), [Issue #165](https://github.com/influxdata/influxdb/pull/269), [Issue #132](https://github.com/influxdata/influxdb/pull/269) Make the request log a log file instead of leveldb, with recovery on startup + +### Deprecated + +- [Issue #189](https://github.com/influxdata/influxdb/issues/189). `/cluster_admins` and `/db/:db/users` return usernames in a `name` key instead of a `username` key +- [Issue #216](https://github.com/influxdata/influxdb/pull/216). Results with no points should exclude columns and points + +## v0.4.4 [2014-02-05] + +### Features + +- Make the leveldb max open files configurable in the toml file + +## v0.4.3 [2014-01-31] + +### Bugfixes + +- [Issue #225](https://github.com/influxdata/influxdb/issues/225). Remove a hard limit on the points returned by the datastore +- [Issue #223](https://github.com/influxdata/influxdb/issues/223). Null values caused count(distinct()) to panic +- [Issue #224](https://github.com/influxdata/influxdb/issues/224). Null values broke replication due to a protobuf limitation + +## v0.4.1 [2014-01-30] + +### Features + +- [Issue #193](https://github.com/influxdata/influxdb/issues/193). Allow logging to stdout. Thanks @schmurfy +- [Issue #190](https://github.com/influxdata/influxdb/pull/190). Add support for SSL +- [Issue #194](https://github.com/influxdata/influxdb/pull/194). Should be able to disable the Admin interface + +### Bugfixes + +- [Issue #33](https://github.com/influxdata/influxdb/issues/33). Don't call WriteHeader more than once per request +- [Issue #195](https://github.com/influxdata/influxdb/issues/195). Allow the bind address to be configurable. Thanks @schmurfy +- [Issue #199](https://github.com/influxdata/influxdb/issues/199). Make the test timeout configurable +- [Issue #200](https://github.com/influxdata/influxdb/issues/200). Selecting `time` or `sequence_number` silently fails +- [Issue #215](https://github.com/influxdata/influxdb/pull/215). Server fails to start up after Raft log compaction and restart.
+ +## v0.4.0 [2014-01-17] + +### Features + +- [Issue #86](https://github.com/influxdata/influxdb/issues/86). Support arithmetic expressions in the select clause +- [Issue #92](https://github.com/influxdata/influxdb/issues/92). Change '==' to '=' and '!=' to '<>' +- [Issue #88](https://github.com/influxdata/influxdb/issues/88). Support datetime strings +- [Issue #64](https://github.com/influxdata/influxdb/issues/64). Shard writes and queries across the cluster with replay for briefly downed nodes (< 24 hrs) +- [Issue #78](https://github.com/influxdata/influxdb/issues/78). Sequence numbers persist across restarts so they're not reused +- [Issue #102](https://github.com/influxdata/influxdb/issues/102). Support expressions in the where condition +- [Issue #101](https://github.com/influxdata/influxdb/issues/101). Support expressions in aggregates +- [Issue #62](https://github.com/influxdata/influxdb/issues/62). Support updating and deleting column values +- [Issue #96](https://github.com/influxdata/influxdb/issues/96). Replicate deletes in a cluster +- [Issue #94](https://github.com/influxdata/influxdb/issues/94). Delete queries +- [Issue #116](https://github.com/influxdata/influxdb/issues/116). Use proper logging +- [Issue #40](https://github.com/influxdata/influxdb/issues/40). Use TOML instead of JSON in the config file +- [Issue #99](https://github.com/influxdata/influxdb/issues/99). Support list series in the query language +- [Issue #149](https://github.com/influxdata/influxdb/issues/149). Cluster admins should be able to perform reads and writes +- [Issue #108](https://github.com/influxdata/influxdb/issues/108). Querying one point using `time =` +- [Issue #114](https://github.com/influxdata/influxdb/issues/114). Servers should periodically check that they're consistent +- [Issue #93](https://github.com/influxdata/influxdb/issues/93). Should be able to drop a time series +- [Issue #177](https://github.com/influxdata/influxdb/issues/177). Support drop series in the query language +- [Issue #184](https://github.com/influxdata/influxdb/issues/184). Implement Raft log compaction +- [Issue #153](https://github.com/influxdata/influxdb/issues/153). Implement continuous queries + +### Bugfixes + +- [Issue #90](https://github.com/influxdata/influxdb/issues/90). Group by multiple columns panics +- [Issue #89](https://github.com/influxdata/influxdb/issues/89). 'Group by' combined with 'where' not working +- [Issue #106](https://github.com/influxdata/influxdb/issues/106). Don't panic if we only see one point and can't calculate a derivative +- [Issue #105](https://github.com/influxdata/influxdb/issues/105). Panic when using a where clause that references columns with null values +- [Issue #61](https://github.com/influxdata/influxdb/issues/61). Remove default limits from queries +- [Issue #118](https://github.com/influxdata/influxdb/issues/118). Make column names starting with '_' legal +- [Issue #121](https://github.com/influxdata/influxdb/issues/121). Don't fall back to the cluster admin auth if the db user auth fails +- [Issue #127](https://github.com/influxdata/influxdb/issues/127). Return an error on delete queries with a where condition that doesn't have time +- [Issue #117](https://github.com/influxdata/influxdb/issues/117). Fill empty groups with default values +- [Issue #150](https://github.com/influxdata/influxdb/pull/150). Fix the parser for when multiple divisions look like a regex +- [Issue #158](https://github.com/influxdata/influxdb/issues/158). Logged deletes should be stored with the time range if missing.
+- [Issue #136](https://github.com/influxdata/influxdb/issues/136). Make sure writes are replicated in order to avoid triggering replays +- [Issue #145](https://github.com/influxdata/influxdb/issues/145). Server fails to join the cluster if all nodes start at the same time +- [Issue #176](https://github.com/influxdata/influxdb/issues/176). Drop database should take effect on all nodes +- [Issue #180](https://github.com/influxdata/influxdb/issues/180). Column names not returned when running a multi-node cluster and writing more than one point +- [Issue #182](https://github.com/influxdata/influxdb/issues/182). Queries with an invalid limit clause crash the server + +### Deprecated + +- Deprecate '==' and '!=' in favor of '=' and '<>', respectively +- Deprecate `/dbs` (for listing databases) in favor of a more consistent `/db` endpoint +- Deprecate the `username` field for a more consistent `name` field in `/db/:db/users` and `/cluster_admins` +- Deprecate the endpoints `/db/:db/admins/:user` in favor of using `/db/:db/users/:user`, which should + be used to update user flags, password, etc. +- Querying for column names that don't exist no longer throws an error + +## v0.3.2 + +### Features + +- [Issue #82](https://github.com/influxdata/influxdb/issues/82). Add an endpoint for listing available admin interfaces +- [Issue #80](https://github.com/influxdata/influxdb/issues/80). Support durations when specifying start and end time +- [Issue #81](https://github.com/influxdata/influxdb/issues/81). Add support for IN + +### Bugfixes + +- [Issue #75](https://github.com/influxdata/influxdb/issues/75). Don't allow time series names that start with an underscore +- [Issue #85](https://github.com/influxdata/influxdb/issues/85). Non-existing columns exist after they have been queried before + +## v0.3.0 + +### Features + +- [Issue #51](https://github.com/influxdata/influxdb/issues/51). Implement first and last aggregates +- [Issue #35](https://github.com/influxdata/influxdb/issues/35). Support table aliases in join queries +- [Issue #71](https://github.com/influxdata/influxdb/issues/71). Add WillReturnSingleSeries to the Query +- [Issue #61](https://github.com/influxdata/influxdb/issues/61). Limit should default to 10k +- [Issue #59](https://github.com/influxdata/influxdb/issues/59). Add a histogram aggregate function + +### Bugfixes + +- Fix joins and merges when the query is a descending order query +- [Issue #57](https://github.com/influxdata/influxdb/issues/57). Don't panic when the type of time != float +- [Issue #63](https://github.com/influxdata/influxdb/issues/63). Aggregate queries should not have a sequence_number column + +## v0.2.0 + +### Features + +- [Issue #37](https://github.com/influxdata/influxdb/issues/37). Support the negation of the regex matcher !~ +- [Issue #47](https://github.com/influxdata/influxdb/issues/47). Spill out query and database detail at the time of a bug report + +### Bugfixes + +- [Issue #36](https://github.com/influxdata/influxdb/issues/36). The regex operator should be =~ not ~= +- [Issue #39](https://github.com/influxdata/influxdb/issues/39). Return proper content types from the HTTP API +- [Issue #42](https://github.com/influxdata/influxdb/issues/42). Make the API consistent with the docs +- [Issue #41](https://github.com/influxdata/influxdb/issues/41). Table/Points not deleted when a database is dropped +- [Issue #45](https://github.com/influxdata/influxdb/issues/45). Aggregation shouldn't mess up the order of the points +- [Issue #44](https://github.com/influxdata/influxdb/issues/44).
Fix crashes on RHEL 5.9 +- [Issue #34](https://github.com/influxdata/influxdb/issues/34). Ascending order always returns null for columns that have a null value +- [Issue #55](https://github.com/influxdata/influxdb/issues/55). Limit should limit the points that match the where clause +- [Issue #53](https://github.com/influxdata/influxdb/issues/53). Writing null values via the HTTP API fails + +### Deprecated + +- Preparing to deprecate `/dbs` (for listing databases) in favor of a more consistent `/db` endpoint +- Preparing to deprecate the `username` field for a more consistent `name` field in `/db/:db/users` +- Preparing to deprecate the endpoints `/db/:db/admins/:user` in favor of using `/db/:db/users/:user`, which should + be used to update user flags, password, etc. + +## v0.1.0 + +### Features + +- [Issue #29](https://github.com/influxdata/influxdb/issues/29). Semicolon is now optional in queries +- [Issue #31](https://github.com/influxdata/influxdb/issues/31). Support Basic Auth as well as query params for authentication + +### Bugfixes + +- Don't allow creating users with an empty username +- [Issue #22](https://github.com/influxdata/influxdb/issues/22). Don't set goroot if it was already set +- [Issue #25](https://github.com/influxdata/influxdb/issues/25). Fix queries that use the median aggregator +- [Issue #26](https://github.com/influxdata/influxdb/issues/26). Default log and db directories should be in /opt/influxdb/shared/data +- [Issue #27](https://github.com/influxdata/influxdb/issues/27). Group by should not blow up if one of the columns in group by has null values +- [Issue #30](https://github.com/influxdata/influxdb/issues/30). Column indexes/names getting off somehow +- [Issue #32](https://github.com/influxdata/influxdb/issues/32). Fix many typos in the codebase. Thanks @pborreli + +## v0.0.9 + +#### Features + +- Add stddev(...) support +- Better docs, thanks @auxesis and @d-snp + +#### Bugfixes + +- Set PYTHONPATH and CC appropriately on Mac OS X +- [Issue #18](https://github.com/influxdata/influxdb/issues/18). Fix 386 Debian and Red Hat packages +- [Issue #23](https://github.com/influxdata/influxdb/issues/23). Fix the init scripts on Red Hat + +## v0.0.8 + +#### Features + +- Add a way to reset the root password from the command line +- Add distinct(..) and derivative(...) support +- Print test coverage if running go1.2 + +#### Bugfixes + +- Fix the default admin site path in the .deb and .rpm packages +- Fix the configuration filename in the .tar.gz package + +## v0.0.7 + +#### Features + +- Include the admin site in the repo to make it easier for newcomers + +## v0.0.6 + +#### Features + +- Add count(distinct(..)) support + +#### Bugfixes + +- Reuse levigo read/write options.
+ +## v0.0.5 + +#### Features + +- Cache passwords in memory to speed up password verification +- Add MERGE and INNER JOIN support + +#### Bugfixes + +- All columns should be returned if `select *` was used +- Read/Write benchmarks + +## v0.0.2 + +#### Features + +- Add an admin UI +- Deb and RPM packages + +#### Bugfixes + +- Fix some nil pointer dereferences +- Clean up the aggregators implementation + +## v0.0.1 [2013-10-22] + + * Initial Release diff --git a/vendor/github.com/influxdata/influxdb/CODING_GUIDELINES.md b/vendor/github.com/influxdata/influxdb/CODING_GUIDELINES.md new file mode 100644 index 000000000000..6c1d2f2e8b11 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/CODING_GUIDELINES.md @@ -0,0 +1,82 @@ +_This document is currently in draft form._ + +# Background + +The goal of this guide is to capture some Dos and Don'ts of Go code for the InfluxDB database. When it comes to Go, writing good code is often achieved with the help of tools like `go fmt` and `go vet`. However, there are still some practices that no tool can enforce. This guide lists some specific practices to follow when writing code for the database. + +*Like everything, one needs to use good judgment.* There will always be times when it doesn't make sense to follow a guideline outlined in this document. If that case arises, be ready to justify your choices. + +# The Guidelines + +## Try not to use third-party libraries + +A third-party package is defined as one that is not part of the standard Go distribution. Generally speaking, we prefer to minimize our use of third-party packages and avoid them unless absolutely necessary. We'll often write a little bit of code rather than pull in a third-party package. Of course, we do use some third-party packages -- most importantly we use [BoltDB](https://github.com/boltdb/bolt) in some storage engines. So to maximise the chance your change will be accepted by us, use only the standard libraries, or the third-party packages we have decided to use. + +For rationale, check out the post [The Case Against Third Party Libraries](http://blog.gopheracademy.com/advent-2014/case-against-3pl/). + +## Always include a default case in a 'switch' statement +The lack of a `default` case in a `switch` statement can be a significant source of bugs. This is particularly true in the case of a type-assertion switch. So always include a `default` case unless you have an explicit reason not to. + +## When -- and when not -- to set a channel to 'nil' + +## Use defer with anonymous functions to handle complex locking +Consider a block of code like the following. +``` + mu.Lock() + if foo == "quit" { + mu.Unlock() + return + } else if foo == "continue" { + if bar == "quit" { + mu.Unlock() + return + } + bar = "still going" + } else { + qux = "here at last" + mu.Unlock() + return + } + foo = "more to do" + bar = "still more to do" + mu.Unlock() + + qux = "finished now" + return +``` +While this is obviously contrived, complex lock control like this is sometimes required, and doesn't lend itself to `defer`. But as the code evolves, it's easy to introduce new cases and forget to release locks.
One way to address this is to use an anonymous function like so: +``` + more := func() bool { + mu.Lock() + defer mu.Unlock() + if foo == "quit" { + return false + } else if foo == "continue" { + if bar == "quit" { + return false + } + bar = "still going" + } else { + qux = "here at last" + return false + } + foo = "more to do" + bar = "still more to do" + return true + }() + + if more { + qux = "finished" + } + return +``` +This allows us to use `defer` and ensures that if any new cases are added to the logic within the anonymous function, the lock will always be released. Another advantage of this approach is that `defer` will still run in the event of a panic, ensuring the locks are released even in that case. + +## When to call 'panic()' + +# Useful links +- [Useful techniques in Go](http://arslan.io/ten-useful-techniques-in-go) +- [Go in production](http://peter.bourgon.org/go-in-production/) +- [Principles of designing Go APIs with channels](https://inconshreveable.com/07-08-2014/principles-of-designing-go-apis-with-channels/) +- [Common mistakes in Golang](http://soryy.com/blog/2014/common-mistakes-with-go-lang/). Especially the section `Loops, Closures, and Local Variables` + diff --git a/vendor/github.com/influxdata/influxdb/CONTRIBUTING.md b/vendor/github.com/influxdata/influxdb/CONTRIBUTING.md new file mode 100644 index 000000000000..f62ebd077edd --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/CONTRIBUTING.md @@ -0,0 +1,280 @@ +Contributing to InfluxDB +======================== + +Bug reports +--------------- +Before you file an issue, please search existing issues in case it has already been filed, or perhaps even fixed. If you file an issue, please include the following. +* Full details of your operating system (or distribution), e.g. 64-bit Ubuntu 14.04. +* The version of InfluxDB you are running. +* Whether you installed it using a pre-built package, or built it from source. +* A small test case, if applicable, that demonstrates the issue. + +Remember the golden rule of bug reports: **The easier you make it for us to reproduce the problem, the faster it will get fixed.** +If you have never written a bug report before, or if you want to brush up on your bug reporting skills, we recommend reading [Simon Tatham's essay "How to Report Bugs Effectively."](http://www.chiark.greenend.org.uk/~sgtatham/bugs.html) + +Test cases should be in the form of `curl` commands. For example: +```bash +# create database +curl -G http://localhost:8086/query --data-urlencode "q=CREATE DATABASE mydb" + +# create retention policy +curl -G http://localhost:8086/query --data-urlencode "q=CREATE RETENTION POLICY myrp ON mydb DURATION 365d REPLICATION 1 DEFAULT" + +# write data +curl -X POST http://localhost:8086/write --data-urlencode "db=mydb" --data-binary "cpu,region=useast,host=server_1,service=redis value=61" + +# Delete a Measurement +curl -G http://localhost:8086/query --data-urlencode 'db=mydb' --data-urlencode 'q=DROP MEASUREMENT cpu' + +# Query the Measurement +# Bug: expected it to return no data, but data comes back. +curl -G http://localhost:8086/query --data-urlencode 'db=mydb' --data-urlencode 'q=SELECT * from cpu' +``` +**If you don't include a clear test case like this, your issue may not be investigated, and may even be closed**. If writing the data is too difficult, please zip up your data directory and include a link to it in your bug report. + +Please note that issues are *not the place to file general questions* such as "how do I use collectd with InfluxDB?"
Questions of this nature should be sent to the [Google Group](https://groups.google.com/forum/#!forum/influxdb), not filed as issues. Issues like this will be closed. + +Feature requests +--------------- +We really like to receive feature requests, as it helps us prioritize our work. Please be clear about your requirements, as incomplete feature requests may simply be closed if we don't understand what you would like to see added to InfluxDB. + +Contributing to the source code +--------------- + +InfluxDB follows standard Go project structure. This means that all your Go development is done in `$GOPATH/src`. GOPATH can be any directory under which InfluxDB and all its dependencies will be cloned. For full details on the project structure, follow along below. + +You should also read our [coding guide](https://github.com/influxdata/influxdb/blob/master/CODING_GUIDELINES.md) to better understand how to write code for InfluxDB. + +Submitting a pull request +------------ +To submit a pull request you should fork the InfluxDB repository, and make your change on a feature branch of your fork. Then generate a pull request from your branch against *master* of the InfluxDB repository. Include in your pull request details of your change -- the why *and* the how -- as well as the testing you performed. Also, be sure to run the test suite with your change in place. Changes that cause tests to fail cannot be merged. + +There will usually be some back and forth as we finalize the change, but once that completes it may be merged. + +To assist in review for the PR, please add the following to your pull request comment: + +```md +- [ ] CHANGELOG.md updated +- [ ] Rebased/mergeable +- [ ] Tests pass +- [ ] Sign [CLA](https://influxdata.com/community/cla/) (if not already signed) +``` + +Signing the CLA +--------------- + +If you are going to be contributing back to InfluxDB please take a +second to sign our CLA, which can be found +[on our website](https://influxdata.com/community/cla/). + +Installing Go +------------- +InfluxDB requires Go 1.7.4. + +At InfluxDB we find gvm, a Go version manager, useful for installing Go. For instructions +on how to install it see [the gvm page on GitHub](https://github.com/moovweb/gvm). + +After installing gvm you can install and set the default Go version by +running the following: + + gvm install go1.7.4 + gvm use go1.7.4 --default + +Installing GDM +------------- +InfluxDB uses [gdm](https://github.com/sparrc/gdm) to manage dependencies. Install it by running the following: + + go get github.com/sparrc/gdm + +Revision Control Systems +------------- +Go has the ability to import remote packages via revision control systems with the `go get` command. To ensure that you can retrieve any remote package, be sure to install the following RCS software on your system. +Currently the project only depends on `git` and `mercurial`. + +* [Install Git](http://git-scm.com/book/en/Getting-Started-Installing-Git) +* [Install Mercurial](http://mercurial.selenic.com/wiki/Download) + +Getting the source +------ +Set up the project structure and fetch the repo like so: + +```bash + mkdir $HOME/gocodez + export GOPATH=$HOME/gocodez + go get github.com/influxdata/influxdb +``` + +You can add the line `export GOPATH=$HOME/gocodez` to your bash/zsh profile so it is set for every shell, instead of having to run it manually every time. + +Cloning a fork +------------- +If you wish to work with a fork of InfluxDB, your own fork for example, you must still follow the directory structure above.
Instead of cloning the main repo, clone your fork. Follow the steps below to work with a fork: + +```bash + export GOPATH=$HOME/gocodez + mkdir -p $GOPATH/src/github.com/influxdata + cd $GOPATH/src/github.com/influxdata + git clone git@github.com:<username>/influxdb +``` + +Retaining the directory structure `$GOPATH/src/github.com/influxdata` is necessary so that Go imports work correctly. + +Build and Test +----- + +Make sure you have Go installed and the project structure as shown above. To get the dependencies for the project, execute the following commands: + +```bash +cd $GOPATH/src/github.com/influxdata/influxdb +gdm restore +``` + +To then build and install the binaries, run the following command. +```bash +go clean ./... +go install ./... +``` +The binaries will be located in `$GOPATH/bin`. Please note that the InfluxDB binary is named `influxd`, not `influxdb`. + +To set the version and commit flags during the build, pass the following to the **install** command: + +```bash +-ldflags="-X main.version=$VERSION -X main.branch=$BRANCH -X main.commit=$COMMIT" +``` + +where `$VERSION` is the version, `$BRANCH` is the branch, and `$COMMIT` is the git commit hash. + +If you want to build packages, see `build.py` usage information: + +```bash +python build.py --help + +# Or to build a package for your current system +python build.py --package +``` + +To run the tests, execute the following command: + +```bash +cd $GOPATH/src/github.com/influxdata/influxdb +go test -v ./... + +# run tests that match some pattern +go test -run=TestDatabase . -v + +# run tests and show coverage +go test -coverprofile /tmp/cover . && go tool cover -html /tmp/cover +``` + +To install go cover, run the following command: +``` +go get golang.org/x/tools/cmd/cover +``` + +Generated Google Protobuf code +----------------- +Most changes to the source do not require that the generated protocol buffer code be changed. But if you need to modify the protocol buffer code, you'll first need to install the protocol buffers toolchain. + +First install the [protocol buffer compiler](https://developers.google.com/protocol-buffers/) 2.6.1 or later for your OS. + +Then install the Go plugins: + +```bash +go get github.com/gogo/protobuf/proto +go get github.com/gogo/protobuf/protoc-gen-gogo +go get github.com/gogo/protobuf/gogoproto +``` + +Finally, run `go generate` after updating any `*.proto` file: + +```bash +go generate ./... +``` +**Troubleshooting** + +If generating the protobuf code is failing for you, check each of the following: +* Ensure the protobuf library can be found. Make sure that `LD_LIBRARY_PATH` includes the directory in which the library `libprotoc.so` has been installed. +* Ensure the command `protoc-gen-gogo`, found in `GOPATH/bin`, is on your path. This can be done by adding `GOPATH/bin` to `PATH`. + + +Generated Go Templates +---------------------- + +The query engine requires optimized data structures for each data type, so +instead of writing each implementation several times we use templates. _Do not +change code that ends in a `.gen.go` extension!_ Instead you must edit the +`.gen.go.tmpl` file that was used to generate it. + +Once you've edited the template file, you'll need the [`tmpl`][tmpl] utility +to generate the code: + +```sh +$ go get github.com/benbjohnson/tmpl +``` + +Then you can regenerate all templates in the project: + +```sh +$ go generate ./...
+``` + +[tmpl]: https://github.com/benbjohnson/tmpl + + +Pre-commit checks +------------- + +We have a pre-commit hook to make sure code is formatted properly and vetted before you commit any changes. We strongly recommend using the pre-commit hook to guard against accidentally committing unformatted code. To use the pre-commit hook, run the following: +```bash + cd $GOPATH/src/github.com/influxdata/influxdb + cp .hooks/pre-commit .git/hooks/ +``` +In case the commit is rejected because it's not formatted, you can run the following to format the code: + +``` +go fmt ./... +go vet ./... +``` + +To install go vet, run the following command: +``` +go get golang.org/x/tools/cmd/vet +``` + +NOTE: If you have not installed Mercurial, the above command will fail. See [Revision Control Systems](#revision-control-systems) above. + +For more information on `go vet`, [read the GoDoc](https://godoc.org/golang.org/x/tools/cmd/vet). + +Profiling +----- +When troubleshooting problems with CPU or memory, the Go toolchain can be helpful. You can start InfluxDB with CPU and memory profiling turned on. For example: + +```sh +# start influx with profiling +./influxd -cpuprofile influxdcpu.prof -memprof influxdmem.prof +# run queries, writes, whatever you're testing +# Quit out of influxd and influxdcpu.prof will then be written. +# open up pprof to examine the profiling data. +go tool pprof ./influxd influxdcpu.prof +# once inside run "web", opens up browser with the CPU graph +# can also run "web <command>" to zoom in. Or "list <command>" to see specific lines +``` +Note that when you pass the binary to `go tool pprof` *you must specify the path to the binary*. + +If you are profiling benchmarks built with the `testing` package, you may wish +to use the [`github.com/pkg/profile`](https://github.com/pkg/profile) package to limit +the code being profiled: + +```go +func BenchmarkSomething(b *testing.B) { + // do something intensive like fill database with data... + defer profile.Start(profile.ProfilePath("/tmp"), profile.MemProfile).Stop() + // do something that you want to profile... +} +``` + +Continuous Integration testing +----- +InfluxDB uses CircleCI for continuous integration testing. To see how the code is built and tested, check out [this file](https://github.com/influxdata/influxdb/blob/master/circle-test.sh). It closely follows the build and test process outlined above. You can see the exact version of Go InfluxDB uses for testing by consulting that file. diff --git a/vendor/github.com/influxdata/influxdb/DOCKER.md b/vendor/github.com/influxdata/influxdb/DOCKER.md new file mode 100644 index 000000000000..4d902e642ec8 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/DOCKER.md @@ -0,0 +1,30 @@ +# Docker Setup + +This document describes how to build and run a minimal InfluxDB container under Docker. Currently, it has only been tested for local development and assumes that you have a working Docker environment. + +## Building Image + +To build a Docker image for InfluxDB from your current checkout, run the following: + +``` +$ ./build-docker.sh +``` + +This script uses the `golang:1.7.4` image to build a fully static binary of `influxd` and then adds it to a minimal `scratch` image. + +To build the image using a different version of Go: + +``` +$ GO_VER=1.7.4 ./build-docker.sh +``` + +Available versions can be found [here](https://hub.docker.com/_/golang/).
+ +## Single Node Container + +This will start an interactive, single-node container that publishes the container's ports `8086` and `8088` to the host's ports `8086` and `8088`, respectively. This is identical to starting `influxd` manually. + +``` +$ docker run -it -p 8086:8086 -p 8088:8088 influxdb +``` diff --git a/vendor/github.com/influxdata/influxdb/Dockerfile b/vendor/github.com/influxdata/influxdb/Dockerfile new file mode 100644 index 000000000000..d30cd300dbbe --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/Dockerfile @@ -0,0 +1,24 @@ +FROM busybox:ubuntu-14.04 + +MAINTAINER Jason Wilder "" + +# admin, http, udp, cluster, graphite, opentsdb, collectd +EXPOSE 8083 8086 8086/udp 8088 2003 4242 25826 + +WORKDIR /app + +# copy binary into image +COPY influxd /app/ + +# Add influxd to the PATH +ENV PATH=/app:$PATH + +# Generate a default config +RUN influxd config > /etc/influxdb.toml + +# Use /data for all disk storage +RUN sed -i 's/dir = "\/.*influxdb/dir = "\/data/' /etc/influxdb.toml + +VOLUME ["/data"] + +ENTRYPOINT ["influxd", "--config", "/etc/influxdb.toml"] diff --git a/vendor/github.com/influxdata/influxdb/Dockerfile_build_ubuntu32 b/vendor/github.com/influxdata/influxdb/Dockerfile_build_ubuntu32 new file mode 100644 index 000000000000..49466a0a2e5a --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/Dockerfile_build_ubuntu32 @@ -0,0 +1,35 @@ +FROM ioft/i386-ubuntu:14.04 + +RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \ + python-software-properties \ + software-properties-common \ + wget \ + git \ + mercurial \ + make \ + ruby \ + ruby-dev \ + rpm \ + zip \ + python \ + python-boto + +RUN gem install fpm + +# Install go +ENV GOPATH /root/go +ENV GO_VERSION 1.7.4 +ENV GO_ARCH 386 +RUN wget https://storage.googleapis.com/golang/go${GO_VERSION}.linux-${GO_ARCH}.tar.gz; \ + tar -C /usr/local/ -xf /go${GO_VERSION}.linux-${GO_ARCH}.tar.gz ; \ + rm /go${GO_VERSION}.linux-${GO_ARCH}.tar.gz +ENV PATH /usr/local/go/bin:$PATH + +ENV PROJECT_DIR $GOPATH/src/github.com/influxdata/influxdb +ENV PATH $GOPATH/bin:$PATH +RUN mkdir -p $PROJECT_DIR +WORKDIR $PROJECT_DIR + +VOLUME $PROJECT_DIR + +ENTRYPOINT [ "/root/go/src/github.com/influxdata/influxdb/build.py" ] diff --git a/vendor/github.com/influxdata/influxdb/Dockerfile_build_ubuntu64 b/vendor/github.com/influxdata/influxdb/Dockerfile_build_ubuntu64 new file mode 100644 index 000000000000..b8ceba34bc13 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/Dockerfile_build_ubuntu64 @@ -0,0 +1,38 @@ +FROM ubuntu:trusty + +RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \ + python-software-properties \ + software-properties-common \ + wget \ + git \ + mercurial \ + make \ + ruby \ + ruby-dev \ + rpm \ + zip \ + python \ + python-boto \ + asciidoc \ + xmlto \ + docbook-xsl + +RUN gem install fpm + +# Install go +ENV GOPATH /root/go +ENV GO_VERSION 1.7.4 +ENV GO_ARCH amd64 +RUN wget https://storage.googleapis.com/golang/go${GO_VERSION}.linux-${GO_ARCH}.tar.gz; \ + tar -C /usr/local/ -xf /go${GO_VERSION}.linux-${GO_ARCH}.tar.gz ; \ + rm /go${GO_VERSION}.linux-${GO_ARCH}.tar.gz +ENV PATH /usr/local/go/bin:$PATH + +ENV PROJECT_DIR $GOPATH/src/github.com/influxdata/influxdb +ENV PATH $GOPATH/bin:$PATH +RUN mkdir -p $PROJECT_DIR +WORKDIR $PROJECT_DIR + +VOLUME $PROJECT_DIR + +ENTRYPOINT [ "/root/go/src/github.com/influxdata/influxdb/build.py" ] diff --git a/vendor/github.com/influxdata/influxdb/Dockerfile_build_ubuntu64_git
b/vendor/github.com/influxdata/influxdb/Dockerfile_build_ubuntu64_git new file mode 100644 index 000000000000..61227a346944 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/Dockerfile_build_ubuntu64_git @@ -0,0 +1,43 @@ +FROM ubuntu:trusty + +RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \ + python-software-properties \ + software-properties-common \ + wget \ + git \ + mercurial \ + make \ + ruby \ + ruby-dev \ + rpm \ + zip \ + python \ + python-boto + +RUN gem install fpm + +# Setup env +ENV GOPATH /root/go +ENV PROJECT_DIR $GOPATH/src/github.com/influxdata/influxdb +ENV PATH $GOPATH/bin:$PATH +RUN mkdir -p $PROJECT_DIR + +VOLUME $PROJECT_DIR + + +# Install go +ENV GO_VERSION 1.7.4 +ENV GO_ARCH amd64 +RUN wget https://storage.googleapis.com/golang/go${GO_VERSION}.linux-${GO_ARCH}.tar.gz; \ + tar -C /usr/local/ -xf /go${GO_VERSION}.linux-${GO_ARCH}.tar.gz ; \ + rm /go${GO_VERSION}.linux-${GO_ARCH}.tar.gz + +# Clone Go tip for compilation +ENV GOROOT_BOOTSTRAP /usr/local/go +RUN git clone https://go.googlesource.com/go +ENV PATH /go/bin:$PATH + +# Add script for compiling go +ENV GO_CHECKOUT master +ADD ./gobuild.sh /gobuild.sh +ENTRYPOINT [ "/gobuild.sh" ] diff --git a/vendor/github.com/influxdata/influxdb/Dockerfile_test_ubuntu32 b/vendor/github.com/influxdata/influxdb/Dockerfile_test_ubuntu32 new file mode 100644 index 000000000000..af505b5cd430 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/Dockerfile_test_ubuntu32 @@ -0,0 +1,12 @@ +FROM 32bit/ubuntu:14.04 + +RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y python-software-properties software-properties-common git +RUN add-apt-repository ppa:evarlast/golang1.4 +RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y -o Dpkg::Options::="--force-overwrite" golang-go + +ENV GOPATH=/root/go +RUN mkdir -p /root/go/src/github.com/influxdata/influxdb +RUN mkdir -p /tmp/artifacts + +VOLUME /root/go/src/github.com/influxdata/influxdb +VOLUME /tmp/artifacts diff --git a/vendor/github.com/influxdata/influxdb/Godeps b/vendor/github.com/influxdata/influxdb/Godeps new file mode 100644 index 000000000000..7fec58fbb2ef --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/Godeps @@ -0,0 +1,20 @@ +collectd.org e84e8af5356e7f47485bbc95c96da6dd7984a67e +github.com/BurntSushi/toml 99064174e013895bbd9b025c31100bd1d9b590ca +github.com/bmizerany/pat c068ca2f0aacee5ac3681d68e4d0a003b7d1fd2c +github.com/boltdb/bolt 4b1ebc1869ad66568b313d0dc410e2be72670dda +github.com/cespare/xxhash 4a94f899c20bc44d4f5f807cb14529e72aca99d6 +github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76 +github.com/dgrijalva/jwt-go 24c63f56522a87ec5339cc3567883f1039378fdb +github.com/dgryski/go-bits 2ad8d707cc05b1815ce6ff2543bb5e8d8f9298ef +github.com/dgryski/go-bitstream 7d46cd22db7004f0cceb6f7975824b560cf0e486 +github.com/gogo/protobuf a9cd0c35b97daf74d0ebf3514c5254814b2703b4 +github.com/golang/snappy d9eb7a3d35ec988b8585d4a0068e462c27d28380 +github.com/influxdata/usage-client 6d3895376368aa52a3a81d2a16e90f0f52371967 +github.com/jwilder/encoding 4dada27c33277820fe35c7ee71ed34fbc9477d00 +github.com/paulbellamy/ratecounter 5a11f585a31379765c190c033b6ad39956584447 +github.com/peterh/liner 8975875355a81d612fafb9f5a6037bdcc2d9b073 +github.com/rakyll/statik e383bbf6b2ec1a2fb8492dfd152d945fb88919b6 +github.com/retailnext/hllpp 38a7bb71b483e855d35010808143beaf05b67f9d +github.com/uber-go/atomic 9e99152552a6ce13fa3b2ce4a9c4fb117cca4506 +go.uber.org/zap 05dadc4e239529c50d6f730c17f0a3aaf35b64fd 
+golang.org/x/crypto 9477e0b78b9ac3d0b03822fd95422e2fe07627cd diff --git a/vendor/github.com/influxdata/influxdb/Makefile b/vendor/github.com/influxdata/influxdb/Makefile new file mode 100644 index 000000000000..9fa9c8269cc7 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/Makefile @@ -0,0 +1,39 @@ +PACKAGES=$(shell find . -name '*.go' -print0 | xargs -0 -n1 dirname | sort --unique) + +default: + +metalint: deadcode cyclo aligncheck defercheck structcheck lint errcheck + +deadcode: + @deadcode $(PACKAGES) 2>&1 + +cyclo: + @gocyclo -over 10 $(PACKAGES) + +aligncheck: + @aligncheck $(PACKAGES) + +defercheck: + @defercheck $(PACKAGES) + + +structcheck: + @structcheck $(PACKAGES) + +lint: + @for pkg in $(PACKAGES); do golint $$pkg; done + +errcheck: + @for pkg in $(PACKAGES); do \ + errcheck -ignorepkg=bytes,fmt -ignore=":(Rollback|Close)" $$pkg; \ + done + +tools: + go get github.com/remyoudompheng/go-misc/deadcode + go get github.com/alecthomas/gocyclo + go get github.com/opennota/check/... + go get github.com/golang/lint/golint + go get github.com/kisielk/errcheck + go get github.com/sparrc/gdm + +.PHONY: default metalint deadcode cyclo aligncheck defercheck structcheck lint errcheck tools diff --git a/vendor/github.com/influxdata/influxdb/QUERIES.md b/vendor/github.com/influxdata/influxdb/QUERIES.md new file mode 100644 index 000000000000..46a9eb1dafc2 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/QUERIES.md @@ -0,0 +1,180 @@ +The top-level name is called a measurement. These names can contain any characters. Then there are field names, field values, tag keys and tag values, which can also contain any characters. However, if the measurement, field, or tag contains any character other than [A-Z,a-z,0-9,_], or if it starts with a digit, it must be double-quoted. Therefore, anywhere a measurement name, field key, or tag key appears, it should be wrapped in double quotes. + +# Databases & retention policies + +```sql +-- create a database +CREATE DATABASE <name> + +-- create a retention policy +CREATE RETENTION POLICY <rp-name> ON <db-name> DURATION <duration> REPLICATION <n> [DEFAULT] + +-- alter retention policy +ALTER RETENTION POLICY <rp-name> ON <db-name> (DURATION <duration> | REPLICATION <n> | DEFAULT)+ + +-- drop a database +DROP DATABASE <name> + +-- drop a retention policy +DROP RETENTION POLICY <rp-name> ON <db-name> +``` +where `<duration>` is either `INF` for infinite retention, or an integer followed by the desired unit of time: u,ms,s,m,h,d,w for microseconds, milliseconds, seconds, minutes, hours, days, or weeks, respectively. `<n>` must be an integer. + +If present, `DEFAULT` sets the retention policy as the default retention policy for writes and reads. + +# Users and permissions + +```sql +-- create user +CREATE USER <name> WITH PASSWORD '<password>' + +-- grant privilege on a database +GRANT <privilege> ON <db> TO <user> + +-- grant cluster admin privileges +GRANT ALL [PRIVILEGES] TO <user> + +-- revoke privilege +REVOKE <privilege> ON <db> FROM <user> + +-- revoke all privileges for a DB +REVOKE ALL [PRIVILEGES] ON <db> FROM <user> + +-- revoke all privileges including cluster admin +REVOKE ALL [PRIVILEGES] FROM <user> + +-- combine db creation with privilege assignment (user must already exist) +CREATE DATABASE <name> GRANT <privilege> TO <user> +CREATE DATABASE <name> REVOKE <privilege> FROM <user> + +-- delete a user +DROP USER <name> +``` +where `<privilege> := READ | WRITE | ALL`. + +Authentication must be enabled in the influxdb.conf file for user permissions to be in effect. + +By default, newly created users have no privileges to any databases.
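+ +For example, a short hypothetical session using the grammar above might look like the following (the database `telemetry`, the user `alice`, and the password are illustrative values only, not part of the grammar): + +```sql +-- create a database and a user, then grant the user read access +CREATE DATABASE telemetry +CREATE USER alice WITH PASSWORD 'example_password' +GRANT READ ON telemetry TO alice +```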
+ +Cluster administration privileges automatically grant full read and write permissions to all databases, regardless of subsequent database-specific privilege revocation statements. + +# Select + +```sql +SELECT mean(value) from cpu WHERE host = 'serverA' AND time > now() - 4h GROUP BY time(5m) + +SELECT mean(value) from cpu WHERE time > now() - 4h GROUP BY time(5m), region +``` + +## Group By + +# Delete + +# Series + +## Destroy + +```sql +DROP MEASUREMENT <name> +DROP MEASUREMENT cpu WHERE region = 'uswest' +``` + +## Show + +Show series queries are for pulling out individual series from measurement names and tag data. They're useful for discovery. + +```sql +-- show all databases +SHOW DATABASES + +-- show measurement names +SHOW MEASUREMENTS +SHOW MEASUREMENTS LIMIT 15 +SHOW MEASUREMENTS LIMIT 10 OFFSET 40 +SHOW MEASUREMENTS WHERE service = 'redis' +-- LIMIT and OFFSET can be applied to any of the SHOW type queries + +-- show all series across all measurements/tagsets +SHOW SERIES + +-- get a list of all series for any measurements where tag key region = tag value 'uswest' +SHOW SERIES WHERE region = 'uswest' + +SHOW SERIES FROM cpu_load WHERE region = 'uswest' LIMIT 10 + +-- returns rows 100 - 109 of the result. In the case of SHOW SERIES, the result is +-- series split into measurements. Each series counts as a row. So you could see only a +-- single measurement returned, but 10 series within it. +SHOW SERIES FROM cpu_load WHERE region = 'uswest' LIMIT 10 OFFSET 100 + +-- show all retention policies on a database +SHOW RETENTION POLICIES ON mydb + +-- get a list of all tag keys across all measurements +SHOW TAG KEYS + +-- show all the tag keys for a given measurement +SHOW TAG KEYS FROM cpu +SHOW TAG KEYS FROM temperature, wind_speed + +-- show all the tag values. note that a single WHERE TAG KEY = '...' clause is required +SHOW TAG VALUES WITH TAG KEY = 'region' +SHOW TAG VALUES FROM cpu WHERE region = 'uswest' WITH TAG KEY = 'host' + +-- and you can do stuff against fields +SHOW FIELD KEYS FROM cpu + +-- but you can't do this +SHOW FIELD VALUES +-- we don't index field values, so this query should be invalid. + +-- show all users +SHOW USERS +``` + +Note that `FROM` and `WHERE` are optional clauses in most of the show series queries. + +And the show series output looks like this: + +```json +[ + { + "name": "cpu", + "columns": ["id", "region", "host"], + "values": [ + 1, "uswest", "servera", + 2, "uswest", "serverb" + ] + }, + { + "name": "response_time", + "columns": ["id", "application", "host"], + "values": [ + 3, "myRailsApp", "servera" + ] + } +] +``` + +# Continuous Queries + +Continuous queries are going to be inspired by MySQL `TRIGGER` syntax: + +http://dev.mysql.com/doc/refman/5.0/en/trigger-syntax.html + +Instead of having automatically-assigned IDs, named continuous queries allow for some level of duplication prevention, +particularly in the case where creation is scripted. + +## Create + + CREATE CONTINUOUS QUERY <name> AS SELECT ... FROM ...
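+ +For instance, a hypothetical query following the grammar above (the name `clicks_per_hour`, the measurement `clicks`, and the SELECT body are illustrative values only, not part of the spec): + + CREATE CONTINUOUS QUERY clicks_per_hour AS SELECT count(value) FROM clicks GROUP BY time(1h)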
+ +## Destroy + + DROP CONTINUOUS QUERY <name> + +## List + + SHOW CONTINUOUS QUERIES diff --git a/vendor/github.com/influxdata/influxdb/README.md b/vendor/github.com/influxdata/influxdb/README.md new file mode 100644 index 000000000000..8c269ddfcc95 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/README.md @@ -0,0 +1,71 @@ +# InfluxDB [![Circle CI](https://circleci.com/gh/influxdata/influxdb/tree/master.svg?style=svg)](https://circleci.com/gh/influxdata/influxdb/tree/master) [![Go Report Card](https://goreportcard.com/badge/github.com/influxdata/influxdb)](https://goreportcard.com/report/github.com/influxdata/influxdb) [![Docker pulls](https://img.shields.io/docker/pulls/library/influxdb.svg)](https://hub.docker.com/_/influxdb/) + +## An Open-Source Time Series Database + +InfluxDB is an open source **time series database** with +**no external dependencies**. It's useful for recording metrics and +events, and for performing analytics. + +## Features + +* Built-in [HTTP API](https://docs.influxdata.com/influxdb/latest/guides/writing_data/) so you don't have to write any server-side code to get up and running. +* Data can be tagged, allowing very flexible querying. +* SQL-like query language. +* Simple to install and manage, and fast to get data in and out. +* It aims to answer queries in real time. That means every data point is + indexed as it comes in and is immediately available in queries that + should return in < 100ms. + +## Installation + +We recommend installing InfluxDB using one of the [pre-built packages](https://influxdata.com/downloads/#influxdb). Then start InfluxDB using: + +* `service influxdb start` if you have installed InfluxDB using an official Debian or RPM package. +* `systemctl start influxdb` if you have installed InfluxDB using an official Debian or RPM package, and are running a distro with `systemd`. For example, Ubuntu 15 or later. +* `$GOPATH/bin/influxd` if you have built InfluxDB from source. + +## Getting Started + +### Create your first database + +``` +curl -G 'http://localhost:8086/query' --data-urlencode "q=CREATE DATABASE mydb" +``` + +### Insert some data +``` +curl -XPOST 'http://localhost:8086/write?db=mydb' \ +-d 'cpu,host=server01,region=uswest load=42 1434055562000000000' + +curl -XPOST 'http://localhost:8086/write?db=mydb' \ +-d 'cpu,host=server02,region=uswest load=78 1434055562000000000' + +curl -XPOST 'http://localhost:8086/write?db=mydb' \ +-d 'cpu,host=server03,region=useast load=15.4 1434055562000000000' +``` + +### Query for the data +``` +curl -G http://localhost:8086/query?pretty=true --data-urlencode "db=mydb" \ +--data-urlencode "q=SELECT * FROM cpu WHERE host='server01' AND time < now() - 1d" +``` + +### Analyze the data +``` +curl -G http://localhost:8086/query?pretty=true --data-urlencode "db=mydb" \ +--data-urlencode "q=SELECT mean(load) FROM cpu WHERE region='uswest'" +``` + +## Documentation + +* Read more about the [design goals and motivations of the project](https://docs.influxdata.com/influxdb/latest/). +* Follow the [getting started guide](https://docs.influxdata.com/influxdb/latest/introduction/getting_started/) to learn the basics in just a few minutes. +* Learn more about [InfluxDB's key concepts](https://docs.influxdata.com/influxdb/latest/guides/writing_data/). + +## Contributing + +If you're feeling adventurous and want to contribute to InfluxDB, see our [contributing doc](https://github.com/influxdata/influxdb/blob/master/CONTRIBUTING.md) for info on how to make feature requests, build from source, and run tests.
+ +## Looking for Support? + +InfluxDB offers a number of services to help your project succeed. We offer Developer Support for organizations in active development, Managed Hosting to make it easy to move into production, and Enterprise Support for companies requiring the best response times, SLAs, and technical fixes. Visit our [support page](https://influxdata.com/services/) or contact [sales@influxdb.com](mailto:sales@influxdb.com) to learn how we can best help you succeed. diff --git a/vendor/github.com/influxdata/influxdb/TODO.md b/vendor/github.com/influxdata/influxdb/TODO.md new file mode 100644 index 000000000000..56b5294b450c --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/TODO.md @@ -0,0 +1,9 @@ +# TODO + +## v2 + +TODO list for v2. Here is a list of things we want to add to v1, but can't because they would be a breaking change. + +- [#1834](https://github.com/influxdata/influxdb/issues/1834): Disallow using time as a tag key or field key. +- [#2124](https://github.com/influxdata/influxdb/issues/2124): Prohibit writes with precision, but without an explicit timestamp. +- [#4461](https://github.com/influxdata/influxdb/issues/4461): Change default time boundaries. diff --git a/vendor/github.com/influxdata/influxdb/appveyor.yml b/vendor/github.com/influxdata/influxdb/appveyor.yml new file mode 100644 index 000000000000..05f77efbc51a --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/appveyor.yml @@ -0,0 +1,37 @@ +version: 0.{build} +pull_requests: + do_not_increment_build_number: true +branches: + only: + - master + +os: Windows Server 2012 R2 + +# Custom clone folder (variables are not expanded here). +clone_folder: c:\gopath\src\github.com\influxdata\influxdb + +# Environment variables +environment: + GOROOT: C:\go17 + GOPATH: C:\gopath + +# Scripts that run after cloning repository +install: + - set PATH=%GOROOT%\bin;%GOPATH%\bin;%PATH% + - rmdir c:\go /s /q + - echo %PATH% + - echo %GOPATH% + - cd C:\gopath\src\github.com\influxdata\influxdb + - go version + - go env + - go get github.com/sparrc/gdm + - cd C:\gopath\src\github.com\influxdata\influxdb + - gdm restore + +# To run your custom scripts instead of automatic MSBuild +build_script: + - go get -t -v ./... + - go test -race -v ./... + +# To disable deployment +deploy: off diff --git a/vendor/github.com/influxdata/influxdb/build-docker.sh b/vendor/github.com/influxdata/influxdb/build-docker.sh new file mode 100755 index 000000000000..3e45beac776e --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/build-docker.sh @@ -0,0 +1,9 @@ +#!/bin/bash + +set -e -x + +GO_VER=${GO_VER:-1.7.4} + +docker run -it -v "${GOPATH}":/gopath -v "$(pwd)":/app -e "GOPATH=/gopath" -w /app golang:$GO_VER sh -c 'CGO_ENABLED=0 go build -a --installsuffix cgo --ldflags="-s" -o influxd ./cmd/influxd' + +docker build -t influxdb . 
diff --git a/vendor/github.com/influxdata/influxdb/build.py b/vendor/github.com/influxdata/influxdb/build.py new file mode 100755 index 000000000000..46933199d66f --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/build.py @@ -0,0 +1,987 @@ +#!/usr/bin/python2.7 -u + +import sys +import os +import subprocess +import time +from datetime import datetime +import shutil +import tempfile +import hashlib +import re +import logging +import argparse + +################ +#### InfluxDB Variables +################ + +# Packaging variables +PACKAGE_NAME = "influxdb" +INSTALL_ROOT_DIR = "/usr/bin" +LOG_DIR = "/var/log/influxdb" +DATA_DIR = "/var/lib/influxdb" +SCRIPT_DIR = "/usr/lib/influxdb/scripts" +CONFIG_DIR = "/etc/influxdb" +LOGROTATE_DIR = "/etc/logrotate.d" +MAN_DIR = "/usr/share/man" + +INIT_SCRIPT = "scripts/init.sh" +SYSTEMD_SCRIPT = "scripts/influxdb.service" +PREINST_SCRIPT = "scripts/pre-install.sh" +POSTINST_SCRIPT = "scripts/post-install.sh" +POSTUNINST_SCRIPT = "scripts/post-uninstall.sh" +LOGROTATE_SCRIPT = "scripts/logrotate" +DEFAULT_CONFIG = "etc/config.sample.toml" + +# Default AWS S3 bucket for uploads +DEFAULT_BUCKET = "dl.influxdata.com/influxdb/artifacts" + +CONFIGURATION_FILES = [ + CONFIG_DIR + '/influxdb.conf', + LOGROTATE_DIR + '/influxdb', +] + +PACKAGE_LICENSE = "MIT" +PACKAGE_URL = "https://github.com/influxdata/influxdb" +MAINTAINER = "support@influxdb.com" +VENDOR = "InfluxData" +DESCRIPTION = "Distributed time-series database." + +prereqs = [ 'git', 'go' ] +go_vet_command = "go tool vet ./" +optional_prereqs = [ 'fpm', 'rpmbuild', 'gpg' ] + +fpm_common_args = "-f -s dir --log error \ +--vendor {} \ +--url {} \ +--after-install {} \ +--before-install {} \ +--after-remove {} \ +--license {} \ +--maintainer {} \ +--directories {} \ +--directories {} \ +--directories {} \ +--description \"{}\"".format( + VENDOR, + PACKAGE_URL, + POSTINST_SCRIPT, + PREINST_SCRIPT, + POSTUNINST_SCRIPT, + PACKAGE_LICENSE, + MAINTAINER, + LOG_DIR, + DATA_DIR, + MAN_DIR, + DESCRIPTION) + +for f in CONFIGURATION_FILES: + fpm_common_args += " --config-files {}".format(f) + +targets = { + 'influx' : './cmd/influx', + 'influxd' : './cmd/influxd', + 'influx_stress' : './cmd/influx_stress', + 'influx_inspect' : './cmd/influx_inspect', + 'influx_tsm' : './cmd/influx_tsm', +} + +supported_builds = { + 'darwin': [ "amd64" ], + 'windows': [ "amd64" ], + 'linux': [ "amd64", "i386", "armhf", "arm64", "armel", "static_i386", "static_amd64" ] +} + +supported_packages = { + "darwin": [ "tar" ], + "linux": [ "deb", "rpm", "tar" ], + "windows": [ "zip" ], +} + +################ +#### InfluxDB Functions +################ + +def print_banner(): + logging.info(""" + ___ __ _ ___ ___ + |_ _|_ _ / _| |_ ___ _| \\| _ ) + | || ' \\| _| | || \\ \\ / |) | _ \\ + |___|_||_|_| |_|\\_,_/_\\_\\___/|___/ + Build Script +""") + +def create_package_fs(build_root): + """Create a filesystem structure to mimic the package filesystem. 
+ """ + logging.debug("Creating package filesystem at location: {}".format(build_root)) + # Using [1:] for the path names due to them being absolute + # (will overwrite previous paths, per 'os.path.join' documentation) + dirs = [ INSTALL_ROOT_DIR[1:], + LOG_DIR[1:], + DATA_DIR[1:], + SCRIPT_DIR[1:], + CONFIG_DIR[1:], + LOGROTATE_DIR[1:], + MAN_DIR[1:] ] + for d in dirs: + os.makedirs(os.path.join(build_root, d)) + os.chmod(os.path.join(build_root, d), 0o755) + +def package_scripts(build_root, config_only=False, windows=False): + """Copy the necessary scripts and configuration files to the package + filesystem. + """ + if config_only: + logging.debug("Copying configuration to build directory.") + shutil.copyfile(DEFAULT_CONFIG, os.path.join(build_root, "influxdb.conf")) + os.chmod(os.path.join(build_root, "influxdb.conf"), 0o644) + else: + logging.debug("Copying scripts and sample configuration to build directory.") + shutil.copyfile(INIT_SCRIPT, os.path.join(build_root, SCRIPT_DIR[1:], INIT_SCRIPT.split('/')[1])) + os.chmod(os.path.join(build_root, SCRIPT_DIR[1:], INIT_SCRIPT.split('/')[1]), 0o644) + shutil.copyfile(SYSTEMD_SCRIPT, os.path.join(build_root, SCRIPT_DIR[1:], SYSTEMD_SCRIPT.split('/')[1])) + os.chmod(os.path.join(build_root, SCRIPT_DIR[1:], SYSTEMD_SCRIPT.split('/')[1]), 0o644) + shutil.copyfile(LOGROTATE_SCRIPT, os.path.join(build_root, LOGROTATE_DIR[1:], "influxdb")) + os.chmod(os.path.join(build_root, LOGROTATE_DIR[1:], "influxdb"), 0o644) + shutil.copyfile(DEFAULT_CONFIG, os.path.join(build_root, CONFIG_DIR[1:], "influxdb.conf")) + os.chmod(os.path.join(build_root, CONFIG_DIR[1:], "influxdb.conf"), 0o644) + +def package_man_files(build_root): + """Copy and gzip man pages to the package filesystem.""" + logging.debug("Installing man pages.") + run("make -C man/ clean install DESTDIR={}/usr".format(build_root)) + for path, dir, files in os.walk(os.path.join(build_root, MAN_DIR[1:])): + for f in files: + run("gzip -9n {}".format(os.path.join(path, f))) + +def run_generate(): + """Run 'go generate' to rebuild any static assets. + """ + logging.info("Running 'go generate'...") + if not check_path_for("statik"): + run("go install github.com/rakyll/statik") + orig_path = None + if os.path.join(os.environ.get("GOPATH"), "bin") not in os.environ["PATH"].split(os.pathsep): + orig_path = os.environ["PATH"].split(os.pathsep) + os.environ["PATH"] = os.environ["PATH"].split(os.pathsep).append(os.path.join(os.environ.get("GOPATH"), "bin")) + run("rm -f ./services/admin/statik/statik.go") + run("go generate ./services/admin") + if orig_path is not None: + os.environ["PATH"] = orig_path + return True + +def go_get(branch, update=False, no_uncommitted=False): + """Retrieve build dependencies or restore pinned dependencies. + """ + if local_changes() and no_uncommitted: + logging.error("There are uncommitted changes in the current directory.") + return False + if not check_path_for("gdm"): + logging.info("Downloading `gdm`...") + get_command = "go get github.com/sparrc/gdm" + run(get_command) + logging.info("Retrieving dependencies with `gdm`...") + sys.stdout.flush() + run("{}/bin/gdm restore -v".format(os.environ.get("GOPATH"))) + return True + +def run_tests(race, parallel, timeout, no_vet): + """Run the Go test suite on binary output. 
+ """ + logging.info("Starting tests...") + if race: + logging.info("Race is enabled.") + if parallel is not None: + logging.info("Using parallel: {}".format(parallel)) + if timeout is not None: + logging.info("Using timeout: {}".format(timeout)) + out = run("go fmt ./...") + if len(out) > 0: + logging.error("Code not formatted. Please use 'go fmt ./...' to fix formatting errors.") + logging.error("{}".format(out)) + return False + if not no_vet: + logging.info("Running 'go vet'...") + out = run(go_vet_command) + if len(out) > 0: + logging.error("Go vet failed. Please run 'go vet ./...' and fix any errors.") + logging.error("{}".format(out)) + return False + else: + logging.info("Skipping 'go vet' call...") + test_command = "go test -v" + if race: + test_command += " -race" + if parallel is not None: + test_command += " -parallel {}".format(parallel) + if timeout is not None: + test_command += " -timeout {}".format(timeout) + test_command += " ./..." + logging.info("Running tests...") + output = run(test_command) + logging.debug("Test output:\n{}".format(output.encode('ascii', 'ignore'))) + return True + +################ +#### All InfluxDB-specific content above this line +################ + +def run(command, allow_failure=False, shell=False): + """Run shell command (convenience wrapper around subprocess). + """ + out = None + logging.debug("{}".format(command)) + try: + if shell: + out = subprocess.check_output(command, stderr=subprocess.STDOUT, shell=shell) + else: + out = subprocess.check_output(command.split(), stderr=subprocess.STDOUT) + out = out.decode('utf-8').strip() + # logging.debug("Command output: {}".format(out)) + except subprocess.CalledProcessError as e: + if allow_failure: + logging.warn("Command '{}' failed with error: {}".format(command, e.output)) + return None + else: + logging.error("Command '{}' failed with error: {}".format(command, e.output)) + sys.exit(1) + except OSError as e: + if allow_failure: + logging.warn("Command '{}' failed with error: {}".format(command, e)) + return out + else: + logging.error("Command '{}' failed with error: {}".format(command, e)) + sys.exit(1) + else: + return out + +def create_temp_dir(prefix = None): + """ Create temporary directory with optional prefix. + """ + if prefix is None: + return tempfile.mkdtemp(prefix="{}-build.".format(PACKAGE_NAME)) + else: + return tempfile.mkdtemp(prefix=prefix) + +def increment_minor_version(version): + """Return the version with the minor version incremented and patch + version set to zero. + """ + ver_list = version.split('.') + if len(ver_list) != 3: + logging.warn("Could not determine how to increment version '{}', will just use provided version.".format(version)) + return version + ver_list[1] = str(int(ver_list[1]) + 1) + ver_list[2] = str(0) + inc_version = '.'.join(ver_list) + logging.debug("Incremented version from '{}' to '{}'.".format(version, inc_version)) + return inc_version + +def get_current_version_tag(): + """Retrieve the raw git version tag. + """ + version = run("git describe --always --tags --abbrev=0") + return version + +def get_current_version(): + """Parse version information from git tag output. 
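+
+    Strips the leading 'v' and replaces any '-' or '_' with '~', so a tag
+    such as 'v1.2.3-rc1' becomes '1.2.3~rc1' (in deb/rpm version
+    comparison, '~' sorts before the bare release, keeping pre-releases
+    below the final release).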
+ """ + version_tag = get_current_version_tag() + # Remove leading 'v' + if version_tag[0] == 'v': + version_tag = version_tag[1:] + # Replace any '-'/'_' with '~' + if '-' in version_tag: + version_tag = version_tag.replace("-","~") + if '_' in version_tag: + version_tag = version_tag.replace("_","~") + return version_tag + +def get_current_commit(short=False): + """Retrieve the current git commit. + """ + command = None + if short: + command = "git log --pretty=format:'%h' -n 1" + else: + command = "git rev-parse HEAD" + out = run(command) + return out.strip('\'\n\r ') + +def get_current_branch(): + """Retrieve the current git branch. + """ + command = "git rev-parse --abbrev-ref HEAD" + out = run(command) + return out.strip() + +def local_changes(): + """Return True if there are local un-committed changes. + """ + output = run("git diff-files --ignore-submodules --").strip() + if len(output) > 0: + return True + return False + +def get_system_arch(): + """Retrieve current system architecture. + """ + arch = os.uname()[4] + if arch == "x86_64": + arch = "amd64" + elif arch == "386": + arch = "i386" + elif arch == "aarch64": + arch = "arm64" + elif 'arm' in arch: + # Prevent uname from reporting full ARM arch (eg 'armv7l') + arch = "arm" + return arch + +def get_system_platform(): + """Retrieve current system platform. + """ + if sys.platform.startswith("linux"): + return "linux" + else: + return sys.platform + +def get_go_version(): + """Retrieve version information for Go. + """ + out = run("go version") + matches = re.search('go version go(\S+)', out) + if matches is not None: + return matches.groups()[0].strip() + return None + +def check_path_for(b): + """Check the the user's path for the provided binary. + """ + def is_exe(fpath): + return os.path.isfile(fpath) and os.access(fpath, os.X_OK) + + for path in os.environ["PATH"].split(os.pathsep): + path = path.strip('"') + full_path = os.path.join(path, b) + if os.path.isfile(full_path) and os.access(full_path, os.X_OK): + return full_path + +def check_environ(build_dir = None): + """Check environment for common Go variables. + """ + logging.info("Checking environment...") + for v in [ "GOPATH", "GOBIN", "GOROOT" ]: + logging.debug("Using '{}' for {}".format(os.environ.get(v), v)) + + cwd = os.getcwd() + if build_dir is None and os.environ.get("GOPATH") and os.environ.get("GOPATH") not in cwd: + logging.warn("Your current directory is not under your GOPATH. This may lead to build failures.") + return True + +def check_prereqs(): + """Check user path for required dependencies. + """ + logging.info("Checking for dependencies...") + for req in prereqs: + if not check_path_for(req): + logging.error("Could not find dependency: {}".format(req)) + return False + return True + +def upload_packages(packages, bucket_name=None, overwrite=False): + """Upload provided package output to AWS S3. 
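+
+    Requires the 'boto' library. bucket_name may include a nested path
+    (e.g. 'bucket/folder'); existing keys are only replaced when
+    overwrite is True.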
+ """ + logging.debug("Uploading files to bucket '{}': {}".format(bucket_name, packages)) + try: + import boto + from boto.s3.key import Key + from boto.s3.connection import OrdinaryCallingFormat + logging.getLogger("boto").setLevel(logging.WARNING) + except ImportError: + logging.warn("Cannot upload packages without 'boto' Python library!") + return False + logging.info("Connecting to AWS S3...") + # Up the number of attempts to 10 from default of 1 + boto.config.add_section("Boto") + boto.config.set("Boto", "metadata_service_num_attempts", "10") + c = boto.connect_s3(calling_format=OrdinaryCallingFormat()) + if bucket_name is None: + bucket_name = DEFAULT_BUCKET + bucket = c.get_bucket(bucket_name.split('/')[0]) + for p in packages: + if '/' in bucket_name: + # Allow for nested paths within the bucket name (ex: + # bucket/folder). Assuming forward-slashes as path + # delimiter. + name = os.path.join('/'.join(bucket_name.split('/')[1:]), + os.path.basename(p)) + else: + name = os.path.basename(p) + logging.debug("Using key: {}".format(name)) + if bucket.get_key(name) is None or overwrite: + logging.info("Uploading file {}".format(name)) + k = Key(bucket) + k.key = name + if overwrite: + n = k.set_contents_from_filename(p, replace=True) + else: + n = k.set_contents_from_filename(p, replace=False) + k.make_public() + else: + logging.warn("Not uploading file {}, as it already exists in the target bucket.".format(name)) + return True + +def go_list(vendor=False, relative=False): + """ + Return a list of packages + If vendor is False vendor package are not included + If relative is True the package prefix defined by PACKAGE_URL is stripped + """ + p = subprocess.Popen(["go", "list", "./..."], stdout=subprocess.PIPE, stderr=subprocess.PIPE) + out, err = p.communicate() + packages = out.split('\n') + if packages[-1] == '': + packages = packages[:-1] + if not vendor: + non_vendor = [] + for p in packages: + if '/vendor/' not in p: + non_vendor.append(p) + packages = non_vendor + if relative: + relative_pkgs = [] + for p in packages: + r = p.replace(PACKAGE_URL, '.') + if r != '.': + relative_pkgs.append(r) + packages = relative_pkgs + return packages + +def build(version=None, + platform=None, + arch=None, + nightly=False, + race=False, + clean=False, + outdir=".", + tags=[], + static=False): + """Build each target for the specified architecture and platform. 
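+
+    Each entry in 'targets' is compiled with GOOS/GOARCH (plus GOARM for
+    ARM variants), and the version, branch, and commit are injected via
+    -ldflags. Static builds compile with CGO_ENABLED=0 and
+    '-a -installsuffix cgo'.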
+ """ + logging.info("Starting build for {}/{}...".format(platform, arch)) + logging.info("Using Go version: {}".format(get_go_version())) + logging.info("Using git branch: {}".format(get_current_branch())) + logging.info("Using git commit: {}".format(get_current_commit())) + if static: + logging.info("Using statically-compiled output.") + if race: + logging.info("Race is enabled.") + if len(tags) > 0: + logging.info("Using build tags: {}".format(','.join(tags))) + + logging.info("Sending build output to: {}".format(outdir)) + if not os.path.exists(outdir): + os.makedirs(outdir) + elif clean and outdir != '/' and outdir != ".": + logging.info("Cleaning build directory '{}' before building.".format(outdir)) + shutil.rmtree(outdir) + os.makedirs(outdir) + + logging.info("Using version '{}' for build.".format(version)) + + for target, path in targets.items(): + logging.info("Building target: {}".format(target)) + build_command = "" + + # Handle static binary output + if static is True or "static_" in arch: + if "static_" in arch: + static = True + arch = arch.replace("static_", "") + build_command += "CGO_ENABLED=0 " + + # Handle variations in architecture output + if arch == "i386" or arch == "i686": + arch = "386" + elif "arm" in arch: + arch = "arm" + build_command += "GOOS={} GOARCH={} ".format(platform, arch) + + if "arm" in arch: + if arch == "armel": + build_command += "GOARM=5 " + elif arch == "armhf" or arch == "arm": + build_command += "GOARM=6 " + elif arch == "arm64": + # TODO(rossmcdonald) - Verify this is the correct setting for arm64 + build_command += "GOARM=7 " + else: + logging.error("Invalid ARM architecture specified: {}".format(arch)) + logging.error("Please specify either 'armel', 'armhf', or 'arm64'.") + return False + if platform == 'windows': + target = target + '.exe' + build_command += "go build -o {} ".format(os.path.join(outdir, target)) + if race: + build_command += "-race " + if len(tags) > 0: + build_command += "-tags {} ".format(','.join(tags)) + if "1.4" in get_go_version(): + if static: + build_command += "-ldflags=\"-s -X main.version {} -X main.branch {} -X main.commit {}\" ".format(version, + get_current_branch(), + get_current_commit()) + else: + build_command += "-ldflags=\"-X main.version {} -X main.branch {} -X main.commit {}\" ".format(version, + get_current_branch(), + get_current_commit()) + + else: + # Starting with Go 1.5, the linker flag arguments changed to 'name=value' from 'name value' + if static: + build_command += "-ldflags=\"-s -X main.version={} -X main.branch={} -X main.commit={}\" ".format(version, + get_current_branch(), + get_current_commit()) + else: + build_command += "-ldflags=\"-X main.version={} -X main.branch={} -X main.commit={}\" ".format(version, + get_current_branch(), + get_current_commit()) + if static: + build_command += "-a -installsuffix cgo " + build_command += path + start_time = datetime.utcnow() + run(build_command, shell=True) + end_time = datetime.utcnow() + logging.info("Time taken: {}s".format((end_time - start_time).total_seconds())) + return True + +def generate_md5_from_file(path): + """Generate MD5 signature based on the contents of the file at path. + """ + m = hashlib.md5() + with open(path, 'rb') as f: + for chunk in iter(lambda: f.read(4096), b""): + m.update(chunk) + return m.hexdigest() + +def generate_sig_from_file(path): + """Generate a detached GPG signature from the file at path. 
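+
+    Shells out to 'gpg --armor --detach-sign --yes' on the given path
+    (honoring GNUPG_HOME when set), leaving an ascii-armored .asc
+    signature next to the file.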
+ """ + logging.debug("Generating GPG signature for file: {}".format(path)) + gpg_path = check_path_for('gpg') + if gpg_path is None: + logging.warn("gpg binary not found on path! Skipping signature creation.") + return False + if os.environ.get("GNUPG_HOME") is not None: + run('gpg --homedir {} --armor --yes --detach-sign {}'.format(os.environ.get("GNUPG_HOME"), path)) + else: + run('gpg --armor --detach-sign --yes {}'.format(path)) + return True + +def package(build_output, pkg_name, version, nightly=False, iteration=1, static=False, release=False): + """Package the output of the build process. + """ + outfiles = [] + tmp_build_dir = create_temp_dir() + logging.debug("Packaging for build output: {}".format(build_output)) + logging.info("Using temporary directory: {}".format(tmp_build_dir)) + try: + for platform in build_output: + # Create top-level folder displaying which platform (linux, etc) + os.makedirs(os.path.join(tmp_build_dir, platform)) + for arch in build_output[platform]: + logging.info("Creating packages for {}/{}".format(platform, arch)) + # Create second-level directory displaying the architecture (amd64, etc) + current_location = build_output[platform][arch] + + # Create directory tree to mimic file system of package + build_root = os.path.join(tmp_build_dir, + platform, + arch, + '{}-{}-{}'.format(PACKAGE_NAME, version, iteration)) + os.makedirs(build_root) + + # Copy packaging scripts to build directory + if platform == "windows": + # For windows and static builds, just copy + # binaries to root of package (no other scripts or + # directories) + package_scripts(build_root, config_only=True, windows=True) + elif static or "static_" in arch: + package_scripts(build_root, config_only=True) + else: + create_package_fs(build_root) + package_scripts(build_root) + + if platform != "windows": + package_man_files(build_root) + + for binary in targets: + # Copy newly-built binaries to packaging directory + if platform == 'windows': + binary = binary + '.exe' + if platform == 'windows' or static or "static_" in arch: + # Where the binary should go in the package filesystem + to = os.path.join(build_root, binary) + # Where the binary currently is located + fr = os.path.join(current_location, binary) + else: + # Where the binary currently is located + fr = os.path.join(current_location, binary) + # Where the binary should go in the package filesystem + to = os.path.join(build_root, INSTALL_ROOT_DIR[1:], binary) + shutil.copy(fr, to) + + for package_type in supported_packages[platform]: + # Package the directory structure for each package type for the platform + logging.debug("Packaging directory '{}' as '{}'.".format(build_root, package_type)) + name = pkg_name + # Reset version, iteration, and current location on each run + # since they may be modified below. 
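+                    # Non-release, non-nightly builds are re-versioned below to
+                    # 'version~shortcommit' with iteration 0, so they sort below
+                    # tagged releases.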
+ package_version = version + package_iteration = iteration + if "static_" in arch: + # Remove the "static_" from the displayed arch on the package + package_arch = arch.replace("static_", "") + else: + package_arch = arch + if not release and not nightly: + # For non-release builds, just use the commit hash as the version + package_version = "{}~{}".format(version, + get_current_commit(short=True)) + package_iteration = "0" + package_build_root = build_root + current_location = build_output[platform][arch] + + if package_type in ['zip', 'tar']: + # For tars and zips, start the packaging one folder above + # the build root (to include the package name) + package_build_root = os.path.join('/', '/'.join(build_root.split('/')[:-1])) + if nightly: + if static or "static_" in arch: + name = '{}-static-nightly_{}_{}'.format(name, + platform, + package_arch) + else: + name = '{}-nightly_{}_{}'.format(name, + platform, + package_arch) + else: + if static or "static_" in arch: + name = '{}-{}-static_{}_{}'.format(name, + package_version, + platform, + package_arch) + else: + name = '{}-{}_{}_{}'.format(name, + package_version, + platform, + package_arch) + current_location = os.path.join(os.getcwd(), current_location) + if package_type == 'tar': + tar_command = "cd {} && tar -cvzf {}.tar.gz ./*".format(package_build_root, name) + run(tar_command, shell=True) + run("mv {}.tar.gz {}".format(os.path.join(package_build_root, name), current_location), shell=True) + outfile = os.path.join(current_location, name + ".tar.gz") + outfiles.append(outfile) + elif package_type == 'zip': + zip_command = "cd {} && zip -r {}.zip ./*".format(package_build_root, name) + run(zip_command, shell=True) + run("mv {}.zip {}".format(os.path.join(package_build_root, name), current_location), shell=True) + outfile = os.path.join(current_location, name + ".zip") + outfiles.append(outfile) + elif package_type not in ['zip', 'tar'] and static or "static_" in arch: + logging.info("Skipping package type '{}' for static builds.".format(package_type)) + else: + fpm_command = "fpm {} --name {} -a {} -t {} --version {} --iteration {} -C {} -p {} ".format( + fpm_common_args, + name, + package_arch, + package_type, + package_version, + package_iteration, + package_build_root, + current_location) + if package_type == "rpm": + fpm_command += "--depends coreutils --rpm-posttrans {}".format(POSTINST_SCRIPT) + out = run(fpm_command, shell=True) + matches = re.search(':path=>"(.*)"', out) + outfile = None + if matches is not None: + outfile = matches.groups()[0] + if outfile is None: + logging.warn("Could not determine output from packaging output!") + else: + if nightly: + # Strip nightly version from package name + new_outfile = outfile.replace("{}-{}".format(package_version, package_iteration), "nightly") + os.rename(outfile, new_outfile) + outfile = new_outfile + else: + if package_type == 'rpm': + # rpm's convert any dashes to underscores + package_version = package_version.replace("-", "_") + new_outfile = outfile.replace("{}-{}".format(package_version, package_iteration), package_version) + os.rename(outfile, new_outfile) + outfile = new_outfile + outfiles.append(os.path.join(os.getcwd(), outfile)) + logging.debug("Produced package files: {}".format(outfiles)) + return outfiles + finally: + # Cleanup + shutil.rmtree(tmp_build_dir) + +def main(args): + global PACKAGE_NAME + + if args.release and args.nightly: + logging.error("Cannot be both a nightly and a release.") + return 1 + + if args.nightly: + args.version = 
increment_minor_version(args.version)
+        args.version = "{}~n{}".format(args.version,
+                                       datetime.utcnow().strftime("%Y%m%d%H%M"))
+        args.iteration = 0
+
+    # Pre-build checks
+    check_environ()
+    if not check_prereqs():
+        return 1
+    if args.build_tags is None:
+        args.build_tags = []
+    else:
+        args.build_tags = args.build_tags.split(',')
+
+    orig_commit = get_current_commit(short=True)
+    orig_branch = get_current_branch()
+
+    if args.platform not in supported_builds and args.platform != 'all':
+        logging.error("Invalid build platform: {}".format(args.platform))
+        return 1
+
+    build_output = {}
+
+    if args.branch != orig_branch and args.commit != orig_commit:
+        logging.error("Can only specify one branch or commit to build from.")
+        return 1
+    elif args.branch != orig_branch:
+        logging.info("Moving to git branch: {}".format(args.branch))
+        run("git checkout {}".format(args.branch))
+    elif args.commit != orig_commit:
+        logging.info("Moving to git commit: {}".format(args.commit))
+        run("git checkout {}".format(args.commit))
+
+    if not args.no_get:
+        if not go_get(args.branch, update=args.update, no_uncommitted=args.no_uncommitted):
+            return 1
+
+    if args.generate:
+        if not run_generate():
+            return 1
+
+    if args.test:
+        if not run_tests(args.race, args.parallel, args.timeout, args.no_vet):
+            return 1
+
+    platforms = []
+    single_build = True
+    if args.platform == 'all':
+        platforms = supported_builds.keys()
+        single_build = False
+    else:
+        platforms = [args.platform]
+
+    for platform in platforms:
+        build_output.update( { platform : {} } )
+        archs = []
+        if args.arch == "all":
+            single_build = False
+            archs = supported_builds.get(platform)
+        else:
+            archs = [args.arch]
+
+        for arch in archs:
+            od = args.outdir
+            if not single_build:
+                od = os.path.join(args.outdir, platform, arch)
+            if not build(version=args.version,
+                         platform=platform,
+                         arch=arch,
+                         nightly=args.nightly,
+                         race=args.race,
+                         clean=args.clean,
+                         outdir=od,
+                         tags=args.build_tags,
+                         static=args.static):
+                return 1
+            build_output.get(platform).update( { arch : od } )
+
+    # Build packages
+    if args.package:
+        if not check_path_for("fpm"):
+            logging.error("FPM ruby gem required for packaging.
Stopping.") + return 1 + packages = package(build_output, + args.name, + args.version, + nightly=args.nightly, + iteration=args.iteration, + static=args.static, + release=args.release) + if args.sign: + logging.debug("Generating GPG signatures for packages: {}".format(packages)) + sigs = [] # retain signatures so they can be uploaded with packages + for p in packages: + if generate_sig_from_file(p): + sigs.append(p + '.asc') + else: + logging.error("Creation of signature for package [{}] failed!".format(p)) + return 1 + packages += sigs + if args.upload: + logging.debug("Files staged for upload: {}".format(packages)) + if args.nightly: + args.upload_overwrite = True + if not upload_packages(packages, bucket_name=args.bucket, overwrite=args.upload_overwrite): + return 1 + logging.info("Packages created:") + for p in packages: + logging.info("{} (MD5={})".format(p.split('/')[-1:][0], + generate_md5_from_file(p))) + if orig_branch != get_current_branch(): + logging.info("Moving back to original git branch: {}".format(orig_branch)) + run("git checkout {}".format(orig_branch)) + + return 0 + +if __name__ == '__main__': + LOG_LEVEL = logging.INFO + if '--debug' in sys.argv[1:]: + LOG_LEVEL = logging.DEBUG + log_format = '[%(levelname)s] %(funcName)s: %(message)s' + logging.basicConfig(level=LOG_LEVEL, + format=log_format) + + parser = argparse.ArgumentParser(description='InfluxDB build and packaging script.') + parser.add_argument('--verbose','-v','--debug', + action='store_true', + help='Use debug output') + parser.add_argument('--outdir', '-o', + metavar='', + default='./build/', + type=os.path.abspath, + help='Output directory') + parser.add_argument('--name', '-n', + metavar='', + default=PACKAGE_NAME, + type=str, + help='Name to use for package name (when package is specified)') + parser.add_argument('--arch', + metavar='', + type=str, + default=get_system_arch(), + help='Target architecture for build output') + parser.add_argument('--platform', + metavar='', + type=str, + default=get_system_platform(), + help='Target platform for build output') + parser.add_argument('--branch', + metavar='', + type=str, + default=get_current_branch(), + help='Build from a specific branch') + parser.add_argument('--commit', + metavar='', + type=str, + default=get_current_commit(short=True), + help='Build from a specific commit') + parser.add_argument('--version', + metavar='', + type=str, + default=get_current_version(), + help='Version information to apply to build output (ex: 0.12.0)') + parser.add_argument('--iteration', + metavar='', + type=str, + default="1", + help='Package iteration to apply to build output (defaults to 1)') + parser.add_argument('--stats', + action='store_true', + help='Emit build metrics (requires InfluxDB Python client)') + parser.add_argument('--stats-server', + metavar='', + type=str, + help='Send build stats to InfluxDB using provided hostname and port') + parser.add_argument('--stats-db', + metavar='', + type=str, + help='Send build stats to InfluxDB using provided database name') + parser.add_argument('--nightly', + action='store_true', + help='Mark build output as nightly build (will incremement the minor version)') + parser.add_argument('--update', + action='store_true', + help='Update build dependencies prior to building') + parser.add_argument('--package', + action='store_true', + help='Package binary output') + parser.add_argument('--release', + action='store_true', + help='Mark build output as release') + parser.add_argument('--clean', + action='store_true', + 
help='Clean output directory before building')
+    parser.add_argument('--no-get',
+                        action='store_true',
+                        help='Do not retrieve pinned dependencies when building')
+    parser.add_argument('--no-uncommitted',
+                        action='store_true',
+                        help='Fail if uncommitted changes exist in the working directory')
+    parser.add_argument('--upload',
+                        action='store_true',
+                        help='Upload output packages to AWS S3')
+    parser.add_argument('--upload-overwrite','-w',
+                        action='store_true',
+                        help='Overwrite packages that already exist in AWS S3 when uploading')
+    parser.add_argument('--bucket',
+                        metavar='',
+                        type=str,
+                        default=DEFAULT_BUCKET,
+                        help='Destination bucket for uploads')
+    parser.add_argument('--generate',
+                        action='store_true',
+                        help='Run "go generate" before building')
+    parser.add_argument('--build-tags',
+                        metavar='',
+                        help='Optional build tags to use for compilation')
+    parser.add_argument('--static',
+                        action='store_true',
+                        help='Create statically-compiled binary output')
+    parser.add_argument('--sign',
+                        action='store_true',
+                        help='Create GPG detached signatures for packages (when package is specified)')
+    parser.add_argument('--test',
+                        action='store_true',
+                        help='Run tests (does not produce build output)')
+    parser.add_argument('--no-vet',
+                        action='store_true',
+                        help='Do not run "go vet" when running tests')
+    parser.add_argument('--race',
+                        action='store_true',
+                        help='Enable race flag for build output')
+    parser.add_argument('--parallel',
+                        metavar='',
+                        type=int,
+                        help='Number of tests to run simultaneously')
+    parser.add_argument('--timeout',
+                        metavar='',
+                        type=str,
+                        help='Timeout for tests before failing')
+    args = parser.parse_args()
+    print_banner()
+    sys.exit(main(args))
diff --git a/vendor/github.com/influxdata/influxdb/build.sh b/vendor/github.com/influxdata/influxdb/build.sh
new file mode 100755
index 000000000000..0f80ac78728d
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/build.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+# Run the build utility via Docker
+
+set -e
+
+# Make sure our working dir is the dir of the script
+DIR=$(cd $(dirname ${BASH_SOURCE[0]}) && pwd)
+cd $DIR
+
+
+# Build new docker image
+docker build -f Dockerfile_build_ubuntu64 -t influxdb-builder $DIR
+echo "Running build.py"
+# Run docker
+docker run --rm \
+ -e AWS_ACCESS_KEY_ID="$AWS_ACCESS_KEY_ID" \
+ -e AWS_SECRET_ACCESS_KEY="$AWS_SECRET_ACCESS_KEY" \
+ -v $HOME/.aws.conf:/root/.aws.conf \
+ -v $DIR:/root/go/src/github.com/influxdata/influxdb \
+ influxdb-builder \
+ "$@"
+
diff --git a/vendor/github.com/influxdata/influxdb/circle-test.sh b/vendor/github.com/influxdata/influxdb/circle-test.sh
new file mode 100755
index 000000000000..6b34043b18d6
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/circle-test.sh
@@ -0,0 +1,35 @@
+#!/bin/bash
+#
+# This is the InfluxDB test script for CircleCI; it is a light wrapper around ./test.sh.
+
+# Exit if any command fails
+set -e
+
+# Get dir of script and make it our working directory.
+DIR=$(cd $(dirname "${BASH_SOURCE[0]}") && pwd)
+cd $DIR
+
+export OUTPUT_DIR="$CIRCLE_ARTIFACTS"
+# Don't delete the container since CircleCI doesn't have permission to do so.
+export DOCKER_RM="false"
+
+# Get number of test environments.
+count=$(./test.sh count)
+# Check that we aren't wasting CircleCI nodes.
+if [ $CIRCLE_NODE_TOTAL -gt $count ]
+then
+    echo "More CircleCI nodes allocated than test environments to run!"
+    exit 1
+fi
+
+# Map CircleCI nodes to test environments.
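+# (Test environment i runs on the node where i mod CIRCLE_NODE_TOTAL equals
+# CIRCLE_NODE_INDEX, spreading the environments evenly across nodes.)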
+tests=$(seq 0 $((count - 1))) +for i in $tests +do + mine=$(( $i % $CIRCLE_NODE_TOTAL )) + if [ $mine -eq $CIRCLE_NODE_INDEX ] + then + echo "Running test env index: $i" + ./test.sh $i + fi +done diff --git a/vendor/github.com/influxdata/influxdb/circle.yml b/vendor/github.com/influxdata/influxdb/circle.yml new file mode 100644 index 000000000000..cac1004900c4 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/circle.yml @@ -0,0 +1,41 @@ +machine: + services: + - docker + environment: + GODIST: "go1.7.4.linux-amd64.tar.gz" + post: + - mkdir -p download + - test -e download/$GODIST || curl -o download/$GODIST https://storage.googleapis.com/golang/$GODIST + - sudo rm -rf /usr/local/go + - sudo tar -C /usr/local -xzf download/$GODIST + +dependencies: + cache_directories: + - "~/docker" + - ~/download + override: + - ./test.sh save: + # building the docker images can take a long time, hence caching + timeout: 1800 + +test: + override: + - bash circle-test.sh: + parallel: true + +deployment: + release: + tag: /^v[0-9]+(\.[0-9]+)*(\S*)$/ + commands: + - > + docker run + -e "AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID" + -e "AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY" + -v $(pwd):/root/go/src/github.com/influxdata/influxdb + influxdb_build_ubuntu64 + --release + --package + --platform all + --arch all + --upload + --bucket dl.influxdata.com/influxdb/releases diff --git a/vendor/github.com/influxdata/influxdb/client/BUILD b/vendor/github.com/influxdata/influxdb/client/BUILD new file mode 100644 index 000000000000..68b6ea841579 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/client/BUILD @@ -0,0 +1,36 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = ["influxdb.go"], + importpath = "github.com/influxdata/influxdb/client", + visibility = ["//visibility:public"], + deps = ["//vendor/github.com/influxdata/influxdb/models:go_default_library"], +) + +go_test( + name = "go_default_xtest", + srcs = [ + "example_test.go", + "influxdb_test.go", + ], + importpath = "github.com/influxdata/influxdb/client_test", + deps = [":go_default_library"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//vendor/github.com/influxdata/influxdb/client/v2:all-srcs", + ], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/influxdata/influxdb/client/README.md b/vendor/github.com/influxdata/influxdb/client/README.md new file mode 100644 index 000000000000..47abd2e6b7c4 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/client/README.md @@ -0,0 +1,292 @@ +# InfluxDB Client + +[![GoDoc](https://godoc.org/github.com/influxdata/influxdb?status.svg)](http://godoc.org/github.com/influxdata/influxdb/client/v2) + +## Description + +**NOTE:** The Go client library now has a "v2" version, with the old version +being deprecated. The new version can be imported at +`import "github.com/influxdata/influxdb/client/v2"`. It is not backwards-compatible. + +A Go client library written and maintained by the **InfluxDB** team. +This package provides convenience functions to read and write time series data. +It uses the HTTP protocol to communicate with your **InfluxDB** cluster. + + +## Getting Started + +### Connecting To Your Database + +Connecting to an **InfluxDB** database is straightforward. 
You will need a host
+name, a port, and the cluster user credentials if applicable. The default port is
+8086. You can customize these settings to your specific installation via the
+**InfluxDB** configuration file.
+
+Though not necessary for experimentation, you may want to create a new user
+and authenticate the connection to your database.
+
+For more information please check out the
+[Admin Docs](https://docs.influxdata.com/influxdb/latest/administration/).
+
+For the impatient, you can create a new admin user _bubba_ by firing off the
+[InfluxDB CLI](https://github.com/influxdata/influxdb/blob/master/cmd/influx/main.go).
+
+```shell
+influx
+> create user bubba with password 'bumblebeetuna'
+> grant all privileges to bubba
+```
+
+And now for good measure set the credentials in your shell environment.
+In the example below we will use $INFLUX_USER and $INFLUX_PWD.
+
+Now with the administrivia out of the way, let's connect to our database.
+
+NOTE: If you've opted out of creating a user, you can omit Username and Password in
+the configuration below.
+
+```go
+package main
+
+import (
+	"log"
+	"time"
+
+	"github.com/influxdata/influxdb/client/v2"
+)
+
+const (
+	MyDB     = "square_holes"
+	username = "bubba"
+	password = "bumblebeetuna"
+)
+
+func main() {
+	// Make client
+	c, err := client.NewHTTPClient(client.HTTPConfig{
+		Addr:     "http://localhost:8086",
+		Username: username,
+		Password: password,
+	})
+	if err != nil {
+		log.Fatalln("Error: ", err)
+	}
+
+	// Create a new point batch
+	bp, err := client.NewBatchPoints(client.BatchPointsConfig{
+		Database:  MyDB,
+		Precision: "s",
+	})
+	if err != nil {
+		log.Fatalln("Error: ", err)
+	}
+
+	// Create a point and add to batch
+	tags := map[string]string{"cpu": "cpu-total"}
+	fields := map[string]interface{}{
+		"idle":   10.1,
+		"system": 53.3,
+		"user":   46.6,
+	}
+	pt, err := client.NewPoint("cpu_usage", tags, fields, time.Now())
+	if err != nil {
+		log.Fatalln("Error: ", err)
+	}
+	bp.AddPoint(pt)
+
+	// Write the batch and check for an error
+	if err := c.Write(bp); err != nil {
+		log.Fatalln("Error: ", err)
+	}
+}
+```
+
+### Inserting Data
+
+Time series data, aka *points*, are written to the database using batch inserts.
+The mechanism is to create one or more points and then create a batch, aka
+*batch points*, and write these to a given database and series. A series is a
+combination of a measurement (time/values) and a set of tags.
+
+In this sample we will create a batch of 1,000 points. Each point has a time and
+two values, as well as tags indicating a host and region. We write these points
+to a database called _systemstats_ using a measurement named _cpu_usage_.
+
+NOTE: You can specify a RetentionPolicy as part of the batch points. If not
+provided, InfluxDB will use the database _default_ retention policy.
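+For example (using a hypothetical retention policy named `two_weeks`, which
+must already exist on the server), the policy is named when the batch is
+created:
+
+```go
+bp, err := client.NewBatchPoints(client.BatchPointsConfig{
+	Database:        "square_holes",
+	RetentionPolicy: "two_weeks", // hypothetical policy name
+	Precision:       "s",
+})
+if err != nil {
+	log.Fatalln("Error: ", err)
+}
+```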
+
+```go
+func writePoints(clnt client.Client) {
+	sampleSize := 1000
+	rand.Seed(42)
+
+	bp, _ := client.NewBatchPoints(client.BatchPointsConfig{
+		Database:  "systemstats",
+		Precision: "us",
+	})
+
+	for i := 0; i < sampleSize; i++ {
+		regions := []string{"us-west1", "us-west2", "us-west3", "us-east1"}
+		tags := map[string]string{
+			"cpu":    "cpu-total",
+			"host":   fmt.Sprintf("host%d", rand.Intn(1000)),
+			"region": regions[rand.Intn(len(regions))],
+		}
+
+		idle := rand.Float64() * 100.0
+		fields := map[string]interface{}{
+			"idle": idle,
+			"busy": 100.0 - idle,
+		}
+
+		pt, err := client.NewPoint(
+			"cpu_usage",
+			tags,
+			fields,
+			time.Now(),
+		)
+		if err != nil {
+			log.Fatal(err)
+		}
+		bp.AddPoint(pt)
+	}
+
+	err := clnt.Write(bp)
+	if err != nil {
+		log.Fatal(err)
+	}
+}
+```
+
+
+### Querying Data
+
+One nice advantage of using **InfluxDB** is the ability to query your data using familiar
+SQL constructs. In this example we create a convenience function to query the database
+as follows:
+
+```go
+// queryDB convenience function to query the database
+func queryDB(clnt client.Client, cmd string) (res []client.Result, err error) {
+	q := client.Query{
+		Command:  cmd,
+		Database: MyDB,
+	}
+	if response, err := clnt.Query(q); err == nil {
+		if response.Error() != nil {
+			return res, response.Error()
+		}
+		res = response.Results
+	} else {
+		return res, err
+	}
+	return res, nil
+}
+```
+
+#### Creating a Database
+
+```go
+_, err := queryDB(clnt, fmt.Sprintf("CREATE DATABASE %s", MyDB))
+if err != nil {
+	log.Fatal(err)
+}
+```
+
+#### Count Records
+
+```go
+q := fmt.Sprintf("SELECT count(%s) FROM %s", "value", MyMeasurement)
+res, err := queryDB(clnt, q)
+if err != nil {
+	log.Fatal(err)
+}
+count := res[0].Series[0].Values[0][1]
+log.Printf("Found a total of %v records\n", count)
+```
+
+#### Find the last 10 _shapes_ records
+
+```go
+q := fmt.Sprintf("SELECT * FROM %s LIMIT %d", MyMeasurement, 10)
+res, err = queryDB(clnt, q)
+if err != nil {
+	log.Fatal(err)
+}
+
+for i, row := range res[0].Series[0].Values {
+	t, err := time.Parse(time.RFC3339, row[0].(string))
+	if err != nil {
+		log.Fatal(err)
+	}
+	val := row[1]
+	log.Printf("[%2d] %s: %v\n", i, t.Format(time.Stamp), val)
+}
+```
+
+### Using the UDP Client
+
+The **InfluxDB** client also supports writing over UDP.
+
+```go
+func WriteUDP() {
+	// Make client
+	c, err := client.NewUDPClient(client.UDPConfig{Addr: "localhost:8089"})
+	if err != nil {
+		panic(err.Error())
+	}
+
+	// Create a new point batch
+	bp, _ := client.NewBatchPoints(client.BatchPointsConfig{
+		Precision: "s",
+	})
+
+	// Create a point and add to batch
+	tags := map[string]string{"cpu": "cpu-total"}
+	fields := map[string]interface{}{
+		"idle":   10.1,
+		"system": 53.3,
+		"user":   46.6,
+	}
+	pt, err := client.NewPoint("cpu_usage", tags, fields, time.Now())
+	if err != nil {
+		panic(err.Error())
+	}
+	bp.AddPoint(pt)
+
+	// Write the batch and check for an error
+	if err := c.Write(bp); err != nil {
+		panic(err.Error())
+	}
+}
+```
+
+### Point Splitting
+
+The UDP client now supports splitting single points that exceed the configured
+payload size. The logic for processing each point is listed here, starting with
+an empty payload.
+
+1. If adding the point to the current (non-empty) payload would exceed the
+   configured size, send the current payload. Otherwise, add it to the current
+   payload.
+1. If the point is smaller than the configured size, add it to the payload.
+1. If the point has no timestamp, just try to send the entire point as a single
+   UDP payload, and process the next point.
+1.
Since the point has a timestamp, re-use the existing measurement name,
+   tagset, and timestamp and create multiple new points by splitting up the
+   fields. The per-point length will be kept close to the configured size,
+   staying under it if possible. This does mean that one large field, maybe a
+   long string, could be sent as a larger-than-configured payload.
+
+The above logic attempts to respect configured payload sizes while not
+sacrificing any data integrity. Points without a timestamp can't be split, as
+that may cause fields to have differing timestamps when processed by the
+server.
+
+## Go Docs
+
+Please refer to
+[http://godoc.org/github.com/influxdata/influxdb/client/v2](http://godoc.org/github.com/influxdata/influxdb/client/v2)
+for documentation.
+
+## See Also
+
+You can also examine how the client library is used by the
+[InfluxDB CLI](https://github.com/influxdata/influxdb/blob/master/cmd/influx/main.go).
diff --git a/vendor/github.com/influxdata/influxdb/client/example_test.go b/vendor/github.com/influxdata/influxdb/client/example_test.go
new file mode 100644
index 000000000000..f3753834a97d
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/client/example_test.go
@@ -0,0 +1,113 @@
+package client_test
+
+import (
+	"fmt"
+	"log"
+	"math/rand"
+	"net/url"
+	"os"
+	"strconv"
+	"time"
+
+	"github.com/influxdata/influxdb/client"
+)
+
+func ExampleNewClient() {
+	host, err := url.Parse(fmt.Sprintf("http://%s:%d", "localhost", 8086))
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// NOTE: this assumes you've set up a user and the INFLUX_USER/INFLUX_PWD
+	// shell env variables. If not, just omit Username/Password below.
+	conf := client.Config{
+		URL:      *host,
+		Username: os.Getenv("INFLUX_USER"),
+		Password: os.Getenv("INFLUX_PWD"),
+	}
+	con, err := client.NewClient(conf)
+	if err != nil {
+		log.Fatal(err)
+	}
+	log.Println("Connection", con)
+}
+
+func ExampleClient_Ping() {
+	host, err := url.Parse(fmt.Sprintf("http://%s:%d", "localhost", 8086))
+	if err != nil {
+		log.Fatal(err)
+	}
+	con, err := client.NewClient(client.Config{URL: *host})
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	dur, ver, err := con.Ping()
+	if err != nil {
+		log.Fatal(err)
+	}
+	log.Printf("Happy as a hippo!
%v, %s", dur, ver) +} + +func ExampleClient_Query() { + host, err := url.Parse(fmt.Sprintf("http://%s:%d", "localhost", 8086)) + if err != nil { + log.Fatal(err) + } + con, err := client.NewClient(client.Config{URL: *host}) + if err != nil { + log.Fatal(err) + } + + q := client.Query{ + Command: "select count(value) from shapes", + Database: "square_holes", + } + if response, err := con.Query(q); err == nil && response.Error() == nil { + log.Println(response.Results) + } +} + +func ExampleClient_Write() { + host, err := url.Parse(fmt.Sprintf("http://%s:%d", "localhost", 8086)) + if err != nil { + log.Fatal(err) + } + con, err := client.NewClient(client.Config{URL: *host}) + if err != nil { + log.Fatal(err) + } + + var ( + shapes = []string{"circle", "rectangle", "square", "triangle"} + colors = []string{"red", "blue", "green"} + sampleSize = 1000 + pts = make([]client.Point, sampleSize) + ) + + rand.Seed(42) + for i := 0; i < sampleSize; i++ { + pts[i] = client.Point{ + Measurement: "shapes", + Tags: map[string]string{ + "color": strconv.Itoa(rand.Intn(len(colors))), + "shape": strconv.Itoa(rand.Intn(len(shapes))), + }, + Fields: map[string]interface{}{ + "value": rand.Intn(sampleSize), + }, + Time: time.Now(), + Precision: "s", + } + } + + bps := client.BatchPoints{ + Points: pts, + Database: "BumbeBeeTuna", + RetentionPolicy: "default", + } + _, err = con.Write(bps) + if err != nil { + log.Fatal(err) + } +} diff --git a/vendor/github.com/influxdata/influxdb/client/influxdb.go b/vendor/github.com/influxdata/influxdb/client/influxdb.go new file mode 100644 index 000000000000..909fe747f19a --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/client/influxdb.go @@ -0,0 +1,829 @@ +package client // import "github.com/influxdata/influxdb/client" + +import ( + "bytes" + "crypto/tls" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + "net/url" + "strconv" + "strings" + "time" + + "github.com/influxdata/influxdb/models" +) + +const ( + // DefaultHost is the default host used to connect to an InfluxDB instance + DefaultHost = "localhost" + + // DefaultPort is the default port used to connect to an InfluxDB instance + DefaultPort = 8086 + + // DefaultTimeout is the default connection timeout used to connect to an InfluxDB instance + DefaultTimeout = 0 +) + +// Query is used to send a command to the server. Both Command and Database are required. +type Query struct { + Command string + Database string + + // Chunked tells the server to send back chunked responses. This places + // less load on the server by sending back chunks of the response rather + // than waiting for the entire response all at once. + Chunked bool + + // ChunkSize sets the maximum number of rows that will be returned per + // chunk. Chunks are either divided based on their series or if they hit + // the chunk size limit. + // + // Chunked must be set to true for this option to be used. 
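+	// A ChunkSize of zero omits the chunk_size parameter from the
+	// request, leaving the chunk size at the server's default.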
+ ChunkSize int +} + +// ParseConnectionString will parse a string to create a valid connection URL +func ParseConnectionString(path string, ssl bool) (url.URL, error) { + var host string + var port int + + h, p, err := net.SplitHostPort(path) + if err != nil { + if path == "" { + host = DefaultHost + } else { + host = path + } + // If they didn't specify a port, always use the default port + port = DefaultPort + } else { + host = h + port, err = strconv.Atoi(p) + if err != nil { + return url.URL{}, fmt.Errorf("invalid port number %q: %s\n", path, err) + } + } + + u := url.URL{ + Scheme: "http", + } + if ssl { + u.Scheme = "https" + } + + u.Host = net.JoinHostPort(host, strconv.Itoa(port)) + + return u, nil +} + +// Config is used to specify what server to connect to. +// URL: The URL of the server connecting to. +// Username/Password are optional. They will be passed via basic auth if provided. +// UserAgent: If not provided, will default "InfluxDBClient", +// Timeout: If not provided, will default to 0 (no timeout) +type Config struct { + URL url.URL + UnixSocket string + Username string + Password string + UserAgent string + Timeout time.Duration + Precision string + WriteConsistency string + UnsafeSsl bool +} + +// NewConfig will create a config to be used in connecting to the client +func NewConfig() Config { + return Config{ + Timeout: DefaultTimeout, + } +} + +// Client is used to make calls to the server. +type Client struct { + url url.URL + unixSocket string + username string + password string + httpClient *http.Client + userAgent string + precision string +} + +const ( + // ConsistencyOne requires at least one data node acknowledged a write. + ConsistencyOne = "one" + + // ConsistencyAll requires all data nodes to acknowledge a write. + ConsistencyAll = "all" + + // ConsistencyQuorum requires a quorum of data nodes to acknowledge a write. + ConsistencyQuorum = "quorum" + + // ConsistencyAny allows for hinted hand off, potentially no write happened yet. + ConsistencyAny = "any" +) + +// NewClient will instantiate and return a connected client to issue commands to the server. +func NewClient(c Config) (*Client, error) { + tlsConfig := &tls.Config{ + InsecureSkipVerify: c.UnsafeSsl, + } + + tr := &http.Transport{ + TLSClientConfig: tlsConfig, + } + + if c.UnixSocket != "" { + // No need for compression in local communications. 
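+		// The custom Dial below sends every request through the unix
+		// socket, ignoring the host portion of the configured URL.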
+ tr.DisableCompression = true + + tr.Dial = func(_, _ string) (net.Conn, error) { + return net.Dial("unix", c.UnixSocket) + } + } + + client := Client{ + url: c.URL, + unixSocket: c.UnixSocket, + username: c.Username, + password: c.Password, + httpClient: &http.Client{Timeout: c.Timeout, Transport: tr}, + userAgent: c.UserAgent, + precision: c.Precision, + } + if client.userAgent == "" { + client.userAgent = "InfluxDBClient" + } + return &client, nil +} + +// SetAuth will update the username and passwords +func (c *Client) SetAuth(u, p string) { + c.username = u + c.password = p +} + +// SetPrecision will update the precision +func (c *Client) SetPrecision(precision string) { + c.precision = precision +} + +// Query sends a command to the server and returns the Response +func (c *Client) Query(q Query) (*Response, error) { + u := c.url + + u.Path = "query" + values := u.Query() + values.Set("q", q.Command) + values.Set("db", q.Database) + if q.Chunked { + values.Set("chunked", "true") + if q.ChunkSize > 0 { + values.Set("chunk_size", strconv.Itoa(q.ChunkSize)) + } + } + if c.precision != "" { + values.Set("epoch", c.precision) + } + u.RawQuery = values.Encode() + + req, err := http.NewRequest("POST", u.String(), nil) + if err != nil { + return nil, err + } + req.Header.Set("User-Agent", c.userAgent) + if c.username != "" { + req.SetBasicAuth(c.username, c.password) + } + + resp, err := c.httpClient.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var response Response + if q.Chunked { + cr := NewChunkedResponse(resp.Body) + for { + r, err := cr.NextResponse() + if err != nil { + // If we got an error while decoding the response, send that back. + return nil, err + } + + if r == nil { + break + } + + response.Results = append(response.Results, r.Results...) + if r.Err != nil { + response.Err = r.Err + break + } + } + } else { + dec := json.NewDecoder(resp.Body) + dec.UseNumber() + if err := dec.Decode(&response); err != nil { + // Ignore EOF errors if we got an invalid status code. + if !(err == io.EOF && resp.StatusCode != http.StatusOK) { + return nil, err + } + } + } + + // If we don't have an error in our json response, and didn't get StatusOK, + // then send back an error. + if resp.StatusCode != http.StatusOK && response.Error() == nil { + return &response, fmt.Errorf("received status code %d from server", resp.StatusCode) + } + return &response, nil +} + +// Write takes BatchPoints and allows for writing of multiple points with defaults +// If successful, error is nil and Response is nil +// If an error occurs, Response may contain additional information if populated. 
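+// Points are serialized in line protocol and sent in a single POST to /write;
+// batch-level tags from BatchPoints are merged into each point first.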
+func (c *Client) Write(bp BatchPoints) (*Response, error) { + u := c.url + u.Path = "write" + + var b bytes.Buffer + for _, p := range bp.Points { + err := checkPointTypes(p) + if err != nil { + return nil, err + } + if p.Raw != "" { + if _, err := b.WriteString(p.Raw); err != nil { + return nil, err + } + } else { + for k, v := range bp.Tags { + if p.Tags == nil { + p.Tags = make(map[string]string, len(bp.Tags)) + } + p.Tags[k] = v + } + + if _, err := b.WriteString(p.MarshalString()); err != nil { + return nil, err + } + } + + if err := b.WriteByte('\n'); err != nil { + return nil, err + } + } + + req, err := http.NewRequest("POST", u.String(), &b) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "") + req.Header.Set("User-Agent", c.userAgent) + if c.username != "" { + req.SetBasicAuth(c.username, c.password) + } + + precision := bp.Precision + if precision == "" { + precision = c.precision + } + + params := req.URL.Query() + params.Set("db", bp.Database) + params.Set("rp", bp.RetentionPolicy) + params.Set("precision", precision) + params.Set("consistency", bp.WriteConsistency) + req.URL.RawQuery = params.Encode() + + resp, err := c.httpClient.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var response Response + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, err + } + + if resp.StatusCode != http.StatusNoContent && resp.StatusCode != http.StatusOK { + var err = fmt.Errorf(string(body)) + response.Err = err + return &response, err + } + + return nil, nil +} + +// WriteLineProtocol takes a string with line returns to delimit each write +// If successful, error is nil and Response is nil +// If an error occurs, Response may contain additional information if populated. +func (c *Client) WriteLineProtocol(data, database, retentionPolicy, precision, writeConsistency string) (*Response, error) { + u := c.url + u.Path = "write" + + r := strings.NewReader(data) + + req, err := http.NewRequest("POST", u.String(), r) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "") + req.Header.Set("User-Agent", c.userAgent) + if c.username != "" { + req.SetBasicAuth(c.username, c.password) + } + params := req.URL.Query() + params.Set("db", database) + params.Set("rp", retentionPolicy) + params.Set("precision", precision) + params.Set("consistency", writeConsistency) + req.URL.RawQuery = params.Encode() + + resp, err := c.httpClient.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var response Response + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, err + } + + if resp.StatusCode != http.StatusNoContent && resp.StatusCode != http.StatusOK { + err := fmt.Errorf(string(body)) + response.Err = err + return &response, err + } + + return nil, nil +} + +// Ping will check to see if the server is up +// Ping returns how long the request took, the version of the server it connected to, and an error if one occurred. 
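+// The version string is read from the X-Influxdb-Version response header.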
+func (c *Client) Ping() (time.Duration, string, error) { + now := time.Now() + u := c.url + u.Path = "ping" + + req, err := http.NewRequest("GET", u.String(), nil) + if err != nil { + return 0, "", err + } + req.Header.Set("User-Agent", c.userAgent) + if c.username != "" { + req.SetBasicAuth(c.username, c.password) + } + + resp, err := c.httpClient.Do(req) + if err != nil { + return 0, "", err + } + defer resp.Body.Close() + + version := resp.Header.Get("X-Influxdb-Version") + return time.Since(now), version, nil +} + +// Structs + +// Message represents a user message. +type Message struct { + Level string `json:"level,omitempty"` + Text string `json:"text,omitempty"` +} + +// Result represents a resultset returned from a single statement. +type Result struct { + Series []models.Row + Messages []*Message + Err error +} + +// MarshalJSON encodes the result into JSON. +func (r *Result) MarshalJSON() ([]byte, error) { + // Define a struct that outputs "error" as a string. + var o struct { + Series []models.Row `json:"series,omitempty"` + Messages []*Message `json:"messages,omitempty"` + Err string `json:"error,omitempty"` + } + + // Copy fields to output struct. + o.Series = r.Series + o.Messages = r.Messages + if r.Err != nil { + o.Err = r.Err.Error() + } + + return json.Marshal(&o) +} + +// UnmarshalJSON decodes the data into the Result struct +func (r *Result) UnmarshalJSON(b []byte) error { + var o struct { + Series []models.Row `json:"series,omitempty"` + Messages []*Message `json:"messages,omitempty"` + Err string `json:"error,omitempty"` + } + + dec := json.NewDecoder(bytes.NewBuffer(b)) + dec.UseNumber() + err := dec.Decode(&o) + if err != nil { + return err + } + r.Series = o.Series + r.Messages = o.Messages + if o.Err != "" { + r.Err = errors.New(o.Err) + } + return nil +} + +// Response represents a list of statement results. +type Response struct { + Results []Result + Err error +} + +// MarshalJSON encodes the response into JSON. +func (r *Response) MarshalJSON() ([]byte, error) { + // Define a struct that outputs "error" as a string. + var o struct { + Results []Result `json:"results,omitempty"` + Err string `json:"error,omitempty"` + } + + // Copy fields to output struct. + o.Results = r.Results + if r.Err != nil { + o.Err = r.Err.Error() + } + + return json.Marshal(&o) +} + +// UnmarshalJSON decodes the data into the Response struct +func (r *Response) UnmarshalJSON(b []byte) error { + var o struct { + Results []Result `json:"results,omitempty"` + Err string `json:"error,omitempty"` + } + + dec := json.NewDecoder(bytes.NewBuffer(b)) + dec.UseNumber() + err := dec.Decode(&o) + if err != nil { + return err + } + r.Results = o.Results + if o.Err != "" { + r.Err = errors.New(o.Err) + } + return nil +} + +// Error returns the first error from any statement. +// Returns nil if no errors occurred on any statements. +func (r *Response) Error() error { + if r.Err != nil { + return r.Err + } + for _, result := range r.Results { + if result.Err != nil { + return result.Err + } + } + return nil +} + +// duplexReader reads responses and writes it to another writer while +// satisfying the reader interface. +type duplexReader struct { + r io.Reader + w io.Writer +} + +func (r *duplexReader) Read(p []byte) (n int, err error) { + n, err = r.r.Read(p) + if err == nil { + r.w.Write(p[:n]) + } + return n, err +} + +// ChunkedResponse represents a response from the server that +// uses chunking to stream the output. 
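+// Each chunk on the wire is a complete JSON-encoded Response; a typical
+// consumer calls NextResponse in a loop until it returns (nil, nil),
+// mirroring how Client.Query drains a chunked query.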
+type ChunkedResponse struct { + dec *json.Decoder + duplex *duplexReader + buf bytes.Buffer +} + +// NewChunkedResponse reads a stream and produces responses from the stream. +func NewChunkedResponse(r io.Reader) *ChunkedResponse { + resp := &ChunkedResponse{} + resp.duplex = &duplexReader{r: r, w: &resp.buf} + resp.dec = json.NewDecoder(resp.duplex) + resp.dec.UseNumber() + return resp +} + +// NextResponse reads the next line of the stream and returns a response. +func (r *ChunkedResponse) NextResponse() (*Response, error) { + var response Response + if err := r.dec.Decode(&response); err != nil { + if err == io.EOF { + return nil, nil + } + // A decoding error happened. This probably means the server crashed + // and sent a last-ditch error message to us. Ensure we have read the + // entirety of the connection to get any remaining error text. + io.Copy(ioutil.Discard, r.duplex) + return nil, errors.New(strings.TrimSpace(r.buf.String())) + } + r.buf.Reset() + return &response, nil +} + +// Point defines the fields that will be written to the database +// Measurement, Time, and Fields are required +// Precision can be specified if the time is in epoch format (integer). +// Valid values for Precision are n, u, ms, s, m, and h +type Point struct { + Measurement string + Tags map[string]string + Time time.Time + Fields map[string]interface{} + Precision string + Raw string +} + +// MarshalJSON will format the time in RFC3339Nano +// Precision is also ignored as it is only used for writing, not reading +// Or another way to say it is we always send back in nanosecond precision +func (p *Point) MarshalJSON() ([]byte, error) { + point := struct { + Measurement string `json:"measurement,omitempty"` + Tags map[string]string `json:"tags,omitempty"` + Time string `json:"time,omitempty"` + Fields map[string]interface{} `json:"fields,omitempty"` + Precision string `json:"precision,omitempty"` + }{ + Measurement: p.Measurement, + Tags: p.Tags, + Fields: p.Fields, + Precision: p.Precision, + } + // Let it omit empty if it's really zero + if !p.Time.IsZero() { + point.Time = p.Time.UTC().Format(time.RFC3339Nano) + } + return json.Marshal(&point) +} + +// MarshalString renders string representation of a Point with specified +// precision. The default precision is nanoseconds. +func (p *Point) MarshalString() string { + pt, err := models.NewPoint(p.Measurement, models.NewTags(p.Tags), p.Fields, p.Time) + if err != nil { + return "# ERROR: " + err.Error() + " " + p.Measurement + } + if p.Precision == "" || p.Precision == "ns" || p.Precision == "n" { + return pt.String() + } + return pt.PrecisionString(p.Precision) +} + +// UnmarshalJSON decodes the data into the Point struct +func (p *Point) UnmarshalJSON(b []byte) error { + var normal struct { + Measurement string `json:"measurement"` + Tags map[string]string `json:"tags"` + Time time.Time `json:"time"` + Precision string `json:"precision"` + Fields map[string]interface{} `json:"fields"` + } + var epoch struct { + Measurement string `json:"measurement"` + Tags map[string]string `json:"tags"` + Time *int64 `json:"time"` + Precision string `json:"precision"` + Fields map[string]interface{} `json:"fields"` + } + + if err := func() error { + var err error + dec := json.NewDecoder(bytes.NewBuffer(b)) + dec.UseNumber() + if err = dec.Decode(&epoch); err != nil { + return err + } + // Convert from epoch to time.Time, but only if Time + // was actually set. 
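+		// A nil epoch leaves ts as the zero time.Time, which
+		// MarshalJSON later omits from its output.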
+
+// UnmarshalJSON decodes the data into the Point struct.
+func (p *Point) UnmarshalJSON(b []byte) error {
+	var normal struct {
+		Measurement string                 `json:"measurement"`
+		Tags        map[string]string      `json:"tags"`
+		Time        time.Time              `json:"time"`
+		Precision   string                 `json:"precision"`
+		Fields      map[string]interface{} `json:"fields"`
+	}
+	var epoch struct {
+		Measurement string                 `json:"measurement"`
+		Tags        map[string]string      `json:"tags"`
+		Time        *int64                 `json:"time"`
+		Precision   string                 `json:"precision"`
+		Fields      map[string]interface{} `json:"fields"`
+	}
+
+	// First try decoding the time as an integer epoch value.
+	if err := func() error {
+		var err error
+		dec := json.NewDecoder(bytes.NewBuffer(b))
+		dec.UseNumber()
+		if err = dec.Decode(&epoch); err != nil {
+			return err
+		}
+		// Convert from epoch to time.Time, but only if Time
+		// was actually set.
+		var ts time.Time
+		if epoch.Time != nil {
+			ts, err = EpochToTime(*epoch.Time, epoch.Precision)
+			if err != nil {
+				return err
+			}
+		}
+		p.Measurement = epoch.Measurement
+		p.Tags = epoch.Tags
+		p.Time = ts
+		p.Precision = epoch.Precision
+		p.Fields = normalizeFields(epoch.Fields)
+		return nil
+	}(); err == nil {
+		return nil
+	}
+
+	// Otherwise fall back to an RFC3339-formatted time.
+	dec := json.NewDecoder(bytes.NewBuffer(b))
+	dec.UseNumber()
+	if err := dec.Decode(&normal); err != nil {
+		return err
+	}
+	normal.Time = SetPrecision(normal.Time, normal.Precision)
+	p.Measurement = normal.Measurement
+	p.Tags = normal.Tags
+	p.Time = normal.Time
+	p.Precision = normal.Precision
+	p.Fields = normalizeFields(normal.Fields)
+
+	return nil
+}
+
+// normalizeFields removes any notion of json.Number, converting numeric
+// fields to float64.
+func normalizeFields(fields map[string]interface{}) map[string]interface{} {
+	newFields := map[string]interface{}{}
+
+	for k, v := range fields {
+		switch v := v.(type) {
+		case json.Number:
+			jv, e := v.Float64()
+			if e != nil {
+				panic(fmt.Sprintf("unable to convert json.Number to float64: %s", e))
+			}
+			newFields[k] = jv
+		default:
+			newFields[k] = v
+		}
+	}
+	return newFields
+}
+
+// BatchPoints is used to send batched data in a single write.
+// Database and Points are required.
+// If no retention policy is specified, the database's default retention
+// policy is used.
+// If tags are specified, they will be "merged" with all points; if a point
+// already has a given tag, the batch-level tag is ignored.
+// If time is specified, it will be applied to any point with an empty time.
+// Precision can be specified if the time is in epoch format (integer).
+// Valid values for Precision are n, u, ms, s, m, and h.
+type BatchPoints struct {
+	Points           []Point           `json:"points,omitempty"`
+	Database         string            `json:"database,omitempty"`
+	RetentionPolicy  string            `json:"retentionPolicy,omitempty"`
+	Tags             map[string]string `json:"tags,omitempty"`
+	Time             time.Time         `json:"time,omitempty"`
+	Precision        string            `json:"precision,omitempty"`
+	WriteConsistency string            `json:"-"`
+}
+
+// UnmarshalJSON decodes the data into the BatchPoints struct.
+func (bp *BatchPoints) UnmarshalJSON(b []byte) error {
+	var normal struct {
+		Points          []Point           `json:"points"`
+		Database        string            `json:"database"`
+		RetentionPolicy string            `json:"retentionPolicy"`
+		Tags            map[string]string `json:"tags"`
+		Time            time.Time         `json:"time"`
+		Precision       string            `json:"precision"`
+	}
+	var epoch struct {
+		Points          []Point           `json:"points"`
+		Database        string            `json:"database"`
+		RetentionPolicy string            `json:"retentionPolicy"`
+		Tags            map[string]string `json:"tags"`
+		Time            *int64            `json:"time"`
+		Precision       string            `json:"precision"`
+	}
+
+	if err := func() error {
+		var err error
+		if err = json.Unmarshal(b, &epoch); err != nil {
+			return err
+		}
+		// Convert from epoch to time.Time
+		var ts time.Time
+		if epoch.Time != nil {
+			ts, err = EpochToTime(*epoch.Time, epoch.Precision)
+			if err != nil {
+				return err
+			}
+		}
+		bp.Points = epoch.Points
+		bp.Database = epoch.Database
+		bp.RetentionPolicy = epoch.RetentionPolicy
+		bp.Tags = epoch.Tags
+		bp.Time = ts
+		bp.Precision = epoch.Precision
+		return nil
+	}(); err == nil {
+		return nil
+	}
+
+	if err := json.Unmarshal(b, &normal); err != nil {
+		return err
+	}
+	normal.Time = SetPrecision(normal.Time, normal.Precision)
+	bp.Points = normal.Points
+	bp.Database = normal.Database
+	bp.RetentionPolicy = normal.RetentionPolicy
+	bp.Tags = normal.Tags
+	bp.Time = normal.Time
+	bp.Precision = normal.Precision
+
+	return nil
+}
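+
+// An illustrative sketch (names and values invented): an integer "time"
+// value decodes through the epoch code path above, so this JSON yields a
+// BatchPoints whose point carries a nanosecond-precision timestamp:
+//
+//	var bp BatchPoints
+//	data := []byte(`{"database":"mydb","points":[{"measurement":"cpu","time":1434055562000000000,"precision":"n","fields":{"value":0.64}}]}`)
+//	if err := json.Unmarshal(data, &bp); err != nil {
+//		// handle decode error
+//	}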
+
+// utility functions
+
+// Addr returns the URL (or unix socket path) of the server the client is
+// connected to, as a string.
+func (c *Client) Addr() string {
+	if c.unixSocket != "" {
+		return c.unixSocket
+	}
+	return c.url.String()
+}
+
+// checkPointTypes ensures no unsupported field types are submitted to influxdb,
+// returning an error if one is found.
+func checkPointTypes(p Point) error {
+	for _, v := range p.Fields {
+		switch v.(type) {
+		case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, float32, float64, bool, string, nil:
+			// supported type; keep checking the remaining fields
+		default:
+			return fmt.Errorf("unsupported point type: %T", v)
+		}
+	}
+	return nil
+}
+
+// helper functions
+
+// EpochToTime takes a unix epoch time and a precision and returns the
+// corresponding time.Time.
+func EpochToTime(epoch int64, precision string) (time.Time, error) {
+	if precision == "" {
+		precision = "s"
+	}
+	var t time.Time
+	switch precision {
+	case "h":
+		t = time.Unix(0, epoch*int64(time.Hour))
+	case "m":
+		t = time.Unix(0, epoch*int64(time.Minute))
+	case "s":
+		t = time.Unix(0, epoch*int64(time.Second))
+	case "ms":
+		t = time.Unix(0, epoch*int64(time.Millisecond))
+	case "u":
+		t = time.Unix(0, epoch*int64(time.Microsecond))
+	case "n":
+		t = time.Unix(0, epoch)
+	default:
+		return time.Time{}, fmt.Errorf("unknown precision %q", precision)
+	}
+	return t, nil
+}
+
+// SetPrecision will round a time to the specified precision.
+func SetPrecision(t time.Time, precision string) time.Time {
+	switch precision {
+	case "n":
+		// nanoseconds are the native resolution; nothing to round
+	case "u":
+		return t.Round(time.Microsecond)
+	case "ms":
+		return t.Round(time.Millisecond)
+	case "s":
+		return t.Round(time.Second)
+	case "m":
+		return t.Round(time.Minute)
+	case "h":
+		return t.Round(time.Hour)
+	}
+	return t
+}
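+
+// An illustrative sketch of the helpers above (values invented):
+//
+//	t, err := EpochToTime(1434055562, "s") // 2015-06-11T20:46:02Z
+//	if err != nil {
+//		// unknown precision string
+//	}
+//	rounded := SetPrecision(time.Now(), "ms") // rounded to the millisecond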
expected %v, actual %v", nil, r) + } + } +} + +func BenchmarkUnmarshalJSON2Tags(b *testing.B) { + var bp client.BatchPoints + data := []byte(` +{ + "database": "foo", + "retentionPolicy": "bar", + "points": [ + { + "name": "cpu", + "tags": { + "host": "server01", + "region": "us-east1" + }, + "time": 14244733039069373, + "precision": "n", + "fields": { + "value": 4541770385657154000 + } + } + ] +} +`) + + for i := 0; i < b.N; i++ { + if err := json.Unmarshal(data, &bp); err != nil { + b.Errorf("unable to unmarshal nanosecond data: %s", err.Error()) + } + b.SetBytes(int64(len(data))) + } +} + +func BenchmarkUnmarshalJSON10Tags(b *testing.B) { + var bp client.BatchPoints + data := []byte(` +{ + "database": "foo", + "retentionPolicy": "bar", + "points": [ + { + "name": "cpu", + "tags": { + "host": "server01", + "region": "us-east1", + "tag1": "value1", + "tag2": "value2", + "tag2": "value3", + "tag4": "value4", + "tag5": "value5", + "tag6": "value6", + "tag7": "value7", + "tag8": "value8" + }, + "time": 14244733039069373, + "precision": "n", + "fields": { + "value": 4541770385657154000 + } + } + ] +} +`) + + for i := 0; i < b.N; i++ { + if err := json.Unmarshal(data, &bp); err != nil { + b.Errorf("unable to unmarshal nanosecond data: %s", err.Error()) + } + b.SetBytes(int64(len(data))) + } +} + +func TestNewClient(t *testing.T) { + config := client.Config{} + _, err := client.NewClient(config) + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } +} + +func TestClient_Ping(t *testing.T) { + ts := emptyTestServer() + defer ts.Close() + + u, _ := url.Parse(ts.URL) + config := client.Config{URL: *u} + c, err := client.NewClient(config) + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } + d, version, err := c.Ping() + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } + if d.Nanoseconds() == 0 { + t.Fatalf("expected a duration greater than zero. actual %v", d.Nanoseconds()) + } + if version != "x.x" { + t.Fatalf("unexpected version. expected %s, actual %v", "x.x", version) + } +} + +func TestClient_Query(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var data client.Response + w.WriteHeader(http.StatusOK) + _ = json.NewEncoder(w).Encode(data) + })) + defer ts.Close() + + u, _ := url.Parse(ts.URL) + config := client.Config{URL: *u} + c, err := client.NewClient(config) + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } + + query := client.Query{} + _, err = c.Query(query) + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } +} + +func TestClient_ChunkedQuery(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var data client.Response + w.WriteHeader(http.StatusOK) + enc := json.NewEncoder(w) + _ = enc.Encode(data) + _ = enc.Encode(data) + })) + defer ts.Close() + + u, _ := url.Parse(ts.URL) + config := client.Config{URL: *u} + c, err := client.NewClient(config) + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } + + query := client.Query{Chunked: true} + _, err = c.Query(query) + if err != nil { + t.Fatalf("unexpected error. 
expected %v, actual %v", nil, err) + } +} + +func TestClient_BasicAuth(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + u, p, ok := r.BasicAuth() + + if !ok { + t.Errorf("basic auth error") + } + if u != "username" { + t.Errorf("unexpected username, expected %q, actual %q", "username", u) + } + if p != "password" { + t.Errorf("unexpected password, expected %q, actual %q", "password", p) + } + w.WriteHeader(http.StatusNoContent) + })) + defer ts.Close() + + u, _ := url.Parse(ts.URL) + u.User = url.UserPassword("username", "password") + config := client.Config{URL: *u, Username: "username", Password: "password"} + c, err := client.NewClient(config) + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } + + _, _, err = c.Ping() + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } +} + +func TestClient_Write(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var data client.Response + w.WriteHeader(http.StatusNoContent) + _ = json.NewEncoder(w).Encode(data) + })) + defer ts.Close() + + u, _ := url.Parse(ts.URL) + config := client.Config{URL: *u} + c, err := client.NewClient(config) + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } + + bp := client.BatchPoints{} + r, err := c.Write(bp) + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } + if r != nil { + t.Fatalf("unexpected response. expected %v, actual %v", nil, r) + } +} + +func TestClient_UserAgent(t *testing.T) { + receivedUserAgent := "" + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + receivedUserAgent = r.UserAgent() + + var data client.Response + w.WriteHeader(http.StatusOK) + _ = json.NewEncoder(w).Encode(data) + })) + defer ts.Close() + + _, err := http.Get(ts.URL) + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } + + tests := []struct { + name string + userAgent string + expected string + }{ + { + name: "Empty user agent", + userAgent: "", + expected: "InfluxDBClient", + }, + { + name: "Custom user agent", + userAgent: "Test Influx Client", + expected: "Test Influx Client", + }, + } + + for _, test := range tests { + u, _ := url.Parse(ts.URL) + config := client.Config{URL: *u, UserAgent: test.userAgent} + c, err := client.NewClient(config) + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } + + receivedUserAgent = "" + query := client.Query{} + _, err = c.Query(query) + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } + if !strings.HasPrefix(receivedUserAgent, test.expected) { + t.Fatalf("Unexpected user agent. expected %v, actual %v", test.expected, receivedUserAgent) + } + + receivedUserAgent = "" + bp := client.BatchPoints{} + _, err = c.Write(bp) + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } + if !strings.HasPrefix(receivedUserAgent, test.expected) { + t.Fatalf("Unexpected user agent. expected %v, actual %v", test.expected, receivedUserAgent) + } + + receivedUserAgent = "" + _, _, err = c.Ping() + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } + if receivedUserAgent != test.expected { + t.Fatalf("Unexpected user agent. 
expected %v, actual %v", test.expected, receivedUserAgent) + } + } +} + +func TestClient_Messages(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"results":[{"messages":[{"level":"warning","text":"deprecation test"}]}]}`)) + })) + defer ts.Close() + + u, _ := url.Parse(ts.URL) + config := client.Config{URL: *u} + c, err := client.NewClient(config) + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } + + query := client.Query{} + resp, err := c.Query(query) + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } + + if got, exp := len(resp.Results), 1; got != exp { + t.Fatalf("unexpected number of results. expected %v, actual %v", exp, got) + } + + r := resp.Results[0] + if got, exp := len(r.Messages), 1; got != exp { + t.Fatalf("unexpected number of messages. expected %v, actual %v", exp, got) + } + + m := r.Messages[0] + if got, exp := m.Level, "warning"; got != exp { + t.Errorf("unexpected message level. expected %v, actual %v", exp, got) + } + if got, exp := m.Text, "deprecation test"; got != exp { + t.Errorf("unexpected message text. expected %v, actual %v", exp, got) + } +} + +func TestPoint_UnmarshalEpoch(t *testing.T) { + now := time.Now() + tests := []struct { + name string + epoch int64 + precision string + expected time.Time + }{ + { + name: "nanoseconds", + epoch: now.UnixNano(), + precision: "n", + expected: now, + }, + { + name: "microseconds", + epoch: now.Round(time.Microsecond).UnixNano() / int64(time.Microsecond), + precision: "u", + expected: now.Round(time.Microsecond), + }, + { + name: "milliseconds", + epoch: now.Round(time.Millisecond).UnixNano() / int64(time.Millisecond), + precision: "ms", + expected: now.Round(time.Millisecond), + }, + { + name: "seconds", + epoch: now.Round(time.Second).UnixNano() / int64(time.Second), + precision: "s", + expected: now.Round(time.Second), + }, + { + name: "minutes", + epoch: now.Round(time.Minute).UnixNano() / int64(time.Minute), + precision: "m", + expected: now.Round(time.Minute), + }, + { + name: "hours", + epoch: now.Round(time.Hour).UnixNano() / int64(time.Hour), + precision: "h", + expected: now.Round(time.Hour), + }, + { + name: "max int64", + epoch: 9223372036854775807, + precision: "n", + expected: time.Unix(0, 9223372036854775807), + }, + { + name: "100 years from now", + epoch: now.Add(time.Hour * 24 * 365 * 100).UnixNano(), + precision: "n", + expected: now.Add(time.Hour * 24 * 365 * 100), + }, + } + + for _, test := range tests { + t.Logf("testing %q\n", test.name) + data := []byte(fmt.Sprintf(`{"time": %d, "precision":"%s"}`, test.epoch, test.precision)) + t.Logf("json: %s", string(data)) + var p client.Point + err := json.Unmarshal(data, &p) + if err != nil { + t.Fatalf("unexpected error. exptected: %v, actual: %v", nil, err) + } + if !p.Time.Equal(test.expected) { + t.Fatalf("Unexpected time. 
expected: %v, actual: %v", test.expected, p.Time) + } + } +} + +func TestPoint_UnmarshalRFC(t *testing.T) { + now := time.Now().UTC() + tests := []struct { + name string + rfc string + now time.Time + expected time.Time + }{ + { + name: "RFC3339Nano", + rfc: time.RFC3339Nano, + now: now, + expected: now, + }, + { + name: "RFC3339", + rfc: time.RFC3339, + now: now.Round(time.Second), + expected: now.Round(time.Second), + }, + } + + for _, test := range tests { + t.Logf("testing %q\n", test.name) + ts := test.now.Format(test.rfc) + data := []byte(fmt.Sprintf(`{"time": %q}`, ts)) + t.Logf("json: %s", string(data)) + var p client.Point + err := json.Unmarshal(data, &p) + if err != nil { + t.Fatalf("unexpected error. exptected: %v, actual: %v", nil, err) + } + if !p.Time.Equal(test.expected) { + t.Fatalf("Unexpected time. expected: %v, actual: %v", test.expected, p.Time) + } + } +} + +func TestPoint_MarshalOmitempty(t *testing.T) { + now := time.Now().UTC() + tests := []struct { + name string + point client.Point + now time.Time + expected string + }{ + { + name: "all empty", + point: client.Point{Measurement: "cpu", Fields: map[string]interface{}{"value": 1.1}}, + now: now, + expected: `{"measurement":"cpu","fields":{"value":1.1}}`, + }, + { + name: "with time", + point: client.Point{Measurement: "cpu", Fields: map[string]interface{}{"value": 1.1}, Time: now}, + now: now, + expected: fmt.Sprintf(`{"measurement":"cpu","time":"%s","fields":{"value":1.1}}`, now.Format(time.RFC3339Nano)), + }, + { + name: "with tags", + point: client.Point{Measurement: "cpu", Fields: map[string]interface{}{"value": 1.1}, Tags: map[string]string{"foo": "bar"}}, + now: now, + expected: `{"measurement":"cpu","tags":{"foo":"bar"},"fields":{"value":1.1}}`, + }, + { + name: "with precision", + point: client.Point{Measurement: "cpu", Fields: map[string]interface{}{"value": 1.1}, Precision: "ms"}, + now: now, + expected: `{"measurement":"cpu","fields":{"value":1.1},"precision":"ms"}`, + }, + } + + for _, test := range tests { + t.Logf("testing %q\n", test.name) + b, err := json.Marshal(&test.point) + if err != nil { + t.Fatalf("unexpected error. exptected: %v, actual: %v", nil, err) + } + if test.expected != string(b) { + t.Fatalf("Unexpected result. 
expected: %v, actual: %v", test.expected, string(b)) + } + } +} + +func TestEpochToTime(t *testing.T) { + now := time.Now() + + tests := []struct { + name string + epoch int64 + precision string + expected time.Time + }{ + {name: "nanoseconds", epoch: now.UnixNano(), precision: "n", expected: now}, + {name: "microseconds", epoch: now.Round(time.Microsecond).UnixNano() / int64(time.Microsecond), precision: "u", expected: now.Round(time.Microsecond)}, + {name: "milliseconds", epoch: now.Round(time.Millisecond).UnixNano() / int64(time.Millisecond), precision: "ms", expected: now.Round(time.Millisecond)}, + {name: "seconds", epoch: now.Round(time.Second).UnixNano() / int64(time.Second), precision: "s", expected: now.Round(time.Second)}, + {name: "minutes", epoch: now.Round(time.Minute).UnixNano() / int64(time.Minute), precision: "m", expected: now.Round(time.Minute)}, + {name: "hours", epoch: now.Round(time.Hour).UnixNano() / int64(time.Hour), precision: "h", expected: now.Round(time.Hour)}, + } + + for _, test := range tests { + t.Logf("testing %q\n", test.name) + tm, e := client.EpochToTime(test.epoch, test.precision) + if e != nil { + t.Fatalf("unexpected error: expected %v, actual: %v", nil, e) + } + if tm != test.expected { + t.Fatalf("unexpected time: expected %v, actual %v", test.expected, tm) + } + } +} + +// helper functions + +func emptyTestServer() *httptest.Server { + return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + time.Sleep(50 * time.Millisecond) + w.Header().Set("X-Influxdb-Version", "x.x") + return + })) +} + +// Ensure that data with epoch times can be decoded. +func TestBatchPoints_Normal(t *testing.T) { + var bp client.BatchPoints + data := []byte(` +{ + "database": "foo", + "retentionPolicy": "bar", + "points": [ + { + "name": "cpu", + "tags": { + "host": "server01" + }, + "time": 14244733039069373, + "precision": "n", + "values": { + "value": 4541770385657154000 + } + }, + { + "name": "cpu", + "tags": { + "host": "server01" + }, + "time": 14244733039069380, + "precision": "n", + "values": { + "value": 7199311900554737000 + } + } + ] +} +`) + + if err := json.Unmarshal(data, &bp); err != nil { + t.Errorf("unable to unmarshal nanosecond data: %s", err.Error()) + } +} + +func TestClient_Timeout(t *testing.T) { + done := make(chan bool) + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + <-done + })) + defer ts.Close() + defer func() { done <- true }() + u, _ := url.Parse(ts.URL) + config := client.Config{URL: *u, Timeout: 500 * time.Millisecond} + c, err := client.NewClient(config) + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } + query := client.Query{} + _, err = c.Query(query) + if err == nil { + t.Fatalf("unexpected success. expected timeout error") + } else if !strings.Contains(err.Error(), "request canceled") && + !strings.Contains(err.Error(), "use of closed network connection") { + t.Fatalf("unexpected error. 
expected 'request canceled' error, got %v", err) + } +} + +func TestClient_NoTimeout(t *testing.T) { + if testing.Short() { + t.Skip("skipping in short mode") + } + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + time.Sleep(1 * time.Second) + var data client.Response + w.WriteHeader(http.StatusOK) + _ = json.NewEncoder(w).Encode(data) + })) + defer ts.Close() + + u, _ := url.Parse(ts.URL) + config := client.Config{URL: *u} + c, err := client.NewClient(config) + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } + + query := client.Query{} + _, err = c.Query(query) + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } +} + +func TestClient_WriteUint64(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var data client.Response + w.WriteHeader(http.StatusNoContent) + _ = json.NewEncoder(w).Encode(data) + })) + defer ts.Close() + + u, _ := url.Parse(ts.URL) + config := client.Config{URL: *u} + c, err := client.NewClient(config) + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } + bp := client.BatchPoints{ + Points: []client.Point{ + { + Fields: map[string]interface{}{"value": uint64(10)}, + }, + }, + } + r, err := c.Write(bp) + if err == nil { + t.Fatalf("unexpected error. expected err, actual %v", err) + } + if r != nil { + t.Fatalf("unexpected response. expected %v, actual %v", nil, r) + } +} + +func TestClient_ParseConnectionString_IPv6(t *testing.T) { + path := "[fdf5:9ede:1875:0:a9ee:a600:8fe3:d495]:8086" + u, err := client.ParseConnectionString(path, false) + if err != nil { + t.Fatalf("unexpected error, expected %v, actual %v", nil, err) + } + if u.Host != path { + t.Fatalf("ipv6 parse failed, expected %s, actual %s", path, u.Host) + } +} + +func TestClient_CustomCertificates(t *testing.T) { + // generated with: + // openssl req -x509 -newkey rsa:2048 -keyout key.pem -out cert.pem -days 3650 -nodes -config influx.cnf + // influx.cnf: + // [req] + // distinguished_name = req_distinguished_name + // x509_extensions = v3_req + // prompt = no + // [req_distinguished_name] + // C = US + // ST = CA + // L = San Francisco + // O = InfluxDB + // CN = github.com/influxdata + // [v3_req] + // keyUsage = keyEncipherment, dataEncipherment + // extendedKeyUsage = serverAuth + // subjectAltName = @alt_names + // [alt_names] + // IP.1 = 127.0.0.1 + // + key := ` +-----BEGIN PRIVATE KEY----- +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDLswqKJLxfhBRi +4qdj7+jpBxTAi4MewrcMPp+9YlbLke3F7w2DPrZVkYVeWmg8LyTPAigrXeadK6hv +qjRr05a7sMc5+ynivGbWUySWT+u17V85x6VR5TMIkJEOqpiIU8aYk0l+3UcrzVjS +1QZCUBoxVwAVaSR6AXTA8YrVXdk/AI3f22dYiBjFmV4LJJkGjTaCnlDKu54hMU1t +WTyFcoY9TBzZ1XA+ng5RQ/QADeL2PYrTW4s/mLI3jfKKD53EI4uM2FjW37ZfuxTa +mhCR7/lxM4COg9K70y5uebfqJvuoXAwXLOzVbdfF5b9fJFbL67kaK2tiMT3Wt39m +hXzclLTDAgMBAAECggEAK8mpElkjRUUXPMqMQSdpYe5rv5g973bb8n3jyMpC7i/I +dSwWM4hfmbVWfhnhHk7kErvb9raQxGiGJLrp2eP6Gw69RPGA54SodpoY21cCzHDi +b4FDQH+MoOKyy/xQHb4kitfejK70ha320huI5OhjOQgCtJeNh8yYVIGX3pX2BVyu +36UB9tfX1S5pbiHeih3vZGd322Muj/joNzIelnYRBnoO0xqvQ0S1Dk+dLCTHO0/m +u9AZN8c2TsRWZpJPMWwBv8LuABbE0e66/TSsrfklAn86ELCo44lZURDE7uPZ4pIH +FWtmf+nW5Hy6aPhy60E40MqotlejhWwB3ktY/m3JAQKBgQDuB4nhxzJA9lH9EaCt +byvJ9wGVvI3k79hOwc/Z2R3dNe+Ma+TJy+aBppvsLF4qz83aWC+canyasbHcPNR/ +vXQGlsgKfucrmd1PfMV7uvOIkfOjK0E6mRC+jMuKtNTQrdtM1BU/Z7LY0iy0fNJ6 +aNqhFdlJmmk0g+4bR4SAWB6FkwKBgQDbE/7r1u+GdJk/mhdjTi1aegr9lXb0l7L6 
+BCvOYhs/Z/pXfsaYPSXhgk2w+LiGk6BaEA2/4Sr0YS2MAAaIhBVeFBIXVpNrXB3K +Yg1jOEeLQ3qoVBeJFhJNrN9ZQx33HANC1W/Y1apMwaYqCRUGVQkrdcsN2KNea1z0 +3qeYeCCSEQKBgCKZKeuNfrp+k1BLnaVYAW9r3ekb7SwXyMM53LJ3oqWiz10D2c+T +OcAirYtYr59dcTiJlPIRcGcz6PxwQxsGOLU0eYM9CvEFfmutYS8o73ksbdOL2AFi +elKYOIXC3yQuATBbq3L56b8mXaUmd5mfYBgGCv1t2ljtzFBext248UbNAoGBAIv1 +2V24YiwnH6THf/ucfVMZNx5Mt8OJivk5YvcmLDw05HWzc5LdNe89PP871z963u3K +5c3ZP4UC9INFnOboY3JIJkqsr9/d6NZcECt8UBDDmoAhwSt+Y1EmiUZQn7s4NUkk +bKE919/Ts6GVTc5O013lkkUVS0HOG4QBH1dEH6LRAoGAStl11WA9tuKXiBl5XG/C +cq9mFPNJK3pEgd6YH874vEnYEEqENR4MFK3uWXus9Nm+VYxbUbPEzFF4kpsfukDg +/JAVqY4lUam7g6fyyaoIIPQEp7jGjbsUf46IjnUjFcaojOugA3EAfn9awREUDuJZ +cvh4WzEegcExTppINW1NB5E= +-----END PRIVATE KEY----- +` + cert := ` +-----BEGIN CERTIFICATE----- +MIIDdjCCAl6gAwIBAgIJAMYGAwkxUV51MA0GCSqGSIb3DQEBCwUAMFgxCzAJBgNV +BAYTAlVTMQswCQYDVQQIDAJDQTEWMBQGA1UEBwwNU2FuIEZyYW5jaXNjbzERMA8G +A1UECgwISW5mbHV4REIxETAPBgNVBAMMCGluZmx1eGRiMB4XDTE1MTIyOTAxNTg1 +NloXDTI1MTIyNjAxNTg1NlowWDELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNBMRYw +FAYDVQQHDA1TYW4gRnJhbmNpc2NvMREwDwYDVQQKDAhJbmZsdXhEQjERMA8GA1UE +AwwIaW5mbHV4ZGIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDLswqK +JLxfhBRi4qdj7+jpBxTAi4MewrcMPp+9YlbLke3F7w2DPrZVkYVeWmg8LyTPAigr +XeadK6hvqjRr05a7sMc5+ynivGbWUySWT+u17V85x6VR5TMIkJEOqpiIU8aYk0l+ +3UcrzVjS1QZCUBoxVwAVaSR6AXTA8YrVXdk/AI3f22dYiBjFmV4LJJkGjTaCnlDK +u54hMU1tWTyFcoY9TBzZ1XA+ng5RQ/QADeL2PYrTW4s/mLI3jfKKD53EI4uM2FjW +37ZfuxTamhCR7/lxM4COg9K70y5uebfqJvuoXAwXLOzVbdfF5b9fJFbL67kaK2ti +MT3Wt39mhXzclLTDAgMBAAGjQzBBMAwGA1UdEwQFMAMBAf8wCwYDVR0PBAQDAgQw +MBMGA1UdJQQMMAoGCCsGAQUFBwMBMA8GA1UdEQQIMAaHBH8AAAEwDQYJKoZIhvcN +AQELBQADggEBAJxgHeduV9q2BuKnrt+sjXLGn/HwbMbgGbgFK6kUKJBWtv6Pa7JJ +m4teDmTMWiaeB2g4N2bmaWTuEZzzShNKG5roFeWm1ilFMAyzkb+VifN4YuDKH62F +3e259qsytiGbbJF3F//4sjfMw8qZVEPvspG1zKsASo0PpSOOUFmxcj0oMAXhnMrk +rRcbk6fufhyq0iZGl8ZLKTCrkjk0b3qlNs6UaRD9/XBB59VlQ8I338sfjV06edwY +jn5Amab0uyoFNEp70Y4WGxrxUTS1GAC1LCA13S7EnidD440UrnWALTarjmHAK6aW +war3JNM1mGB3o2iAtuOJlFIKLpI1x+1e8pI= +-----END CERTIFICATE----- +` + cer, err := tls.X509KeyPair([]byte(cert), []byte(key)) + + if err != nil { + t.Fatalf("Received error: %v", err) + } + + server := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var data client.Response + w.WriteHeader(http.StatusOK) + _ = json.NewEncoder(w).Encode(data) + })) + server.TLS = &tls.Config{Certificates: []tls.Certificate{cer}} + server.TLS.BuildNameToCertificate() + server.StartTLS() + defer server.Close() + + certFile, _ := ioutil.TempFile("", "influx-cert-") + certFile.WriteString(cert) + certFile.Close() + defer os.Remove(certFile.Name()) + + u, _ := url.Parse(server.URL) + + tests := []struct { + name string + unsafeSsl bool + expected error + }{ + {name: "validate certificates", unsafeSsl: false, expected: errors.New("error")}, + {name: "not validate certificates", unsafeSsl: true, expected: nil}, + } + + for _, test := range tests { + config := client.Config{URL: *u, UnsafeSsl: test.unsafeSsl} + c, err := client.NewClient(config) + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } + query := client.Query{} + _, err = c.Query(query) + + if (test.expected == nil) != (err == nil) { + t.Fatalf("%s: expected %v. got %v. unsafeSsl: %v", test.name, test.expected, err, test.unsafeSsl) + } + } +} + +func TestChunkedResponse(t *testing.T) { + s := `{"results":[{},{}]}{"results":[{}]}` + r := client.NewChunkedResponse(strings.NewReader(s)) + resp, err := r.NextResponse() + if err != nil { + t.Fatalf("unexpected error. 
expected %v, actual %v", nil, err) + } else if actual := len(resp.Results); actual != 2 { + t.Fatalf("unexpected number of results. expected %v, actual %v", 2, actual) + } + + resp, err = r.NextResponse() + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } else if actual := len(resp.Results); actual != 1 { + t.Fatalf("unexpected number of results. expected %v, actual %v", 1, actual) + } + + resp, err = r.NextResponse() + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } else if resp != nil { + t.Fatalf("unexpected response. expected %v, actual %v", nil, resp) + } +} diff --git a/vendor/github.com/influxdata/influxdb/client/v2/BUILD b/vendor/github.com/influxdata/influxdb/client/v2/BUILD index 817a562c231d..37d30e39aa4a 100644 --- a/vendor/github.com/influxdata/influxdb/client/v2/BUILD +++ b/vendor/github.com/influxdata/influxdb/client/v2/BUILD @@ -1,9 +1,4 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "go_default_library", @@ -12,9 +7,24 @@ go_library( "udp.go", ], importpath = "github.com/influxdata/influxdb/client/v2", + visibility = ["//visibility:public"], deps = ["//vendor/github.com/influxdata/influxdb/models:go_default_library"], ) +go_test( + name = "go_default_test", + srcs = ["client_test.go"], + importpath = "github.com/influxdata/influxdb/client/v2", + library = ":go_default_library", +) + +go_test( + name = "go_default_xtest", + srcs = ["example_test.go"], + importpath = "github.com/influxdata/influxdb/client/v2_test", + deps = [":go_default_library"], +) + filegroup( name = "package-srcs", srcs = glob(["**"]), @@ -26,4 +36,5 @@ filegroup( name = "all-srcs", srcs = [":package-srcs"], tags = ["automanaged"], + visibility = ["//visibility:public"], ) diff --git a/vendor/github.com/influxdata/influxdb/client/v2/client.go b/vendor/github.com/influxdata/influxdb/client/v2/client.go index 7ada440afc4c..becafd734d29 100644 --- a/vendor/github.com/influxdata/influxdb/client/v2/client.go +++ b/vendor/github.com/influxdata/influxdb/client/v2/client.go @@ -1,4 +1,4 @@ -package client +package client // import "github.com/influxdata/influxdb/client/v2" import ( "bytes" diff --git a/vendor/github.com/influxdata/influxdb/client/v2/client_test.go b/vendor/github.com/influxdata/influxdb/client/v2/client_test.go new file mode 100644 index 000000000000..30ee09df4466 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/client/v2/client_test.go @@ -0,0 +1,480 @@ +package client + +import ( + "encoding/json" + "net/http" + "net/http/httptest" + "reflect" + "strings" + "sync" + "testing" + "time" +) + +func TestUDPClient_Query(t *testing.T) { + config := UDPConfig{Addr: "localhost:8089"} + c, err := NewUDPClient(config) + if err != nil { + t.Errorf("unexpected error. expected %v, actual %v", nil, err) + } + defer c.Close() + query := Query{} + _, err = c.Query(query) + if err == nil { + t.Error("Querying UDP client should fail") + } +} + +func TestUDPClient_Ping(t *testing.T) { + config := UDPConfig{Addr: "localhost:8089"} + c, err := NewUDPClient(config) + if err != nil { + t.Errorf("unexpected error. expected %v, actual %v", nil, err) + } + defer c.Close() + + rtt, version, err := c.Ping(0) + if rtt != 0 || version != "" || err != nil { + t.Errorf("unexpected error. 
expected (%v, '%v', %v), actual (%v, '%v', %v)", 0, "", nil, rtt, version, err) + } +} + +func TestUDPClient_Write(t *testing.T) { + config := UDPConfig{Addr: "localhost:8089"} + c, err := NewUDPClient(config) + if err != nil { + t.Errorf("unexpected error. expected %v, actual %v", nil, err) + } + defer c.Close() + + bp, err := NewBatchPoints(BatchPointsConfig{}) + if err != nil { + t.Errorf("unexpected error. expected %v, actual %v", nil, err) + } + + fields := make(map[string]interface{}) + fields["value"] = 1.0 + pt, _ := NewPoint("cpu", make(map[string]string), fields) + bp.AddPoint(pt) + + err = c.Write(bp) + if err != nil { + t.Errorf("unexpected error. expected %v, actual %v", nil, err) + } +} + +func TestUDPClient_BadAddr(t *testing.T) { + config := UDPConfig{Addr: "foobar@wahoo"} + c, err := NewUDPClient(config) + if err == nil { + defer c.Close() + t.Error("Expected resolve error") + } +} + +func TestUDPClient_Batches(t *testing.T) { + var logger writeLogger + var cl udpclient + + cl.conn = &logger + cl.payloadSize = 20 // should allow for two points per batch + + // expected point should look like this: "cpu a=1i" + fields := map[string]interface{}{"a": 1} + + p, _ := NewPoint("cpu", nil, fields, time.Time{}) + + bp, _ := NewBatchPoints(BatchPointsConfig{}) + + for i := 0; i < 9; i++ { + bp.AddPoint(p) + } + + if err := cl.Write(bp); err != nil { + t.Fatalf("Unexpected error during Write: %v", err) + } + + if len(logger.writes) != 5 { + t.Errorf("Mismatched write count: got %v, exp %v", len(logger.writes), 5) + } +} + +func TestUDPClient_Split(t *testing.T) { + var logger writeLogger + var cl udpclient + + cl.conn = &logger + cl.payloadSize = 1 // force one field per point + + fields := map[string]interface{}{"a": 1, "b": 2, "c": 3, "d": 4} + + p, _ := NewPoint("cpu", nil, fields, time.Unix(1, 0)) + + bp, _ := NewBatchPoints(BatchPointsConfig{}) + + bp.AddPoint(p) + + if err := cl.Write(bp); err != nil { + t.Fatalf("Unexpected error during Write: %v", err) + } + + if len(logger.writes) != len(fields) { + t.Errorf("Mismatched write count: got %v, exp %v", len(logger.writes), len(fields)) + } +} + +type writeLogger struct { + writes [][]byte +} + +func (w *writeLogger) Write(b []byte) (int, error) { + w.writes = append(w.writes, append([]byte(nil), b...)) + return len(b), nil +} + +func (w *writeLogger) Close() error { return nil } + +func TestClient_Query(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var data Response + w.WriteHeader(http.StatusOK) + _ = json.NewEncoder(w).Encode(data) + })) + defer ts.Close() + + config := HTTPConfig{Addr: ts.URL} + c, _ := NewHTTPClient(config) + defer c.Close() + + query := Query{} + _, err := c.Query(query) + if err != nil { + t.Errorf("unexpected error. 
expected %v, actual %v", nil, err) + } +} + +func TestClient_BasicAuth(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + u, p, ok := r.BasicAuth() + + if !ok { + t.Errorf("basic auth error") + } + if u != "username" { + t.Errorf("unexpected username, expected %q, actual %q", "username", u) + } + if p != "password" { + t.Errorf("unexpected password, expected %q, actual %q", "password", p) + } + var data Response + w.WriteHeader(http.StatusOK) + _ = json.NewEncoder(w).Encode(data) + })) + defer ts.Close() + + config := HTTPConfig{Addr: ts.URL, Username: "username", Password: "password"} + c, _ := NewHTTPClient(config) + defer c.Close() + + query := Query{} + _, err := c.Query(query) + if err != nil { + t.Errorf("unexpected error. expected %v, actual %v", nil, err) + } +} + +func TestClient_Ping(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var data Response + w.WriteHeader(http.StatusNoContent) + _ = json.NewEncoder(w).Encode(data) + })) + defer ts.Close() + + config := HTTPConfig{Addr: ts.URL} + c, _ := NewHTTPClient(config) + defer c.Close() + + _, _, err := c.Ping(0) + if err != nil { + t.Errorf("unexpected error. expected %v, actual %v", nil, err) + } +} + +func TestClient_Concurrent_Use(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{}`)) + })) + defer ts.Close() + + config := HTTPConfig{Addr: ts.URL} + c, _ := NewHTTPClient(config) + defer c.Close() + + var wg sync.WaitGroup + wg.Add(3) + n := 1000 + + go func() { + defer wg.Done() + bp, err := NewBatchPoints(BatchPointsConfig{}) + if err != nil { + t.Errorf("got error %v", err) + } + + for i := 0; i < n; i++ { + if err = c.Write(bp); err != nil { + t.Fatalf("got error %v", err) + } + } + }() + + go func() { + defer wg.Done() + var q Query + for i := 0; i < n; i++ { + if _, err := c.Query(q); err != nil { + t.Fatalf("got error %v", err) + } + } + }() + + go func() { + defer wg.Done() + for i := 0; i < n; i++ { + c.Ping(time.Second) + } + }() + wg.Wait() +} + +func TestClient_Write(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var data Response + w.WriteHeader(http.StatusNoContent) + _ = json.NewEncoder(w).Encode(data) + })) + defer ts.Close() + + config := HTTPConfig{Addr: ts.URL} + c, _ := NewHTTPClient(config) + defer c.Close() + + bp, err := NewBatchPoints(BatchPointsConfig{}) + if err != nil { + t.Errorf("unexpected error. expected %v, actual %v", nil, err) + } + err = c.Write(bp) + if err != nil { + t.Errorf("unexpected error. expected %v, actual %v", nil, err) + } +} + +func TestClient_UserAgent(t *testing.T) { + receivedUserAgent := "" + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + receivedUserAgent = r.UserAgent() + + var data Response + w.WriteHeader(http.StatusOK) + _ = json.NewEncoder(w).Encode(data) + })) + defer ts.Close() + + _, err := http.Get(ts.URL) + if err != nil { + t.Errorf("unexpected error. 
expected %v, actual %v", nil, err) + } + + tests := []struct { + name string + userAgent string + expected string + }{ + { + name: "Empty user agent", + userAgent: "", + expected: "InfluxDBClient", + }, + { + name: "Custom user agent", + userAgent: "Test Influx Client", + expected: "Test Influx Client", + }, + } + + for _, test := range tests { + + config := HTTPConfig{Addr: ts.URL, UserAgent: test.userAgent} + c, _ := NewHTTPClient(config) + defer c.Close() + + receivedUserAgent = "" + query := Query{} + _, err = c.Query(query) + if err != nil { + t.Errorf("unexpected error. expected %v, actual %v", nil, err) + } + if !strings.HasPrefix(receivedUserAgent, test.expected) { + t.Errorf("Unexpected user agent. expected %v, actual %v", test.expected, receivedUserAgent) + } + + receivedUserAgent = "" + bp, _ := NewBatchPoints(BatchPointsConfig{}) + err = c.Write(bp) + if err != nil { + t.Errorf("unexpected error. expected %v, actual %v", nil, err) + } + if !strings.HasPrefix(receivedUserAgent, test.expected) { + t.Errorf("Unexpected user agent. expected %v, actual %v", test.expected, receivedUserAgent) + } + + receivedUserAgent = "" + _, err := c.Query(query) + if err != nil { + t.Errorf("unexpected error. expected %v, actual %v", nil, err) + } + if receivedUserAgent != test.expected { + t.Errorf("Unexpected user agent. expected %v, actual %v", test.expected, receivedUserAgent) + } + } +} + +func TestClient_PointString(t *testing.T) { + const shortForm = "2006-Jan-02" + time1, _ := time.Parse(shortForm, "2013-Feb-03") + tags := map[string]string{"cpu": "cpu-total"} + fields := map[string]interface{}{"idle": 10.1, "system": 50.9, "user": 39.0} + p, _ := NewPoint("cpu_usage", tags, fields, time1) + + s := "cpu_usage,cpu=cpu-total idle=10.1,system=50.9,user=39 1359849600000000000" + if p.String() != s { + t.Errorf("Point String Error, got %s, expected %s", p.String(), s) + } + + s = "cpu_usage,cpu=cpu-total idle=10.1,system=50.9,user=39 1359849600000" + if p.PrecisionString("ms") != s { + t.Errorf("Point String Error, got %s, expected %s", + p.PrecisionString("ms"), s) + } +} + +func TestClient_PointWithoutTimeString(t *testing.T) { + tags := map[string]string{"cpu": "cpu-total"} + fields := map[string]interface{}{"idle": 10.1, "system": 50.9, "user": 39.0} + p, _ := NewPoint("cpu_usage", tags, fields) + + s := "cpu_usage,cpu=cpu-total idle=10.1,system=50.9,user=39" + if p.String() != s { + t.Errorf("Point String Error, got %s, expected %s", p.String(), s) + } + + if p.PrecisionString("ms") != s { + t.Errorf("Point String Error, got %s, expected %s", + p.PrecisionString("ms"), s) + } +} + +func TestClient_PointName(t *testing.T) { + tags := map[string]string{"cpu": "cpu-total"} + fields := map[string]interface{}{"idle": 10.1, "system": 50.9, "user": 39.0} + p, _ := NewPoint("cpu_usage", tags, fields) + + exp := "cpu_usage" + if p.Name() != exp { + t.Errorf("Error, got %s, expected %s", + p.Name(), exp) + } +} + +func TestClient_PointTags(t *testing.T) { + tags := map[string]string{"cpu": "cpu-total"} + fields := map[string]interface{}{"idle": 10.1, "system": 50.9, "user": 39.0} + p, _ := NewPoint("cpu_usage", tags, fields) + + if !reflect.DeepEqual(tags, p.Tags()) { + t.Errorf("Error, got %v, expected %v", + p.Tags(), tags) + } +} + +func TestClient_PointUnixNano(t *testing.T) { + const shortForm = "2006-Jan-02" + time1, _ := time.Parse(shortForm, "2013-Feb-03") + tags := map[string]string{"cpu": "cpu-total"} + fields := map[string]interface{}{"idle": 10.1, "system": 50.9, "user": 39.0} + p, _ := 
NewPoint("cpu_usage", tags, fields, time1) + + exp := int64(1359849600000000000) + if p.UnixNano() != exp { + t.Errorf("Error, got %d, expected %d", + p.UnixNano(), exp) + } +} + +func TestClient_PointFields(t *testing.T) { + tags := map[string]string{"cpu": "cpu-total"} + fields := map[string]interface{}{"idle": 10.1, "system": 50.9, "user": 39.0} + p, _ := NewPoint("cpu_usage", tags, fields) + + if !reflect.DeepEqual(fields, p.Fields()) { + t.Errorf("Error, got %v, expected %v", + p.Fields(), fields) + } +} + +func TestBatchPoints_PrecisionError(t *testing.T) { + _, err := NewBatchPoints(BatchPointsConfig{Precision: "foobar"}) + if err == nil { + t.Errorf("Precision: foobar should have errored") + } + + bp, _ := NewBatchPoints(BatchPointsConfig{Precision: "ns"}) + err = bp.SetPrecision("foobar") + if err == nil { + t.Errorf("Precision: foobar should have errored") + } +} + +func TestBatchPoints_SettersGetters(t *testing.T) { + bp, _ := NewBatchPoints(BatchPointsConfig{ + Precision: "ns", + Database: "db", + RetentionPolicy: "rp", + WriteConsistency: "wc", + }) + if bp.Precision() != "ns" { + t.Errorf("Expected: %s, got %s", bp.Precision(), "ns") + } + if bp.Database() != "db" { + t.Errorf("Expected: %s, got %s", bp.Database(), "db") + } + if bp.RetentionPolicy() != "rp" { + t.Errorf("Expected: %s, got %s", bp.RetentionPolicy(), "rp") + } + if bp.WriteConsistency() != "wc" { + t.Errorf("Expected: %s, got %s", bp.WriteConsistency(), "wc") + } + + bp.SetDatabase("db2") + bp.SetRetentionPolicy("rp2") + bp.SetWriteConsistency("wc2") + err := bp.SetPrecision("s") + if err != nil { + t.Errorf("Did not expect error: %s", err.Error()) + } + + if bp.Precision() != "s" { + t.Errorf("Expected: %s, got %s", bp.Precision(), "s") + } + if bp.Database() != "db2" { + t.Errorf("Expected: %s, got %s", bp.Database(), "db2") + } + if bp.RetentionPolicy() != "rp2" { + t.Errorf("Expected: %s, got %s", bp.RetentionPolicy(), "rp2") + } + if bp.WriteConsistency() != "wc2" { + t.Errorf("Expected: %s, got %s", bp.WriteConsistency(), "wc2") + } +} diff --git a/vendor/github.com/influxdata/influxdb/client/v2/example_test.go b/vendor/github.com/influxdata/influxdb/client/v2/example_test.go new file mode 100644 index 000000000000..68bb24bc70d2 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/client/v2/example_test.go @@ -0,0 +1,265 @@ +package client_test + +import ( + "fmt" + "math/rand" + "os" + "time" + + "github.com/influxdata/influxdb/client/v2" +) + +// Create a new client +func ExampleClient() { + // NOTE: this assumes you've setup a user and have setup shell env variables, + // namely INFLUX_USER/INFLUX_PWD. If not just omit Username/Password below. 
+ _, err := client.NewHTTPClient(client.HTTPConfig{ + Addr: "http://localhost:8086", + Username: os.Getenv("INFLUX_USER"), + Password: os.Getenv("INFLUX_PWD"), + }) + if err != nil { + fmt.Println("Error creating InfluxDB Client: ", err.Error()) + } +} + +// Write a point using the UDP client +func ExampleClient_uDP() { + // Make client + config := client.UDPConfig{Addr: "localhost:8089"} + c, err := client.NewUDPClient(config) + if err != nil { + fmt.Println("Error: ", err.Error()) + } + defer c.Close() + + // Create a new point batch + bp, _ := client.NewBatchPoints(client.BatchPointsConfig{ + Precision: "s", + }) + + // Create a point and add to batch + tags := map[string]string{"cpu": "cpu-total"} + fields := map[string]interface{}{ + "idle": 10.1, + "system": 53.3, + "user": 46.6, + } + pt, err := client.NewPoint("cpu_usage", tags, fields, time.Now()) + if err != nil { + fmt.Println("Error: ", err.Error()) + } + bp.AddPoint(pt) + + // Write the batch + c.Write(bp) +} + +// Ping the cluster using the HTTP client +func ExampleClient_Ping() { + // Make client + c, err := client.NewHTTPClient(client.HTTPConfig{ + Addr: "http://localhost:8086", + }) + if err != nil { + fmt.Println("Error creating InfluxDB Client: ", err.Error()) + } + defer c.Close() + + _, _, err = c.Ping(0) + if err != nil { + fmt.Println("Error pinging InfluxDB Cluster: ", err.Error()) + } +} + +// Write a point using the HTTP client +func ExampleClient_write() { + // Make client + c, err := client.NewHTTPClient(client.HTTPConfig{ + Addr: "http://localhost:8086", + }) + if err != nil { + fmt.Println("Error creating InfluxDB Client: ", err.Error()) + } + defer c.Close() + + // Create a new point batch + bp, _ := client.NewBatchPoints(client.BatchPointsConfig{ + Database: "BumbleBeeTuna", + Precision: "s", + }) + + // Create a point and add to batch + tags := map[string]string{"cpu": "cpu-total"} + fields := map[string]interface{}{ + "idle": 10.1, + "system": 53.3, + "user": 46.6, + } + pt, err := client.NewPoint("cpu_usage", tags, fields, time.Now()) + if err != nil { + fmt.Println("Error: ", err.Error()) + } + bp.AddPoint(pt) + + // Write the batch + c.Write(bp) +} + +// Create a batch and add a point +func ExampleBatchPoints() { + // Create a new point batch + bp, _ := client.NewBatchPoints(client.BatchPointsConfig{ + Database: "BumbleBeeTuna", + Precision: "s", + }) + + // Create a point and add to batch + tags := map[string]string{"cpu": "cpu-total"} + fields := map[string]interface{}{ + "idle": 10.1, + "system": 53.3, + "user": 46.6, + } + pt, err := client.NewPoint("cpu_usage", tags, fields, time.Now()) + if err != nil { + fmt.Println("Error: ", err.Error()) + } + bp.AddPoint(pt) +} + +// Using the BatchPoints setter functions +func ExampleBatchPoints_setters() { + // Create a new point batch + bp, _ := client.NewBatchPoints(client.BatchPointsConfig{}) + bp.SetDatabase("BumbleBeeTuna") + bp.SetPrecision("ms") + + // Create a point and add to batch + tags := map[string]string{"cpu": "cpu-total"} + fields := map[string]interface{}{ + "idle": 10.1, + "system": 53.3, + "user": 46.6, + } + pt, err := client.NewPoint("cpu_usage", tags, fields, time.Now()) + if err != nil { + fmt.Println("Error: ", err.Error()) + } + bp.AddPoint(pt) +} + +// Create a new point with a timestamp +func ExamplePoint() { + tags := map[string]string{"cpu": "cpu-total"} + fields := map[string]interface{}{ + "idle": 10.1, + "system": 53.3, + "user": 46.6, + } + pt, err := client.NewPoint("cpu_usage", tags, fields, time.Now()) + if err == nil { + 
fmt.Println("We created a point: ", pt.String()) + } +} + +// Create a new point without a timestamp +func ExamplePoint_withoutTime() { + tags := map[string]string{"cpu": "cpu-total"} + fields := map[string]interface{}{ + "idle": 10.1, + "system": 53.3, + "user": 46.6, + } + pt, err := client.NewPoint("cpu_usage", tags, fields) + if err == nil { + fmt.Println("We created a point w/o time: ", pt.String()) + } +} + +// Write 1000 points +func ExampleClient_write1000() { + sampleSize := 1000 + + // Make client + c, err := client.NewHTTPClient(client.HTTPConfig{ + Addr: "http://localhost:8086", + }) + if err != nil { + fmt.Println("Error creating InfluxDB Client: ", err.Error()) + } + defer c.Close() + + rand.Seed(42) + + bp, _ := client.NewBatchPoints(client.BatchPointsConfig{ + Database: "systemstats", + Precision: "us", + }) + + for i := 0; i < sampleSize; i++ { + regions := []string{"us-west1", "us-west2", "us-west3", "us-east1"} + tags := map[string]string{ + "cpu": "cpu-total", + "host": fmt.Sprintf("host%d", rand.Intn(1000)), + "region": regions[rand.Intn(len(regions))], + } + + idle := rand.Float64() * 100.0 + fields := map[string]interface{}{ + "idle": idle, + "busy": 100.0 - idle, + } + + pt, err := client.NewPoint( + "cpu_usage", + tags, + fields, + time.Now(), + ) + if err != nil { + println("Error:", err.Error()) + continue + } + bp.AddPoint(pt) + } + + err = c.Write(bp) + if err != nil { + fmt.Println("Error: ", err.Error()) + } +} + +// Make a Query +func ExampleClient_query() { + // Make client + c, err := client.NewHTTPClient(client.HTTPConfig{ + Addr: "http://localhost:8086", + }) + if err != nil { + fmt.Println("Error creating InfluxDB Client: ", err.Error()) + } + defer c.Close() + + q := client.NewQuery("SELECT count(value) FROM shapes", "square_holes", "ns") + if response, err := c.Query(q); err == nil && response.Error() == nil { + fmt.Println(response.Results) + } +} + +// Create a Database with a query +func ExampleClient_createDatabase() { + // Make client + c, err := client.NewHTTPClient(client.HTTPConfig{ + Addr: "http://localhost:8086", + }) + if err != nil { + fmt.Println("Error creating InfluxDB Client: ", err.Error()) + } + defer c.Close() + + q := client.NewQuery("CREATE DATABASE telegraf", "", "") + if response, err := c.Query(q); err == nil && response.Error() == nil { + fmt.Println(response.Results) + } +} diff --git a/vendor/github.com/influxdata/influxdb/errors.go b/vendor/github.com/influxdata/influxdb/errors.go new file mode 100644 index 000000000000..13c782ad32aa --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/errors.go @@ -0,0 +1,44 @@ +package influxdb + +import ( + "errors" + "fmt" + "strings" +) + +// ErrFieldTypeConflict is returned when a new field already exists with a +// different type. +var ErrFieldTypeConflict = errors.New("field type conflict") + +// ErrDatabaseNotFound indicates that a database operation failed on the +// specified database because the specified database does not exist. +func ErrDatabaseNotFound(name string) error { return fmt.Errorf("database not found: %s", name) } + +// ErrRetentionPolicyNotFound indicates that the named retention policy could +// not be found in the database. +func ErrRetentionPolicyNotFound(name string) error { + return fmt.Errorf("retention policy not found: %s", name) +} + +// IsClientError indicates whether an error is a known client error. 
+func IsClientError(err error) bool { + if err == nil { + return false + } + + if strings.HasPrefix(err.Error(), ErrFieldTypeConflict.Error()) { + return true + } + + return false +} + +const upgradeMessage = `******************************************************************* + UNSUPPORTED SHARD FORMAT DETECTED + +As of version 0.11, only tsm shards are supported. Please use the +influx_tsm tool to convert non-tsm shards. + +More information can be found at the documentation site: +https://docs.influxdata.com/influxdb/v0.10/administration/upgrading +*******************************************************************` diff --git a/vendor/github.com/influxdata/influxdb/gobuild.sh b/vendor/github.com/influxdata/influxdb/gobuild.sh new file mode 100755 index 000000000000..9a96e7e9b79c --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/gobuild.sh @@ -0,0 +1,18 @@ +#!/bin/bash +# This script run inside the Dockerfile_build_ubuntu64_git container and +# gets the latests Go source code and compiles it. +# Then passes control over to the normal build.py script + +set -e + +cd /go/src +git fetch --all +git checkout $GO_CHECKOUT +# Merge in recent changes if we are on a branch +# if we checked out a tag just ignore the error +git pull || true +./make.bash + +# Run normal build.py +cd "$PROJECT_DIR" +exec ./build.py "$@" diff --git a/vendor/github.com/influxdata/influxdb/influxdb.go b/vendor/github.com/influxdata/influxdb/influxdb.go new file mode 100644 index 000000000000..0befd2992d0a --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/influxdb.go @@ -0,0 +1 @@ +package influxdb // import "github.com/influxdata/influxdb" diff --git a/vendor/github.com/influxdata/influxdb/models/BUILD b/vendor/github.com/influxdata/influxdb/models/BUILD index d0ab73ad3b5d..889d46406103 100644 --- a/vendor/github.com/influxdata/influxdb/models/BUILD +++ b/vendor/github.com/influxdata/influxdb/models/BUILD @@ -1,9 +1,4 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "go_default_library", @@ -17,9 +12,28 @@ go_library( "time.go", ], importpath = "github.com/influxdata/influxdb/models", + visibility = ["//visibility:public"], deps = ["//vendor/github.com/influxdata/influxdb/pkg/escape:go_default_library"], ) +go_test( + name = "go_default_test", + srcs = ["inline_strconv_parse_test.go"], + importpath = "github.com/influxdata/influxdb/models", + library = ":go_default_library", +) + +go_test( + name = "go_default_xtest", + srcs = [ + "inline_fnv_test.go", + "points_test.go", + "statistic_test.go", + ], + importpath = "github.com/influxdata/influxdb/models_test", + deps = [":go_default_library"], +) + filegroup( name = "package-srcs", srcs = glob(["**"]), @@ -31,4 +45,5 @@ filegroup( name = "all-srcs", srcs = [":package-srcs"], tags = ["automanaged"], + visibility = ["//visibility:public"], ) diff --git a/vendor/github.com/influxdata/influxdb/models/inline_fnv.go b/vendor/github.com/influxdata/influxdb/models/inline_fnv.go index 9391946d82e4..dea25901cccc 100644 --- a/vendor/github.com/influxdata/influxdb/models/inline_fnv.go +++ b/vendor/github.com/influxdata/influxdb/models/inline_fnv.go @@ -1,4 +1,4 @@ -package models +package models // import "github.com/influxdata/influxdb/models" // from stdlib hash/fnv/fnv.go const ( diff --git a/vendor/github.com/influxdata/influxdb/models/inline_fnv_test.go 
b/vendor/github.com/influxdata/influxdb/models/inline_fnv_test.go new file mode 100644 index 000000000000..62bc53ec0dce --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/models/inline_fnv_test.go @@ -0,0 +1,29 @@ +package models_test + +import ( + "hash/fnv" + "testing" + "testing/quick" + + "github.com/influxdata/influxdb/models" +) + +func TestInlineFNV64aEquivalenceFuzz(t *testing.T) { + f := func(data []byte) bool { + stdlibFNV := fnv.New64a() + stdlibFNV.Write(data) + want := stdlibFNV.Sum64() + + inlineFNV := models.NewInlineFNV64a() + inlineFNV.Write(data) + got := inlineFNV.Sum64() + + return want == got + } + cfg := &quick.Config{ + MaxCount: 10000, + } + if err := quick.Check(f, cfg); err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/influxdata/influxdb/models/inline_strconv_parse.go b/vendor/github.com/influxdata/influxdb/models/inline_strconv_parse.go index 727ce35802e1..dcc8ae402ed4 100644 --- a/vendor/github.com/influxdata/influxdb/models/inline_strconv_parse.go +++ b/vendor/github.com/influxdata/influxdb/models/inline_strconv_parse.go @@ -1,4 +1,4 @@ -package models +package models // import "github.com/influxdata/influxdb/models" import ( "reflect" diff --git a/vendor/github.com/influxdata/influxdb/models/inline_strconv_parse_test.go b/vendor/github.com/influxdata/influxdb/models/inline_strconv_parse_test.go new file mode 100644 index 000000000000..119f543d78b0 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/models/inline_strconv_parse_test.go @@ -0,0 +1,103 @@ +package models + +import ( + "strconv" + "testing" + "testing/quick" +) + +func TestParseIntBytesEquivalenceFuzz(t *testing.T) { + f := func(b []byte, base int, bitSize int) bool { + exp, expErr := strconv.ParseInt(string(b), base, bitSize) + got, gotErr := parseIntBytes(b, base, bitSize) + + return exp == got && checkErrs(expErr, gotErr) + } + + cfg := &quick.Config{ + MaxCount: 10000, + } + + if err := quick.Check(f, cfg); err != nil { + t.Fatal(err) + } +} + +func TestParseIntBytesValid64bitBase10EquivalenceFuzz(t *testing.T) { + buf := []byte{} + f := func(n int64) bool { + buf = strconv.AppendInt(buf[:0], n, 10) + + exp, expErr := strconv.ParseInt(string(buf), 10, 64) + got, gotErr := parseIntBytes(buf, 10, 64) + + return exp == got && checkErrs(expErr, gotErr) + } + + cfg := &quick.Config{ + MaxCount: 10000, + } + + if err := quick.Check(f, cfg); err != nil { + t.Fatal(err) + } +} + +func TestParseFloatBytesEquivalenceFuzz(t *testing.T) { + f := func(b []byte, bitSize int) bool { + exp, expErr := strconv.ParseFloat(string(b), bitSize) + got, gotErr := parseFloatBytes(b, bitSize) + + return exp == got && checkErrs(expErr, gotErr) + } + + cfg := &quick.Config{ + MaxCount: 10000, + } + + if err := quick.Check(f, cfg); err != nil { + t.Fatal(err) + } +} + +func TestParseFloatBytesValid64bitEquivalenceFuzz(t *testing.T) { + buf := []byte{} + f := func(n float64) bool { + buf = strconv.AppendFloat(buf[:0], n, 'f', -1, 64) + + exp, expErr := strconv.ParseFloat(string(buf), 64) + got, gotErr := parseFloatBytes(buf, 64) + + return exp == got && checkErrs(expErr, gotErr) + } + + cfg := &quick.Config{ + MaxCount: 10000, + } + + if err := quick.Check(f, cfg); err != nil { + t.Fatal(err) + } +} + +func TestParseBoolBytesEquivalence(t *testing.T) { + var buf []byte + for _, s := range []string{"1", "t", "T", "TRUE", "true", "True", "0", "f", "F", "FALSE", "false", "False", "fail", "TrUe", "FAlSE", "numbers", ""} { + buf = append(buf[:0], s...) 
+ + exp, expErr := strconv.ParseBool(s) + got, gotErr := parseBoolBytes(buf) + + if got != exp || !checkErrs(expErr, gotErr) { + t.Errorf("Failed to parse boolean value %q correctly: wanted (%t, %v), got (%t, %v)", s, exp, expErr, got, gotErr) + } + } +} + +func checkErrs(a, b error) bool { + if (a == nil) != (b == nil) { + return false + } + + return a == nil || a.Error() == b.Error() +} diff --git a/vendor/github.com/influxdata/influxdb/models/points.go b/vendor/github.com/influxdata/influxdb/models/points.go index a4104b8a3fd1..6d083f7e7d1a 100644 --- a/vendor/github.com/influxdata/influxdb/models/points.go +++ b/vendor/github.com/influxdata/influxdb/models/points.go @@ -1,4 +1,4 @@ -package models +package models // import "github.com/influxdata/influxdb/models" import ( "bytes" diff --git a/vendor/github.com/influxdata/influxdb/models/points_test.go b/vendor/github.com/influxdata/influxdb/models/points_test.go new file mode 100644 index 000000000000..0652964696ce --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/models/points_test.go @@ -0,0 +1,2170 @@ +package models_test + +import ( + "bytes" + "fmt" + "math" + "math/rand" + "reflect" + "strconv" + "strings" + "testing" + "time" + + "github.com/influxdata/influxdb/models" +) + +var ( + tags = models.NewTags(map[string]string{"foo": "bar", "apple": "orange", "host": "serverA", "region": "uswest"}) + fields = models.Fields{ + "int64": int64(math.MaxInt64), + "uint32": uint32(math.MaxUint32), + "string": "String field that has a decent length, probably some log message or something", + "boolean": false, + "float64-tiny": float64(math.SmallestNonzeroFloat64), + "float64-large": float64(math.MaxFloat64), + } + maxFloat64 = strconv.FormatFloat(math.MaxFloat64, 'f', 1, 64) + minFloat64 = strconv.FormatFloat(-math.MaxFloat64, 'f', 1, 64) +) + +func TestMarshal(t *testing.T) { + got := tags.HashKey() + if exp := ",apple=orange,foo=bar,host=serverA,region=uswest"; string(got) != exp { + t.Log("got: ", string(got)) + t.Log("exp: ", exp) + t.Error("invalid match") + } +} + +func BenchmarkMarshal(b *testing.B) { + for i := 0; i < b.N; i++ { + tags.HashKey() + } +} + +func TestPoint_StringSize(t *testing.T) { + testPoint_cube(t, func(p models.Point) { + l := p.StringSize() + s := p.String() + + if l != len(s) { + t.Errorf("Incorrect length for %q. got %v, exp %v", s, l, len(s)) + } + }) + +} + +func TestPoint_AppendString(t *testing.T) { + testPoint_cube(t, func(p models.Point) { + got := p.AppendString(nil) + exp := []byte(p.String()) + + if !reflect.DeepEqual(exp, got) { + t.Errorf("AppendString() didn't match String(): got %v, exp %v", got, exp) + } + }) +} + +func testPoint_cube(t *testing.T, f func(p models.Point)) { + // heard of a table-driven test? let's make a cube-driven test... 
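+ // the three slices below are the cube's axes; every tag set is combined + // with every field set and every timestamp (3 x 3 x 5 = 45 points).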
+ tagList := []models.Tags{nil, {models.Tag{Key: []byte("foo"), Value: []byte("bar")}}, tags} + fieldList := []models.Fields{{"a": 42.0}, {"a": 42, "b": "things"}, fields} + timeList := []time.Time{time.Time{}, time.Unix(0, 0), time.Unix(-34526, 0), time.Unix(231845, 0), time.Now()} + + for _, tagSet := range tagList { + for _, fieldSet := range fieldList { + for _, pointTime := range timeList { + p, err := models.NewPoint("test", tagSet, fieldSet, pointTime) + if err != nil { + t.Errorf("unexpected error creating point: %v", err) + continue + } + + f(p) + } + } + } +} + +var p models.Point + +func BenchmarkNewPoint(b *testing.B) { + ts := time.Now() + for i := 0; i < b.N; i++ { + p, _ = models.NewPoint("measurement", tags, fields, ts) + } +} + +func BenchmarkParsePointNoTags5000(b *testing.B) { + var batch [5000]string + for i := 0; i < len(batch); i++ { + batch[i] = `cpu value=1i 1000000000` + } + lines := strings.Join(batch[:], "\n") + b.ResetTimer() + for i := 0; i < b.N; i++ { + models.ParsePoints([]byte(lines)) + b.SetBytes(int64(len(lines))) + } +} + +func BenchmarkParsePointNoTags(b *testing.B) { + line := `cpu value=1i 1000000000` + for i := 0; i < b.N; i++ { + models.ParsePoints([]byte(line)) + b.SetBytes(int64(len(line))) + } +} + +func BenchmarkParsePointWithPrecisionN(b *testing.B) { + line := `cpu value=1i 1000000000` + defaultTime := time.Now().UTC() + for i := 0; i < b.N; i++ { + models.ParsePointsWithPrecision([]byte(line), defaultTime, "n") + b.SetBytes(int64(len(line))) + } +} + +func BenchmarkParsePointWithPrecisionU(b *testing.B) { + line := `cpu value=1i 1000000000` + defaultTime := time.Now().UTC() + for i := 0; i < b.N; i++ { + models.ParsePointsWithPrecision([]byte(line), defaultTime, "u") + b.SetBytes(int64(len(line))) + } +} + +func BenchmarkParsePointsTagsSorted2(b *testing.B) { + line := `cpu,host=serverA,region=us-west value=1i 1000000000` + for i := 0; i < b.N; i++ { + models.ParsePoints([]byte(line)) + b.SetBytes(int64(len(line))) + } +} + +func BenchmarkParsePointsTagsSorted5(b *testing.B) { + line := `cpu,env=prod,host=serverA,region=us-west,target=servers,zone=1c value=1i 1000000000` + for i := 0; i < b.N; i++ { + models.ParsePoints([]byte(line)) + b.SetBytes(int64(len(line))) + } +} + +func BenchmarkParsePointsTagsSorted10(b *testing.B) { + line := `cpu,env=prod,host=serverA,region=us-west,tag1=value1,tag2=value2,tag3=value3,tag4=value4,tag5=value5,target=servers,zone=1c value=1i 1000000000` + for i := 0; i < b.N; i++ { + models.ParsePoints([]byte(line)) + b.SetBytes(int64(len(line))) + } +} + +func BenchmarkParsePointsTagsUnSorted2(b *testing.B) { + line := `cpu,region=us-west,host=serverA value=1i 1000000000` + for i := 0; i < b.N; i++ { + pt, _ := models.ParsePoints([]byte(line)) + b.SetBytes(int64(len(line))) + pt[0].Key() + } +} + +func BenchmarkParsePointsTagsUnSorted5(b *testing.B) { + line := `cpu,region=us-west,host=serverA,env=prod,target=servers,zone=1c value=1i 1000000000` + for i := 0; i < b.N; i++ { + pt, _ := models.ParsePoints([]byte(line)) + b.SetBytes(int64(len(line))) + pt[0].Key() + } +} + +func BenchmarkParsePointsTagsUnSorted10(b *testing.B) { + line := `cpu,region=us-west,host=serverA,env=prod,target=servers,zone=1c,tag1=value1,tag2=value2,tag3=value3,tag4=value4,tag5=value5 value=1i 1000000000` + for i := 0; i < b.N; i++ { + pt, _ := models.ParsePoints([]byte(line)) + b.SetBytes(int64(len(line))) + pt[0].Key() + } +} + +func BenchmarkParseKey(b *testing.B) { + line := 
`cpu,region=us-west,host=serverA,env=prod,target=servers,zone=1c,tag1=value1,tag2=value2,tag3=value3,tag4=value4,tag5=value5` + for i := 0; i < b.N; i++ { + models.ParseKey([]byte(line)) + } +} + +// TestPoint wraps a models.Point but also makes available the raw +// arguments to the Point. +// +// This is useful for ensuring that comparisons between results of +// operations on Points match the expected input data to the Point, +// since models.Point does not expose the raw input data (e.g., tags) +// via its API. +type TestPoint struct { + RawFields models.Fields + RawTags models.Tags + RawTime time.Time + models.Point +} + +// NewTestPoint returns a new TestPoint. +// +// NewTestPoint panics if it is not a valid models.Point. +func NewTestPoint(name string, tags models.Tags, fields models.Fields, time time.Time) TestPoint { + return TestPoint{ + RawTags: tags, + RawFields: fields, + RawTime: time, + Point: models.MustNewPoint(name, tags, fields, time), + } +} + +func test(t *testing.T, line string, point TestPoint) { + pts, err := models.ParsePointsWithPrecision([]byte(line), time.Unix(0, 0), "n") + if err != nil { + t.Fatalf(`ParsePoints("%s") mismatch. got %v, exp nil`, line, err) + } + + if exp := 1; len(pts) != exp { + t.Fatalf(`ParsePoints("%s") len mismatch. got %d, exp %d`, line, len(pts), exp) + } + + if exp := point.Key(); !bytes.Equal(pts[0].Key(), exp) { + t.Errorf("ParsePoints(\"%s\") key mismatch.\ngot %v\nexp %v", line, string(pts[0].Key()), string(exp)) + } + + if exp := len(point.Tags()); len(pts[0].Tags()) != exp { + t.Errorf(`ParsePoints("%s") tags mismatch. got %v, exp %v`, line, pts[0].Tags(), exp) + } + + for _, tag := range pts[0].Tags() { + if !bytes.Equal(tag.Value, point.RawTags.Get(tag.Key)) { + t.Errorf(`ParsePoints("%s") tags mismatch. got %s, exp %s`, line, tag.Value, point.RawTags.Get(tag.Key)) + } + } + + for name, value := range point.RawFields { + val := pts[0].Fields()[name] + expfval, ok := val.(float64) + + if ok && math.IsNaN(expfval) { + gotfval, ok := value.(float64) + if ok && !math.IsNaN(gotfval) { + t.Errorf(`ParsePoints("%s") field '%s' mismatch. exp NaN`, line, name) + } + } else if !reflect.DeepEqual(pts[0].Fields()[name], value) { + t.Errorf(`ParsePoints("%s") field '%s' mismatch. got %[3]v (%[3]T), exp %[4]v (%[4]T)`, line, name, pts[0].Fields()[name], value) + } + } + + if !pts[0].Time().Equal(point.Time()) { + t.Errorf(`ParsePoints("%s") time mismatch. got %v, exp %v`, line, pts[0].Time(), point.Time()) + } + + if !strings.HasPrefix(pts[0].String(), line) { + t.Errorf("ParsePoints string mismatch.\ngot: %v\nexp: %v", pts[0].String(), line) + } +} + +func TestParsePointNoValue(t *testing.T) { + pts, err := models.ParsePointsString("") + if err != nil { + t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, "", err) + } + + if exp := 0; len(pts) != exp { + t.Errorf(`ParsePoints("%s") len mismatch. got %v, exp %v`, "", len(pts), exp) + } +} + +func TestParsePointWhitespaceValue(t *testing.T) { + pts, err := models.ParsePointsString(" ") + if err != nil { + t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, "", err) + } + + if exp := 0; len(pts) != exp { + t.Errorf(`ParsePoints("%s") len mismatch. 
got %v, exp %v`, "", len(pts), exp) + } +} + +func TestParsePointNoFields(t *testing.T) { + expectedSuffix := "missing fields" + examples := []string{ + "cpu_load_short,host=server01,region=us-west", + "cpu", + "cpu,host==", + "=", + } + + for i, example := range examples { + _, err := models.ParsePointsString(example) + if err == nil { + t.Errorf(`[Example %d] ParsePoints("%s") mismatch. got nil, exp error`, i, example) + } else if !strings.HasSuffix(err.Error(), expectedSuffix) { + t.Errorf(`[Example %d] ParsePoints("%s") mismatch. got %q, exp suffix %q`, i, example, err, expectedSuffix) + } + } +} + +func TestParsePointNoTimestamp(t *testing.T) { + test(t, "cpu value=1", NewTestPoint("cpu", nil, models.Fields{"value": 1.0}, time.Unix(0, 0))) +} + +func TestParsePointMissingQuote(t *testing.T) { + expectedSuffix := "unbalanced quotes" + examples := []string{ + `cpu,host=serverA value="test`, + `cpu,host=serverA value="test""`, + } + + for i, example := range examples { + _, err := models.ParsePointsString(example) + if err == nil { + t.Errorf(`[Example %d] ParsePoints("%s") mismatch. got nil, exp error`, i, example) + } else if !strings.HasSuffix(err.Error(), expectedSuffix) { + t.Errorf(`[Example %d] ParsePoints("%s") mismatch. got %q, exp suffix %q`, i, example, err, expectedSuffix) + } + } +} + +func TestParsePointMissingTagKey(t *testing.T) { + expectedSuffix := "missing tag key" + examples := []string{ + `cpu, value=1`, + `cpu,`, + `cpu,,,`, + `cpu,host=serverA,=us-east value=1i`, + `cpu,host=serverAa\,,=us-east value=1i`, + `cpu,host=serverA\,,=us-east value=1i`, + `cpu, =serverA value=1i`, + } + + for i, example := range examples { + _, err := models.ParsePointsString(example) + if err == nil { + t.Errorf(`[Example %d] ParsePoints("%s") mismatch. got nil, exp error`, i, example) + } else if !strings.HasSuffix(err.Error(), expectedSuffix) { + t.Errorf(`[Example %d] ParsePoints("%s") mismatch. got %q, exp suffix %q`, i, example, err, expectedSuffix) + } + } + + _, err := models.ParsePointsString(`cpu,host=serverA,\ =us-east value=1i`) + if err != nil { + t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,\ =us-east value=1i`, err) + } +} + +func TestParsePointMissingTagValue(t *testing.T) { + expectedSuffix := "missing tag value" + examples := []string{ + `cpu,host`, + `cpu,host,`, + `cpu,host=`, + `cpu,host value=1i`, + `cpu,host=serverA,region value=1i`, + `cpu,host=serverA,region= value=1i`, + `cpu,host=serverA,region=,zone=us-west value=1i`, + } + + for i, example := range examples { + _, err := models.ParsePointsString(example) + if err == nil { + t.Errorf(`[Example %d] ParsePoints("%s") mismatch. got nil, exp error`, i, example) + } else if !strings.HasSuffix(err.Error(), expectedSuffix) { + t.Errorf(`[Example %d] ParsePoints("%s") mismatch. got %q, exp suffix %q`, i, example, err, expectedSuffix) + } + } +} + +func TestParsePointInvalidTagFormat(t *testing.T) { + expectedSuffix := "invalid tag format" + examples := []string{ + `cpu,host=f=o,`, + `cpu,host=f\==o,`, + } + + for i, example := range examples { + _, err := models.ParsePointsString(example) + if err == nil { + t.Errorf(`[Example %d] ParsePoints("%s") mismatch. got nil, exp error`, i, example) + } else if !strings.HasSuffix(err.Error(), expectedSuffix) { + t.Errorf(`[Example %d] ParsePoints("%s") mismatch. 
got %q, exp suffix %q`, i, example, err, expectedSuffix) + } + } +} + +func TestParsePointMissingFieldName(t *testing.T) { + _, err := models.ParsePointsString(`cpu,host=serverA,region=us-west =`) + if err == nil { + t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west =`) + } + + _, err = models.ParsePointsString(`cpu,host=serverA,region=us-west =123i`) + if err == nil { + t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west =123i`) + } + + _, err = models.ParsePointsString(`cpu,host=serverA,region=us-west a\ =123i`) + if err != nil { + t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west a\ =123i`, err) + } + + _, err = models.ParsePointsString(`cpu,host=serverA,region=us-west value=123i,=456i`) + if err == nil { + t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west value=123i,=456i`) + } +} + +func TestParsePointMissingFieldValue(t *testing.T) { + _, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=`) + if err == nil { + t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west value=`) + } + + _, err = models.ParsePointsString(`cpu,host=serverA,region=us-west value= 1000000000i`) + if err == nil { + t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west value= 1000000000i`) + } + + _, err = models.ParsePointsString(`cpu,host=serverA,region=us-west value=,value2=1i`) + if err == nil { + t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west value=,value2=1i`) + } + + _, err = models.ParsePointsString(`cpu,host=server01,region=us-west 1434055562000000000i`) + if err == nil { + t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=server01,region=us-west 1434055562000000000i`) + } + + _, err = models.ParsePointsString(`cpu,host=server01,region=us-west value=1i,b`) + if err == nil { + t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=server01,region=us-west value=1i,b`) + } +} + +func TestParsePointBadNumber(t *testing.T) { + for _, tt := range []string{ + "cpu v=- ", + "cpu v=-i ", + "cpu v=-. ", + "cpu v=. ", + "cpu v=1.0i ", + "cpu v=1ii ", + "cpu v=1a ", + "cpu v=-e-e-e ", + "cpu v=42+3 ", + "cpu v= ", + } { + _, err := models.ParsePointsString(tt) + if err == nil { + t.Errorf("Point %q should be invalid", tt) + } + } +} + +func TestParsePointMaxInt64(t *testing.T) { + // out of range + _, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=9223372036854775808i`) + exp := `unable to parse 'cpu,host=serverA,region=us-west value=9223372036854775808i': unable to parse integer 9223372036854775808: strconv.ParseInt: parsing "9223372036854775808": value out of range` + if err == nil || (err != nil && err.Error() != exp) { + t.Fatalf("Error mismatch:\nexp: %s\ngot: %v", exp, err) + } + + // max int + p, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=9223372036854775807i`) + if err != nil { + t.Fatalf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=9223372036854775807i`, err) + } + if exp, got := int64(9223372036854775807), p[0].Fields()["value"].(int64); exp != got { + t.Fatalf("ParsePoints Value mismatch. \nexp: %v\ngot: %v", exp, got) + } + + // leading zeros + _, err = models.ParsePointsString(`cpu,host=serverA,region=us-west value=0009223372036854775807i`) + if err != nil { + t.Fatalf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=0009223372036854775807i`, err) + } +}
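+ +// TestParsePointMinInt64 and the float64 tests below probe each numeric +// boundary the same way: a value just out of range must error, the boundary +// value itself must parse, and redundant leading zeros must be tolerated.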
\nexp: %v\ngot: %v", exp, got) + } + + // leading zeros + _, err = models.ParsePointsString(`cpu,host=serverA,region=us-west value=0009223372036854775807i`) + if err != nil { + t.Fatalf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=0009223372036854775807i`, err) + } +} + +func TestParsePointMinInt64(t *testing.T) { + // out of range + _, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=-9223372036854775809i`) + if err == nil { + t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west value=-9223372036854775809i`) + } + + // min int + _, err = models.ParsePointsString(`cpu,host=serverA,region=us-west value=-9223372036854775808i`) + if err != nil { + t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=-9223372036854775808i`, err) + } + + // leading zeros + _, err = models.ParsePointsString(`cpu,host=serverA,region=us-west value=-0009223372036854775808i`) + if err != nil { + t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=-0009223372036854775808i`, err) + } +} + +func TestParsePointMaxFloat64(t *testing.T) { + // out of range + _, err := models.ParsePointsString(fmt.Sprintf(`cpu,host=serverA,region=us-west value=%s`, "1"+string(maxFloat64))) + if err == nil { + t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west value=...`) + } + + // max float + _, err = models.ParsePointsString(fmt.Sprintf(`cpu,host=serverA,region=us-west value=%s`, string(maxFloat64))) + if err != nil { + t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=9223372036854775807`, err) + } + + // leading zeros + _, err = models.ParsePointsString(fmt.Sprintf(`cpu,host=serverA,region=us-west value=%s`, "0000"+string(maxFloat64))) + if err != nil { + t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=0009223372036854775807`, err) + } +} + +func TestParsePointMinFloat64(t *testing.T) { + // out of range + _, err := models.ParsePointsString(fmt.Sprintf(`cpu,host=serverA,region=us-west value=%s`, "-1"+string(minFloat64)[1:])) + if err == nil { + t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west value=...`) + } + + // min float + _, err = models.ParsePointsString(fmt.Sprintf(`cpu,host=serverA,region=us-west value=%s`, string(minFloat64))) + if err != nil { + t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=...`, err) + } + + // leading zeros + _, err = models.ParsePointsString(fmt.Sprintf(`cpu,host=serverA,region=us-west value=%s`, "-0000000"+string(minFloat64)[1:])) + if err != nil { + t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=...`, err) + } +} + +func TestParsePointNumberNonNumeric(t *testing.T) { + _, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=.1a`) + if err == nil { + t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west value=.1a`) + } +} + +func TestParsePointNegativeWrongPlace(t *testing.T) { + _, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=0.-1`) + if err == nil { + t.Errorf(`ParsePoints("%s") mismatch. 
+ +func TestParsePointNegativeWrongPlace(t *testing.T) { + _, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=0.-1`) + if err == nil { + t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west value=0.-1`) + } +} + +func TestParsePointOnlyNegativeSign(t *testing.T) { + _, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=-`) + if err == nil { + t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west value=-`) + } +} + +func TestParsePointFloatMultipleDecimals(t *testing.T) { + _, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=1.1.1`) + if err == nil { + t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west value=1.1.1`) + } +} + +func TestParsePointInteger(t *testing.T) { + _, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=1i`) + if err != nil { + t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=1i`, err) + } +} + +func TestParsePointNegativeInteger(t *testing.T) { + _, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=-1i`) + if err != nil { + t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=-1i`, err) + } +} + +func TestParsePointNegativeFloat(t *testing.T) { + _, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=-1.0`) + if err != nil { + t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=-1.0`, err) + } +} + +func TestParsePointFloatNoLeadingDigit(t *testing.T) { + _, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=.1`) + if err != nil { + t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=.1`, err) + } +} + +func TestParsePointFloatScientific(t *testing.T) { + _, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=1.0e4`) + if err != nil { + t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=1.0e4`, err) + } + + pts, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=1e4`) + if err != nil { + t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=1e4`, err) + } + + if pts[0].Fields()["value"] != 1e4 { + t.Errorf(`ParsePoints("%s") value mismatch. got %v, exp %v`, `cpu,host=serverA,region=us-west value=1e4`, pts[0].Fields()["value"], 1e4) + } +} + +func TestParsePointFloatScientificUpper(t *testing.T) { + _, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=1.0E4`) + if err != nil { + t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=1.0E4`, err) + } + + pts, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=1E4`) + if err != nil { + t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=1E4`, err) + } + + if pts[0].Fields()["value"] != 1e4 { + t.Errorf(`ParsePoints("%s") value mismatch. got %v, exp %v`, `cpu,host=serverA,region=us-west value=1E4`, pts[0].Fields()["value"], 1e4) + } +} + +func TestParsePointFloatScientificDecimal(t *testing.T) { + _, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=1.0e-4`) + if err != nil { + t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=1.0e-4`, err) + } +} + +func TestParsePointFloatNegativeScientific(t *testing.T) { + _, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=-1.0e-4`) + if err != nil { + t.Errorf(`ParsePoints("%s") mismatch. 
got %v, exp nil`, `cpu,host=serverA,region=us-west value=-1.0e-4`, err) + } +} + +func TestParsePointBooleanInvalid(t *testing.T) { + _, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=a`) + if err == nil { + t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west value=a`) + } +} + +func TestParsePointScientificIntInvalid(t *testing.T) { + _, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=9ie10`) + if err == nil { + t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west value=9ie10`) + } + + _, err = models.ParsePointsString(`cpu,host=serverA,region=us-west value=9e10i`) + if err == nil { + t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west value=9e10i`) + } +} + +func TestParsePointWhitespace(t *testing.T) { + examples := []string{ + `cpu value=1.0 1257894000000000000`, + `cpu value=1.0 1257894000000000000`, + `cpu value=1.0 1257894000000000000`, + `cpu value=1.0 1257894000000000000 `, + `cpu value=1.0 1257894000000000000 +`, + `cpu value=1.0 1257894000000000000 +`, + } + + expPoint := NewTestPoint("cpu", models.Tags{}, models.Fields{"value": 1.0}, time.Unix(0, 1257894000000000000)) + for i, example := range examples { + pts, err := models.ParsePoints([]byte(example)) + if err != nil { + t.Fatalf(`[Example %d] ParsePoints("%s") error. got %v, exp nil`, i, example, err) + } + + if got, exp := len(pts), 1; got != exp { + t.Fatalf("[Example %d] got %d points, expected %d", i, got, exp) + } + + if got, exp := pts[0].Name(), expPoint.Name(); got != exp { + t.Fatalf("[Example %d] got %v measurement, expected %v", i, got, exp) + } + + if got, exp := len(pts[0].Fields()), len(expPoint.Fields()); got != exp { + t.Fatalf("[Example %d] got %d fields, expected %d", i, got, exp) + } + + if got, exp := pts[0].Fields()["value"], expPoint.Fields()["value"]; got != exp { + t.Fatalf(`[Example %d] got %v for field "value", expected %v`, i, got, exp) + } + + if got, exp := pts[0].Time().UnixNano(), expPoint.Time().UnixNano(); got != exp { + t.Fatalf(`[Example %d] got %d time, expected %d`, i, got, exp) + } + } +} + +func TestParsePointUnescape(t *testing.T) { + // commas in measurement name + test(t, `foo\,bar value=1i`, + NewTestPoint( + "foo,bar", // comma in the name + models.NewTags(map[string]string{}), + models.Fields{ + "value": int64(1), + }, + time.Unix(0, 0))) + + // comma in measurement name with tags + test(t, `cpu\,main,regions=east value=1.0`, + NewTestPoint( + "cpu,main", // comma in the name + models.NewTags(map[string]string{ + "regions": "east", + }), + models.Fields{ + "value": 1.0, + }, + time.Unix(0, 0))) + + // spaces in measurement name + test(t, `cpu\ load,region=east value=1.0`, + NewTestPoint( + "cpu load", // space in the name + models.NewTags(map[string]string{ + "region": "east", + }), + models.Fields{ + "value": 1.0, + }, + time.Unix(0, 0))) + + // equals in measurement name + test(t, `cpu\=load,region=east value=1.0`, + NewTestPoint( + `cpu\=load`, // backslash is literal + models.NewTags(map[string]string{ + "region": "east", + }), + models.Fields{ + "value": 1.0, + }, + time.Unix(0, 0))) + + // equals in measurement name + test(t, `cpu=load,region=east value=1.0`, + NewTestPoint( + `cpu=load`, // literal equals is fine in measurement name + models.NewTags(map[string]string{ + "region": "east", + }), + models.Fields{ + "value": 1.0, + }, + time.Unix(0, 0))) + + // commas in tag names + test(t, `cpu,region\,zone=east 
value=1.0`, + NewTestPoint("cpu", + models.NewTags(map[string]string{ + "region,zone": "east", // comma in the tag key + }), + models.Fields{ + "value": 1.0, + }, + time.Unix(0, 0))) + + // spaces in tag name + test(t, `cpu,region\ zone=east value=1.0`, + NewTestPoint("cpu", + models.NewTags(map[string]string{ + "region zone": "east", // space in the tag name + }), + models.Fields{ + "value": 1.0, + }, + time.Unix(0, 0))) + + // backslash with escaped equals in tag name + test(t, `cpu,reg\\=ion=east value=1.0`, + NewTestPoint("cpu", + models.NewTags(map[string]string{ + `reg\=ion`: "east", + }), + models.Fields{ + "value": 1.0, + }, + time.Unix(0, 0))) + + // space as tag name + test(t, `cpu,\ =east value=1.0`, + NewTestPoint("cpu", + models.NewTags(map[string]string{ + " ": "east", // tag name is a single space + }), + models.Fields{ + "value": 1.0, + }, + time.Unix(0, 0))) + + // commas in tag values + test(t, `cpu,regions=east\,west value=1.0`, + NewTestPoint("cpu", + models.NewTags(map[string]string{ + "regions": "east,west", // comma in the tag value + }), + models.Fields{ + "value": 1.0, + }, + time.Unix(0, 0))) + + // backslash literal followed by escaped space + test(t, `cpu,regions=\\ east value=1.0`, + NewTestPoint( + "cpu", + models.NewTags(map[string]string{ + "regions": `\ east`, + }), + models.Fields{ + "value": 1.0, + }, + time.Unix(0, 0))) + + // backslash literal followed by escaped space + test(t, `cpu,regions=eas\\ t value=1.0`, + NewTestPoint( + "cpu", + models.NewTags(map[string]string{ + "regions": `eas\ t`, + }), + models.Fields{ + "value": 1.0, + }, + time.Unix(0, 0))) + + // backslash literal followed by trailing space + test(t, `cpu,regions=east\\ value=1.0`, + NewTestPoint( + "cpu", + models.NewTags(map[string]string{ + "regions": `east\ `, + }), + models.Fields{ + "value": 1.0, + }, + time.Unix(0, 0))) + + // spaces in tag values + test(t, `cpu,regions=east\ west value=1.0`, + NewTestPoint("cpu", + models.NewTags(map[string]string{ + "regions": "east west", // space in the tag value + }), + models.Fields{ + "value": 1.0, + }, + time.Unix(0, 0))) + + // commas in field keys + test(t, `cpu,regions=east value\,ms=1.0`, + NewTestPoint("cpu", + models.NewTags(map[string]string{ + "regions": "east", + }), + models.Fields{ + "value,ms": 1.0, // comma in the field key + }, + time.Unix(0, 0))) + + // spaces in field keys + test(t, `cpu,regions=east value\ ms=1.0`, + NewTestPoint("cpu", + models.NewTags(map[string]string{ + "regions": "east", + }), + models.Fields{ + "value ms": 1.0, // space in the field key + }, + time.Unix(0, 0))) + + // tag with no value + test(t, `cpu,regions=east value="1"`, + NewTestPoint("cpu", + models.NewTags(map[string]string{ + "regions": "east", + "foobar": "", + }), + models.Fields{ + "value": "1", + }, + time.Unix(0, 0))) + + // commas in field values + test(t, `cpu,regions=east value="1,0"`, + NewTestPoint("cpu", + models.NewTags(map[string]string{ + "regions": "east", + }), + models.Fields{ + "value": "1,0", // comma in the field value + }, + time.Unix(0, 0))) + + // random character escaped + test(t, `cpu,regions=eas\t value=1.0`, + NewTestPoint( + "cpu", + models.NewTags(map[string]string{ + "regions": "eas\\t", + }), + models.Fields{ + "value": 1.0, + }, + time.Unix(0, 0))) + + // backslash literal followed by escaped characters + test(t, `cpu,regions=\\,\,\=east value=1.0`, + NewTestPoint( + "cpu", + models.NewTags(map[string]string{ + "regions": `\,,=east`, + }), + models.Fields{ + "value": 1.0, + }, + time.Unix(0, 0))) + + // field 
keys using escape char. + test(t, `cpu \a=1i`, + NewTestPoint( + "cpu", + models.NewTags(map[string]string{}), + models.Fields{ + "\\a": int64(1), // Left as parsed since it's not a known escape sequence. + }, + time.Unix(0, 0))) + + // measurement, tag and tag value with equals + test(t, `cpu=load,equals\=foo=tag\=value value=1i`, + NewTestPoint( + "cpu=load", // Not escaped + models.NewTags(map[string]string{ + "equals=foo": "tag=value", // Tag and value unescaped + }), + models.Fields{ + "value": int64(1), + }, + time.Unix(0, 0))) + +} + +func TestParsePointWithTags(t *testing.T) { + test(t, + "cpu,host=serverA,region=us-east value=1.0 1000000000", + NewTestPoint("cpu", + models.NewTags(map[string]string{"host": "serverA", "region": "us-east"}), + models.Fields{"value": 1.0}, time.Unix(1, 0))) +} + +func TestParsePointWithDuplicateTags(t *testing.T) { + for i, tt := range []struct { + line string + err string + }{ + { + line: `cpu,host=serverA,host=serverB value=1i 1000000000`, + err: `unable to parse 'cpu,host=serverA,host=serverB value=1i 1000000000': duplicate tags`, + }, + { + line: `cpu,b=2,b=1,c=3 value=1i 1000000000`, + err: `unable to parse 'cpu,b=2,b=1,c=3 value=1i 1000000000': duplicate tags`, + }, + { + line: `cpu,b=2,c=3,b=1 value=1i 1000000000`, + err: `unable to parse 'cpu,b=2,c=3,b=1 value=1i 1000000000': duplicate tags`, + }, + } { + _, err := models.ParsePointsString(tt.line) + if err == nil || tt.err != err.Error() { + t.Errorf("%d. ParsePoint() expected error '%s'. got '%s'", i, tt.err, err) + } + } +} + +func TestParsePointWithStringField(t *testing.T) { + test(t, `cpu,host=serverA,region=us-east value=1.0,str="foo",str2="bar" 1000000000`, + NewTestPoint("cpu", + models.NewTags(map[string]string{ + "host": "serverA", + "region": "us-east", + }), + models.Fields{ + "value": 1.0, + "str": "foo", + "str2": "bar", + }, + time.Unix(1, 0)), + ) + + test(t, `cpu,host=serverA,region=us-east str="foo \" bar" 1000000000`, + NewTestPoint("cpu", + models.NewTags(map[string]string{ + "host": "serverA", + "region": "us-east", + }), + models.Fields{ + "str": `foo " bar`, + }, + time.Unix(1, 0)), + ) + +} + +func TestParsePointWithStringWithSpaces(t *testing.T) { + test(t, `cpu,host=serverA,region=us-east value=1.0,str="foo bar" 1000000000`, + NewTestPoint( + "cpu", + models.NewTags(map[string]string{ + "host": "serverA", + "region": "us-east", + }), + models.Fields{ + "value": 1.0, + "str": "foo bar", // spaces in string value + }, + time.Unix(1, 0)), + ) +} + +func TestParsePointWithStringWithNewline(t *testing.T) { + test(t, "cpu,host=serverA,region=us-east value=1.0,str=\"foo\nbar\" 1000000000", + NewTestPoint( + "cpu", + models.NewTags(map[string]string{ + "host": "serverA", + "region": "us-east", + }), + models.Fields{ + "value": 1.0, + "str": "foo\nbar", // newline in string value + }, + time.Unix(1, 0)), + ) +} + +func TestParsePointWithStringWithCommas(t *testing.T) { + // escaped comma + test(t, `cpu,host=serverA,region=us-east value=1.0,str="foo\,bar" 1000000000`, + NewTestPoint( + "cpu", + models.NewTags(map[string]string{ + "host": "serverA", + "region": "us-east", + }), + models.Fields{ + "value": 1.0, + "str": `foo\,bar`, // commas in string value + }, + time.Unix(1, 0)), + ) + + // non-escaped comma + test(t, `cpu,host=serverA,region=us-east value=1.0,str="foo,bar" 1000000000`, + NewTestPoint( + "cpu", + models.NewTags(map[string]string{ + "host": "serverA", + "region": "us-east", + }), + models.Fields{ + "value": 1.0, + "str": "foo,bar", // commas in string value + 
}, + time.Unix(1, 0)), + ) + + // string w/ trailing escape chars + test(t, `cpu,host=serverA,region=us-east str="foo\\",str2="bar" 1000000000`, + NewTestPoint( + "cpu", + models.NewTags(map[string]string{ + "host": "serverA", + "region": "us-east", + }), + models.Fields{ + "str": "foo\\", // trailing escape char + "str2": "bar", + }, + time.Unix(1, 0)), + ) +} + +func TestParsePointQuotedMeasurement(t *testing.T) { + // quotes in the measurement name are kept verbatim + test(t, `"cpu",host=serverA,region=us-east value=1.0 1000000000`, + NewTestPoint( + `"cpu"`, + models.NewTags(map[string]string{ + "host": "serverA", + "region": "us-east", + }), + models.Fields{ + "value": 1.0, + }, + time.Unix(1, 0)), + ) +} + +func TestParsePointQuotedTags(t *testing.T) { + test(t, `cpu,"host"="serverA",region=us-east value=1.0 1000000000`, + NewTestPoint( + "cpu", + models.NewTags(map[string]string{ + `"host"`: `"serverA"`, + "region": "us-east", + }), + models.Fields{ + "value": 1.0, + }, + time.Unix(1, 0)), + ) +} + +func TestParsePointsUnbalancedQuotedTags(t *testing.T) { + pts, err := models.ParsePointsString("baz,mytag=\"a x=1 1441103862125\nbaz,mytag=a z=1 1441103862126") + if err != nil { + t.Fatalf("ParsePoints failed: %v", err) + } + + if exp := 2; len(pts) != exp { + t.Fatalf("ParsePoints count mismatch. got %v, exp %v", len(pts), exp) + } + + // Expected " in the tag value + exp := models.MustNewPoint("baz", models.NewTags(map[string]string{"mytag": `"a`}), + models.Fields{"x": float64(1)}, time.Unix(0, 1441103862125)) + + if pts[0].String() != exp.String() { + t.Errorf("Point mismatch:\ngot: %v\nexp: %v", pts[0].String(), exp.String()) + } + + // Expected two points to ensure we did not overscan the line + exp = models.MustNewPoint("baz", models.NewTags(map[string]string{"mytag": `a`}), + models.Fields{"z": float64(1)}, time.Unix(0, 1441103862126)) + + if pts[1].String() != exp.String() { + t.Errorf("Point mismatch:\ngot: %v\nexp: %v", pts[1].String(), exp.String()) + } + +} + +func TestParsePointEscapedStringsAndCommas(t *testing.T) { + // non-escaped comma and quotes + test(t, `cpu,host=serverA,region=us-east value="{Hello\"{,}\" World}" 1000000000`, + NewTestPoint( + "cpu", + models.NewTags(map[string]string{ + "host": "serverA", + "region": "us-east", + }), + models.Fields{ + "value": `{Hello"{,}" World}`, + }, + time.Unix(1, 0)), + ) + + // escaped comma and quotes + test(t, `cpu,host=serverA,region=us-east value="{Hello\"{\,}\" World}" 1000000000`, + NewTestPoint( + "cpu", + models.NewTags(map[string]string{ + "host": "serverA", + "region": "us-east", + }), + models.Fields{ + "value": `{Hello"{\,}" World}`, + }, + time.Unix(1, 0)), + ) +} + +func TestParsePointWithStringWithEquals(t *testing.T) { + test(t, `cpu,host=serverA,region=us-east str="foo=bar",value=1.0 1000000000`, + NewTestPoint( + "cpu", + models.NewTags(map[string]string{ + "host": "serverA", + "region": "us-east", + }), + models.Fields{ + "value": 1.0, + "str": "foo=bar", // equals sign in string value + }, + time.Unix(1, 0)), + ) +} + +func TestParsePointWithStringWithBackslash(t *testing.T) { + test(t, `cpu value="test\\\"" 1000000000`, + NewTestPoint( + "cpu", + models.NewTags(map[string]string{}), + models.Fields{ + "value": `test\"`, + }, + time.Unix(1, 0)), + ) + + test(t, `cpu value="test\\" 1000000000`, + NewTestPoint( + "cpu", + models.NewTags(map[string]string{}), + models.Fields{ + "value": `test\`, + }, + time.Unix(1, 0)), + )
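+ + // each \\ pair in the input collapses to a single literal backslash, + // and a remaining \" becomes a literal double quote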
models.Fields{ + "value": `test\"`, + }, + time.Unix(1, 0)), + ) + + test(t, `cpu value="test\"" 1000000000`, + NewTestPoint( + "cpu", + models.NewTags(map[string]string{}), + models.Fields{ + "value": `test"`, + }, + time.Unix(1, 0)), + ) +} + +func TestParsePointWithBoolField(t *testing.T) { + test(t, `cpu,host=serverA,region=us-east true=true,t=t,T=T,TRUE=TRUE,True=True,false=false,f=f,F=F,FALSE=FALSE,False=False 1000000000`, + NewTestPoint( + "cpu", + models.NewTags(map[string]string{ + "host": "serverA", + "region": "us-east", + }), + models.Fields{ + "t": true, + "T": true, + "true": true, + "True": true, + "TRUE": true, + "f": false, + "F": false, + "false": false, + "False": false, + "FALSE": false, + }, + time.Unix(1, 0)), + ) +} + +func TestParsePointUnicodeString(t *testing.T) { + test(t, `cpu,host=serverA,region=us-east value="wè" 1000000000`, + NewTestPoint( + "cpu", + models.NewTags(map[string]string{ + "host": "serverA", + "region": "us-east", + }), + models.Fields{ + "value": "wè", + }, + time.Unix(1, 0)), + ) +} + +func TestParsePointNegativeTimestamp(t *testing.T) { + test(t, `cpu value=1 -1`, + NewTestPoint( + "cpu", + models.NewTags(map[string]string{}), + models.Fields{ + "value": 1.0, + }, + time.Unix(0, -1)), + ) +} + +func TestParsePointMaxTimestamp(t *testing.T) { + test(t, fmt.Sprintf(`cpu value=1 %d`, models.MaxNanoTime), + NewTestPoint( + "cpu", + models.NewTags(map[string]string{}), + models.Fields{ + "value": 1.0, + }, + time.Unix(0, models.MaxNanoTime)), + ) +} + +func TestParsePointMinTimestamp(t *testing.T) { + test(t, `cpu value=1 -9223372036854775806`, + NewTestPoint( + "cpu", + models.NewTags(map[string]string{}), + models.Fields{ + "value": 1.0, + }, + time.Unix(0, models.MinNanoTime)), + ) +} + +func TestParsePointInvalidTimestamp(t *testing.T) { + examples := []string{ + "cpu value=1 9223372036854775808", + "cpu value=1 -92233720368547758078", + "cpu value=1 -", + "cpu value=1 -/", + "cpu value=1 -1?", + "cpu value=1 1-", + "cpu value=1 9223372036854775807 12", + } + + for i, example := range examples { + _, err := models.ParsePointsString(example) + if err == nil { + t.Fatalf("[Example %d] ParsePoints failed: %v", i, err) + } + } +} + +func TestNewPointFloatWithoutDecimal(t *testing.T) { + test(t, `cpu value=1 1000000000`, + NewTestPoint( + "cpu", + models.NewTags(map[string]string{}), + models.Fields{ + "value": 1.0, + }, + time.Unix(1, 0)), + ) +} +func TestNewPointNegativeFloat(t *testing.T) { + test(t, `cpu value=-0.64 1000000000`, + NewTestPoint( + "cpu", + models.NewTags(map[string]string{}), + models.Fields{ + "value": -0.64, + }, + time.Unix(1, 0)), + ) +} + +func TestNewPointFloatNoDecimal(t *testing.T) { + test(t, `cpu value=1. 
+ +func TestNewPointFloatScientific(t *testing.T) { + test(t, `cpu value=6.632243e+06 1000000000`, + NewTestPoint( + "cpu", + models.NewTags(map[string]string{}), + models.Fields{ + "value": float64(6632243), + }, + time.Unix(1, 0)), + ) +} + +func TestNewPointLargeInteger(t *testing.T) { + test(t, `cpu value=6632243i 1000000000`, + NewTestPoint( + "cpu", + models.NewTags(map[string]string{}), + models.Fields{ + "value": int64(6632243), // if incorrectly encoded as a float, it would show up as 6.632243e+06 + }, + time.Unix(1, 0)), + ) +} + +func TestParsePointNaN(t *testing.T) { + _, err := models.ParsePointsString("cpu value=NaN 1000000000") + if err == nil { + t.Fatalf("ParsePoints expected error, got nil") + } + + _, err = models.ParsePointsString("cpu value=nAn 1000000000") + if err == nil { + t.Fatalf("ParsePoints expected error, got nil") + } + + _, err = models.ParsePointsString("cpu value=NaN") + if err == nil { + t.Fatalf("ParsePoints expected error, got nil") + } +} + +func TestNewPointLargeNumberOfTags(t *testing.T) { + tags := "" + for i := 0; i < 255; i++ { + tags += fmt.Sprintf(",tag%d=value%d", i, i) + } + + pt, err := models.ParsePointsString(fmt.Sprintf("cpu%s value=1", tags)) + if err != nil { + t.Fatalf("ParsePoints() with max tags failed: %v", err) + } + + if len(pt[0].Tags()) != 255 { + t.Fatalf("expected %d tags, got %d", 255, len(pt[0].Tags())) + } +} + +func TestParsePointIntsFloats(t *testing.T) { + pts, err := models.ParsePoints([]byte(`cpu,host=serverA,region=us-east int=10i,float=11.0,float2=12.1 1000000000`)) + if err != nil { + t.Fatalf(`ParsePoints() failed. got %s`, err) + } + + if exp := 1; len(pts) != exp { + t.Errorf("ParsePoint() len mismatch: got %v, exp %v", len(pts), exp) + } + pt := pts[0] + + if _, ok := pt.Fields()["int"].(int64); !ok { + t.Errorf("ParsePoint() int field mismatch: got %T, exp %T", pt.Fields()["int"], int64(10)) + } + + if _, ok := pt.Fields()["float"].(float64); !ok { + t.Errorf("ParsePoint() float field mismatch: got %T, exp %T", pt.Fields()["float"], float64(11.0)) + } + + if _, ok := pt.Fields()["float2"].(float64); !ok { + t.Errorf("ParsePoint() float2 field mismatch: got %T, exp %T", pt.Fields()["float2"], float64(12.1)) + } +} + +func TestParsePointKeyUnsorted(t *testing.T) { + pts, err := models.ParsePoints([]byte("cpu,last=1,first=2 value=1i")) + if err != nil { + t.Fatalf(`ParsePoints() failed. got %s`, err) + } + + if exp := 1; len(pts) != exp { + t.Errorf("ParsePoint() len mismatch: got %v, exp %v", len(pts), exp) + } + pt := pts[0] + + if exp := "cpu,first=2,last=1"; string(pt.Key()) != exp { + t.Errorf("ParsePoint key not sorted. got %v, exp %v", string(pt.Key()), exp) + } +} + +func TestParsePointToString(t *testing.T) { + line := `cpu,host=serverA,region=us-east bool=false,float=11,float2=12.123,int=10i,str="string val" 1000000000` + pts, err := models.ParsePoints([]byte(line)) + if err != nil { + t.Fatalf(`ParsePoints() failed. 
got %s`, err) + } + if exp := 1; len(pts) != exp { + t.Errorf("ParsePoint() len mismatch: got %v, exp %v", len(pts), exp) + } + pt := pts[0] + + got := pt.String() + if line != got { + t.Errorf("ParsePoint() to string mismatch:\n got %v\n exp %v", got, line) + } + + pt = models.MustNewPoint("cpu", models.NewTags(map[string]string{"host": "serverA", "region": "us-east"}), + models.Fields{"int": 10, "float": float64(11.0), "float2": float64(12.123), "bool": false, "str": "string val"}, + time.Unix(1, 0)) + + got = pt.String() + if line != got { + t.Errorf("NewPoint() to string mismatch:\n got %v\n exp %v", got, line) + } +} + +func TestParsePointsWithPrecision(t *testing.T) { + tests := []struct { + name string + line string + precision string + exp string + }{ + { + name: "nanosecond by default", + line: `cpu,host=serverA,region=us-east value=1.0 946730096789012345`, + precision: "", + exp: "cpu,host=serverA,region=us-east value=1.0 946730096789012345", + }, + { + name: "nanosecond", + line: `cpu,host=serverA,region=us-east value=1.0 946730096789012345`, + precision: "n", + exp: "cpu,host=serverA,region=us-east value=1.0 946730096789012345", + }, + { + name: "microsecond", + line: `cpu,host=serverA,region=us-east value=1.0 946730096789012`, + precision: "u", + exp: "cpu,host=serverA,region=us-east value=1.0 946730096789012000", + }, + { + name: "millisecond", + line: `cpu,host=serverA,region=us-east value=1.0 946730096789`, + precision: "ms", + exp: "cpu,host=serverA,region=us-east value=1.0 946730096789000000", + }, + { + name: "second", + line: `cpu,host=serverA,region=us-east value=1.0 946730096`, + precision: "s", + exp: "cpu,host=serverA,region=us-east value=1.0 946730096000000000", + }, + { + name: "minute", + line: `cpu,host=serverA,region=us-east value=1.0 15778834`, + precision: "m", + exp: "cpu,host=serverA,region=us-east value=1.0 946730040000000000", + }, + { + name: "hour", + line: `cpu,host=serverA,region=us-east value=1.0 262980`, + precision: "h", + exp: "cpu,host=serverA,region=us-east value=1.0 946728000000000000", + }, + } + for _, test := range tests { + pts, err := models.ParsePointsWithPrecision([]byte(test.line), time.Now().UTC(), test.precision) + if err != nil { + t.Fatalf(`%s: ParsePoints() failed. 
got %s`, test.name, err) + } + if exp := 1; len(pts) != exp { + t.Errorf("%s: ParsePoint() len mismatch: got %v, exp %v", test.name, len(pts), exp) + } + pt := pts[0] + + got := pt.String() + if got != test.exp { + t.Errorf("%s: ParsePoint() to string mismatch:\n got %v\n exp %v", test.name, got, test.exp) + } + } +} + +func TestParsePointsWithPrecisionNoTime(t *testing.T) { + line := `cpu,host=serverA,region=us-east value=1.0` + tm, _ := time.Parse(time.RFC3339Nano, "2000-01-01T12:34:56.789012345Z") + tests := []struct { + name string + precision string + exp string + }{ + { + name: "no precision", + precision: "", + exp: "cpu,host=serverA,region=us-east value=1.0 946730096789012345", + }, + { + name: "nanosecond precision", + precision: "n", + exp: "cpu,host=serverA,region=us-east value=1.0 946730096789012345", + }, + { + name: "microsecond precision", + precision: "u", + exp: "cpu,host=serverA,region=us-east value=1.0 946730096789012000", + }, + { + name: "millisecond precision", + precision: "ms", + exp: "cpu,host=serverA,region=us-east value=1.0 946730096789000000", + }, + { + name: "second precision", + precision: "s", + exp: "cpu,host=serverA,region=us-east value=1.0 946730096000000000", + }, + { + name: "minute precision", + precision: "m", + exp: "cpu,host=serverA,region=us-east value=1.0 946730040000000000", + }, + { + name: "hour precision", + precision: "h", + exp: "cpu,host=serverA,region=us-east value=1.0 946728000000000000", + }, + } + + for _, test := range tests { + pts, err := models.ParsePointsWithPrecision([]byte(line), tm, test.precision) + if err != nil { + t.Fatalf(`%s: ParsePoints() failed. got %s`, test.name, err) + } + if exp := 1; len(pts) != exp { + t.Errorf("%s: ParsePoint() len mismatch: got %v, exp %v", test.name, len(pts), exp) + } + pt := pts[0] + + got := pt.String() + if got != test.exp { + t.Errorf("%s: ParsePoint() to string mismatch:\n got %v\n exp %v", test.name, got, test.exp) + } + } +} + +func TestParsePointsWithPrecisionComments(t *testing.T) { + tests := []struct { + name string + batch string + exp string + lenPoints int + }{ + { + name: "comment only", + batch: `# comment only`, + exp: "cpu,host=serverA,region=us-east value=1.0 946730096789012345", + lenPoints: 0, + }, + { + name: "point with comment above", + batch: `# a point is below +cpu,host=serverA,region=us-east value=1.0 946730096789012345`, + exp: "cpu,host=serverA,region=us-east value=1.0 946730096789012345", + lenPoints: 1, + }, + { + name: "point with comment below", + batch: `cpu,host=serverA,region=us-east value=1.0 946730096789012345 +# end of points`, + exp: "cpu,host=serverA,region=us-east value=1.0 946730096789012345", + lenPoints: 1, + }, + { + name: "indented comment", + batch: ` # a point is below +cpu,host=serverA,region=us-east value=1.0 946730096789012345`, + exp: "cpu,host=serverA,region=us-east value=1.0 946730096789012345", + lenPoints: 1, + }, + } + for _, test := range tests { + pts, err := models.ParsePointsWithPrecision([]byte(test.batch), time.Now().UTC(), "") + if err != nil { + t.Fatalf(`%s: ParsePoints() failed. 
got %s`, test.name, err) + } + pointsLength := len(pts) + if exp := test.lenPoints; pointsLength != exp { + t.Errorf("%s: ParsePoint() len mismatch: got %v, exp %v", test.name, pointsLength, exp) + } + + if pointsLength > 0 { + pt := pts[0] + + got := pt.String() + if got != test.exp { + t.Errorf("%s: ParsePoint() to string mismatch:\n got %v\n exp %v", test.name, got, test.exp) + } + } + } +} + +func TestNewPointEscaped(t *testing.T) { + // commas + pt := models.MustNewPoint("cpu,main", models.NewTags(map[string]string{"tag,bar": "value"}), models.Fields{"name,bar": 1.0}, time.Unix(0, 0)) + if exp := `cpu\,main,tag\,bar=value name\,bar=1 0`; pt.String() != exp { + t.Errorf("NewPoint().String() mismatch.\ngot %v\nexp %v", pt.String(), exp) + } + + // spaces + pt = models.MustNewPoint("cpu main", models.NewTags(map[string]string{"tag bar": "value"}), models.Fields{"name bar": 1.0}, time.Unix(0, 0)) + if exp := `cpu\ main,tag\ bar=value name\ bar=1 0`; pt.String() != exp { + t.Errorf("NewPoint().String() mismatch.\ngot %v\nexp %v", pt.String(), exp) + } + + // equals + pt = models.MustNewPoint("cpu=main", models.NewTags(map[string]string{"tag=bar": "value=foo"}), models.Fields{"name=bar": 1.0}, time.Unix(0, 0)) + if exp := `cpu=main,tag\=bar=value\=foo name\=bar=1 0`; pt.String() != exp { + t.Errorf("NewPoint().String() mismatch.\ngot %v\nexp %v", pt.String(), exp) + } +} + +func TestNewPointWithoutField(t *testing.T) { + _, err := models.NewPoint("cpu", models.NewTags(map[string]string{"tag": "bar"}), models.Fields{}, time.Unix(0, 0)) + if err == nil { + t.Fatalf(`NewPoint() expected error. got nil`) + } +} + +func TestNewPointUnhandledType(t *testing.T) { + // nil value + pt := models.MustNewPoint("cpu", nil, models.Fields{"value": nil}, time.Unix(0, 0)) + if exp := `cpu value= 0`; pt.String() != exp { + t.Errorf("NewPoint().String() mismatch.\ngot %v\nexp %v", pt.String(), exp) + } + + // unsupported type gets stored as string + now := time.Unix(0, 0).UTC() + pt = models.MustNewPoint("cpu", nil, models.Fields{"value": now}, time.Unix(0, 0)) + if exp := `cpu value="1970-01-01 00:00:00 +0000 UTC" 0`; pt.String() != exp { + t.Errorf("NewPoint().String() mismatch.\ngot %v\nexp %v", pt.String(), exp) + } + + if exp := "1970-01-01 00:00:00 +0000 UTC"; pt.Fields()["value"] != exp { + t.Errorf("NewPoint().String() mismatch.\ngot %v\nexp %v", pt.String(), exp) + } +} + +func TestMakeKeyEscaped(t *testing.T) { + if exp, got := `cpu\ load`, models.MakeKey([]byte(`cpu\ load`), models.NewTags(map[string]string{})); string(got) != exp { + t.Errorf("MakeKey() mismatch.\ngot %v\nexp %v", got, exp) + } + + if exp, got := `cpu\ load`, models.MakeKey([]byte(`cpu load`), models.NewTags(map[string]string{})); string(got) != exp { + t.Errorf("MakeKey() mismatch.\ngot %v\nexp %v", got, exp) + } + + if exp, got := `cpu\,load`, models.MakeKey([]byte(`cpu\,load`), models.NewTags(map[string]string{})); string(got) != exp { + t.Errorf("MakeKey() mismatch.\ngot %v\nexp %v", got, exp) + } + + if exp, got := `cpu\,load`, models.MakeKey([]byte(`cpu,load`), models.NewTags(map[string]string{})); string(got) != exp { + t.Errorf("MakeKey() mismatch.\ngot %v\nexp %v", got, exp) + } + +} + +func TestPrecisionString(t *testing.T) { + tags := map[string]interface{}{"value": float64(1)} + tm, _ := time.Parse(time.RFC3339Nano, "2000-01-01T12:34:56.789012345Z") + tests := []struct { + name string + precision string + exp string + }{ + { + name: "no precision", + precision: "", + exp: "cpu value=1 946730096789012345", + }, + { + 
name: "nanosecond precision", + precision: "ns", + exp: "cpu value=1 946730096789012345", + }, + { + name: "microsecond precision", + precision: "u", + exp: "cpu value=1 946730096789012", + }, + { + name: "millisecond precision", + precision: "ms", + exp: "cpu value=1 946730096789", + }, + { + name: "second precision", + precision: "s", + exp: "cpu value=1 946730096", + }, + { + name: "minute precision", + precision: "m", + exp: "cpu value=1 15778834", + }, + { + name: "hour precision", + precision: "h", + exp: "cpu value=1 262980", + }, + } + + for _, test := range tests { + pt := models.MustNewPoint("cpu", nil, tags, tm) + act := pt.PrecisionString(test.precision) + + if act != test.exp { + t.Errorf("%s: PrecisionString() mismatch:\n actual: %v\n exp: %v", + test.name, act, test.exp) + } + } +} + +func TestRoundedString(t *testing.T) { + tags := map[string]interface{}{"value": float64(1)} + tm, _ := time.Parse(time.RFC3339Nano, "2000-01-01T12:34:56.789012345Z") + tests := []struct { + name string + precision time.Duration + exp string + }{ + { + name: "no precision", + precision: time.Duration(0), + exp: "cpu value=1 946730096789012345", + }, + { + name: "nanosecond precision", + precision: time.Nanosecond, + exp: "cpu value=1 946730096789012345", + }, + { + name: "microsecond precision", + precision: time.Microsecond, + exp: "cpu value=1 946730096789012000", + }, + { + name: "millisecond precision", + precision: time.Millisecond, + exp: "cpu value=1 946730096789000000", + }, + { + name: "second precision", + precision: time.Second, + exp: "cpu value=1 946730097000000000", + }, + { + name: "minute precision", + precision: time.Minute, + exp: "cpu value=1 946730100000000000", + }, + { + name: "hour precision", + precision: time.Hour, + exp: "cpu value=1 946731600000000000", + }, + } + + for _, test := range tests { + pt := models.MustNewPoint("cpu", nil, tags, tm) + act := pt.RoundedString(test.precision) + + if act != test.exp { + t.Errorf("%s: RoundedString() mismatch:\n actual: %v\n exp: %v", + test.name, act, test.exp) + } + } +} + +func TestParsePointsStringWithExtraBuffer(t *testing.T) { + b := make([]byte, 70*5000) + buf := bytes.NewBuffer(b) + key := "cpu,host=A,region=uswest" + buf.WriteString(fmt.Sprintf("%s value=%.3f 1\n", key, rand.Float64())) + + points, err := models.ParsePointsString(buf.String()) + if err != nil { + t.Fatalf("failed to write points: %s", err.Error()) + } + + pointKey := string(points[0].Key()) + + if len(key) != len(pointKey) { + t.Fatalf("expected length of both keys are same but got %d and %d", len(key), len(pointKey)) + } + + if key != pointKey { + t.Fatalf("expected both keys are same but got %s and %s", key, pointKey) + } +} + +func TestParsePointsQuotesInFieldKey(t *testing.T) { + buf := `cpu "a=1 +cpu value=2 1` + points, err := models.ParsePointsString(buf) + if err != nil { + t.Fatalf("failed to write points: %s", err.Error()) + } + + pointFields := points[0].Fields() + value, ok := pointFields["\"a"] + if !ok { + t.Fatalf("expected to parse field '\"a'") + } + + if value != float64(1) { + t.Fatalf("expected field value to be 1, got %v", value) + } + + // The following input should not parse + buf = `cpu "\, '= "\ v=1.0` + _, err = models.ParsePointsString(buf) + if err == nil { + t.Fatalf("expected parsing failure but got no error") + } +} + +func TestParsePointsQuotesInTags(t *testing.T) { + buf := `t159,label=hey\ "ya a=1i,value=0i +t159,label=another a=2i,value=1i 1` + points, err := models.ParsePointsString(buf) + if err != nil { + 
t.Fatalf("failed to write points: %s", err.Error()) + } + + if len(points) != 2 { + t.Fatalf("expected 2 points, got %d", len(points)) + } +} + +func TestParsePointsBlankLine(t *testing.T) { + buf := `cpu value=1i 1000000000 + +cpu value=2i 2000000000` + points, err := models.ParsePointsString(buf) + if err != nil { + t.Fatalf("failed to write points: %s", err.Error()) + } + + if len(points) != 2 { + t.Fatalf("expected 2 points, got %d", len(points)) + } +} + +func TestNewPointsWithBytesWithCorruptData(t *testing.T) { + corrupted := []byte{0, 0, 0, 3, 102, 111, 111, 0, 0, 0, 4, 61, 34, 65, 34, 1, 0, 0, 0, 14, 206, 86, 119, 24, 32, 72, 233, 168, 2, 148} + p, err := models.NewPointFromBytes(corrupted) + if p != nil || err == nil { + t.Fatalf("NewPointFromBytes: got: (%v, %v), expected: (nil, error)", p, err) + } +} + +func TestNewPointsRejectsEmptyFieldNames(t *testing.T) { + if _, err := models.NewPoint("foo", nil, models.Fields{"": 1}, time.Now()); err == nil { + t.Fatalf("new point with empty field name. got: nil, expected: error") + } +} + +func TestNewPointsRejectsMaxKey(t *testing.T) { + var key string + for i := 0; i < 65536; i++ { + key += "a" + } + + if _, err := models.NewPoint(key, nil, models.Fields{"value": 1}, time.Now()); err == nil { + t.Fatalf("new point with max key. got: nil, expected: error") + } + + if _, err := models.ParsePointsString(fmt.Sprintf("%v value=1", key)); err == nil { + t.Fatalf("parse point with max key. got: nil, expected: error") + } +} + +func TestParseKeyEmpty(t *testing.T) { + if _, _, err := models.ParseKey(nil); err != nil { + t.Fatalf("unexpected error: %v", err) + } +} + +func TestPoint_FieldIterator_Simple(t *testing.T) { + + p, err := models.ParsePoints([]byte(`m v=42i,f=42 36`)) + if err != nil { + t.Fatal(err) + } + + if len(p) != 1 { + t.Fatalf("wrong number of points, got %d, exp %d", len(p), 1) + } + + fi := p[0].FieldIterator() + + if !fi.Next() { + t.Fatal("field iterator terminated before first field") + } + + if fi.Type() != models.Integer { + t.Fatalf("'42i' should be an Integer, got %v", fi.Type()) + } + + if fi.IntegerValue() != 42 { + t.Fatalf("'42i' should be 42, got %d", fi.IntegerValue()) + } + + if !fi.Next() { + t.Fatalf("field iterator terminated before second field") + } + + if fi.Type() != models.Float { + t.Fatalf("'42' should be a Float, got %v", fi.Type()) + } + + if fi.FloatValue() != 42.0 { + t.Fatalf("'42' should be %f, got %f", 42.0, fi.FloatValue()) + } + + if fi.Next() { + t.Fatal("field iterator didn't terminate") + } +} + +func toFields(fi models.FieldIterator) models.Fields { + m := make(models.Fields) + for fi.Next() { + var v interface{} + switch fi.Type() { + case models.Float: + v = fi.FloatValue() + case models.Integer: + v = fi.IntegerValue() + case models.String: + v = fi.StringValue() + case models.Boolean: + v = fi.BooleanValue() + case models.Empty: + v = nil + default: + panic("unknown type") + } + m[string(fi.FieldKey())] = v + } + return m +} + +func TestPoint_FieldIterator_FieldMap(t *testing.T) { + + points, err := models.ParsePointsString(` +m v=42 +m v=42i +m v="string" +m v=true +m v="string\"with\"escapes" +m v=42i,f=42,g=42.314 +m a=2i,b=3i,c=true,d="stuff",e=-0.23,f=123.456 +`) + + if err != nil { + t.Fatal("failed to parse test points:", err) + } + + for _, p := range points { + exp := p.Fields() + got := toFields(p.FieldIterator()) + + if !reflect.DeepEqual(got, exp) { + t.Errorf("FieldIterator failed for %#q: got %#v, exp %#v", p.String(), got, exp) + } + } +} + +func 
TestPoint_FieldIterator_Delete_Begin(t *testing.T) { + points, err := models.ParsePointsString(`m a=1,b=2,c=3`) + if err != nil || len(points) != 1 { + t.Fatal("failed parsing point") + } + + fi := points[0].FieldIterator() + fi.Next() // a + fi.Delete() + + fi.Reset() + + got := toFields(fi) + exp := models.Fields{"b": float64(2), "c": float64(3)} + + if !reflect.DeepEqual(got, exp) { + t.Fatalf("Delete failed, got %#v, exp %#v", got, exp) + } +} + +func TestPoint_FieldIterator_Delete_Middle(t *testing.T) { + points, err := models.ParsePointsString(`m a=1,b=2,c=3`) + if err != nil || len(points) != 1 { + t.Fatal("failed parsing point") + } + + fi := points[0].FieldIterator() + fi.Next() // a + fi.Next() // b + fi.Delete() + + fi.Reset() + + got := toFields(fi) + exp := models.Fields{"a": float64(1), "c": float64(3)} + + if !reflect.DeepEqual(got, exp) { + t.Fatalf("Delete failed, got %#v, exp %#v", got, exp) + } +} + +func TestPoint_FieldIterator_Delete_End(t *testing.T) { + points, err := models.ParsePointsString(`m a=1,b=2,c=3`) + if err != nil || len(points) != 1 { + t.Fatal("failed parsing point") + } + + fi := points[0].FieldIterator() + fi.Next() // a + fi.Next() // b + fi.Next() // c + fi.Delete() + + fi.Reset() + + got := toFields(fi) + exp := models.Fields{"a": float64(1), "b": float64(2)} + + if !reflect.DeepEqual(got, exp) { + t.Fatalf("Delete failed, got %#v, exp %#v", got, exp) + } +} + +func TestPoint_FieldIterator_Delete_Nothing(t *testing.T) { + points, err := models.ParsePointsString(`m a=1,b=2,c=3`) + if err != nil || len(points) != 1 { + t.Fatal("failed parsing point") + } + + fi := points[0].FieldIterator() + fi.Delete() + + fi.Reset() + + got := toFields(fi) + exp := models.Fields{"a": float64(1), "b": float64(2), "c": float64(3)} + + if !reflect.DeepEqual(got, exp) { + t.Fatalf("Delete failed, got %#v, exp %#v", got, exp) + } +} + +func TestPoint_FieldIterator_Delete_Twice(t *testing.T) { + points, err := models.ParsePointsString(`m a=1,b=2,c=3`) + if err != nil || len(points) != 1 { + t.Fatal("failed parsing point") + } + + fi := points[0].FieldIterator() + fi.Next() // a + fi.Next() // b + fi.Delete() + fi.Delete() // no-op + + fi.Reset() + + got := toFields(fi) + exp := models.Fields{"a": float64(1), "c": float64(3)} + + if !reflect.DeepEqual(got, exp) { + t.Fatalf("Delete failed, got %#v, exp %#v", got, exp) + } +} diff --git a/vendor/github.com/influxdata/influxdb/models/statistic_test.go b/vendor/github.com/influxdata/influxdb/models/statistic_test.go new file mode 100644 index 000000000000..918c991342fa --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/models/statistic_test.go @@ -0,0 +1,55 @@ +package models_test + +import ( + "reflect" + "testing" + + "github.com/influxdata/influxdb/models" +) + +func TestTags_Merge(t *testing.T) { + examples := []struct { + Base map[string]string + Arg map[string]string + Result map[string]string + }{ + { + Base: nil, + Arg: nil, + Result: map[string]string{}, + }, + { + Base: nil, + Arg: map[string]string{"foo": "foo"}, + Result: map[string]string{"foo": "foo"}, + }, + { + Base: map[string]string{"foo": "foo"}, + Arg: nil, + Result: map[string]string{"foo": "foo"}, + }, + { + Base: map[string]string{"foo": "foo"}, + Arg: map[string]string{"bar": "bar"}, + Result: map[string]string{"foo": "foo", "bar": "bar"}, + }, + { + Base: map[string]string{"foo": "foo", "bar": "bar"}, + Arg: map[string]string{"zoo": "zoo"}, + Result: map[string]string{"foo": "foo", "bar": "bar", "zoo": "zoo"}, + }, + { + Base: 
map[string]string{"foo": "foo", "bar": "bar"}, + Arg: map[string]string{"bar": "newbar"}, + Result: map[string]string{"foo": "foo", "bar": "newbar"}, + }, + } + + for i, example := range examples { + i++ + result := models.StatisticTags(example.Base).Merge(example.Arg) + if got, exp := result, example.Result; !reflect.DeepEqual(got, exp) { + t.Errorf("[Example %d] got %#v, expected %#v", i, got, exp) + } + } +} diff --git a/vendor/github.com/influxdata/influxdb/nightly.sh b/vendor/github.com/influxdata/influxdb/nightly.sh new file mode 100755 index 000000000000..bfa8295ae51c --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/nightly.sh @@ -0,0 +1,57 @@ +#!/bin/bash + +SWAKS="/root/swaks" + +# Bump this whenever a release branch is created from master +MASTER_VERSION=0.10.0 + +# send_failure_notification sends an e-mail with a build failure notification. +function send_failure_notification { + smtp=$1 + user=$2 + password=$3 + to=$4 + version=$5 + $SWAKS --auth \ + --server $smtp \ + --au $user \ + --ap $password \ + --to $to \ + --h-Subject: "Nightly build has FAILED" \ + --body "The nightly build has failed, version: $version" +} + +if [ $# -lt 4 ]; then + echo "$0 [RACE_ENABLED]" + exit 1 +fi +SMTP=$1 +USER=$2 +PASSWORD=$3 +TO=$4 +RACE_ENABLED=$5 + +if [ -n "$RACE_ENABLED" ]; then + race="-x" + echo "Race-detection build enabled." +fi + +REPO_DIR=`mktemp -d` +echo "Using $REPO_DIR for all work..." + +cd $REPO_DIR +export GOPATH=`pwd` +mkdir -p $GOPATH/src/github.com/influxdata +cd $GOPATH/src/github.com/influxdata +git clone https://github.com/influxdata/influxdb.git + +cd $GOPATH/src/github.com/influxdata/influxdb +VERSION="$MASTER_VERSION-nightly-`git log --pretty=format:'%h' -n 1`" +NIGHTLY_BUILD=true ./package.sh $race $VERSION + +if [ $? -ne 0 ]; then + # Send notification e-mail. 
+    send_failure_notification $SMTP $USER $PASSWORD $TO $VERSION
+fi
+
+rm -rf $REPO_DIR
diff --git a/vendor/github.com/influxdata/influxdb/node.go b/vendor/github.com/influxdata/influxdb/node.go
new file mode 100644
index 000000000000..2d8e1b77d752
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/node.go
@@ -0,0 +1,118 @@
+package influxdb
+
+import (
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"strconv"
+)
+
+const (
+	nodeFile      = "node.json"
+	oldNodeFile   = "id"
+	peersFilename = "peers.json"
+)
+
+type Node struct {
+	path string
+	ID   uint64
+}
+
+// LoadNode will load the node information from disk if present
+func LoadNode(path string) (*Node, error) {
+	// Always check to see if we are upgrading first
+	if err := upgradeNodeFile(path); err != nil {
+		return nil, err
+	}
+
+	n := &Node{
+		path: path,
+	}
+
+	f, err := os.Open(filepath.Join(path, nodeFile))
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+
+	if err := json.NewDecoder(f).Decode(n); err != nil {
+		return nil, err
+	}
+
+	return n, nil
+}
+
+// NewNode will return a new node
+func NewNode(path string) *Node {
+	return &Node{
+		path: path,
+	}
+}
+
+// Save will save the node file to disk and replace the existing one if present
+func (n *Node) Save() error {
+	file := filepath.Join(n.path, nodeFile)
+	tmpFile := file + "tmp"
+
+	f, err := os.Create(tmpFile)
+	if err != nil {
+		return err
+	}
+
+	if err = json.NewEncoder(f).Encode(n); err != nil {
+		f.Close()
+		return err
+	}
+
+	if err = f.Close(); nil != err {
+		return err
+	}
+
+	return os.Rename(tmpFile, file)
+}
+
+func upgradeNodeFile(path string) error {
+	oldFile := filepath.Join(path, oldNodeFile)
+	b, err := ioutil.ReadFile(oldFile)
+	if err != nil {
+		if os.IsNotExist(err) {
+			return nil
+		}
+		return err
+	}
+	// We shouldn't have an empty ID file, but if we do, ignore it
+	if len(b) == 0 {
+		return nil
+	}
+
+	peers := []string{}
+	pb, err := ioutil.ReadFile(filepath.Join(path, peersFilename))
+	if err != nil {
+		if os.IsNotExist(err) {
+			return nil
+		}
+		return err
+	}
+	if err = json.Unmarshal(pb, &peers); err != nil {
+		return err
+	}
+	if len(peers) > 1 {
+		return fmt.Errorf("to upgrade a cluster, please contact support at influxdata")
+	}
+
+	n := &Node{
+		path: path,
+	}
+	if n.ID, err = strconv.ParseUint(string(b), 10, 64); err != nil {
+		return err
+	}
+	if err := n.Save(); err != nil {
+		return err
+	}
+	if err := os.Remove(oldFile); err != nil {
+		return err
+	}
+	return nil
+}
diff --git a/vendor/github.com/influxdata/influxdb/package.sh b/vendor/github.com/influxdata/influxdb/package.sh
new file mode 100755
index 000000000000..a4e348787606
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/package.sh
@@ -0,0 +1,571 @@
+#!/usr/bin/env bash
+
+###########################################################################
+# Packaging script which creates debian and RPM packages. It optionally
+# tags the repo with the given version.
+#
+# Requirements: GOPATH must be set. 'fpm' must be on the path, and the AWS
+# CLI tools must also be installed.
+#
+# https://github.com/jordansissel/fpm
+# http://aws.amazon.com/cli/
+#
+# Packaging process: to package a build, simply execute:
+#
+#    package.sh <version>
+#
+# where <version> is the desired version. If generation of a debian and RPM
+# package is successful, the script will offer to tag the repo using the
+# supplied version string.
+#
+# See package.sh -h for options
+#
+# AWS upload: the script will also offer to upload the packages to S3. If
+# this option is selected, the credentials should be present in the file
+# ~/aws.conf. The contents should be of the form:
+#
+#    [default]
+#    aws_access_key_id=<access key id>
+#    aws_secret_access_key=<secret access key>
+#    region = us-east-1
+#
+# Trim the leading spaces when creating the file. The script will exit if
+# S3 upload is requested, but this file does not exist.
+
+[ -z $DEBUG ] || set -x
+
+AWS_FILE=~/aws.conf
+
+INSTALL_ROOT_DIR=/usr/bin
+INFLUXDB_LOG_DIR=/var/log/influxdb
+INFLUXDB_DATA_DIR=/var/lib/influxdb
+INFLUXDB_SCRIPT_DIR=/usr/lib/influxdb
+CONFIG_ROOT_DIR=/etc/influxdb
+LOGROTATE_DIR=/etc/logrotate.d
+
+SAMPLE_CONFIGURATION=etc/config.sample.toml
+INITD_SCRIPT=scripts/init.sh
+SYSTEMD_SCRIPT=scripts/influxdb.service
+POSTINSTALL_SCRIPT=scripts/post-install.sh
+PREINSTALL_SCRIPT=scripts/pre-install.sh
+POSTUNINSTALL_SCRIPT=scripts/post-uninstall.sh
+LOGROTATE=scripts/logrotate
+
+TMP_WORK_DIR=`mktemp -d`
+POST_INSTALL_PATH=`mktemp`
+POST_UNINSTALL_PATH=`mktemp`
+ARCH=`uname -i`
+LICENSE=MIT
+URL=influxdb.com
+MAINTAINER=support@influxdb.com
+VENDOR=Influxdb
+DESCRIPTION="Distributed time-series database"
+
+# Allow path to FPM to be set by environment variables. Some execution contexts
+# like cron don't have PATH set correctly to pick it up.
+if [ -z "$FPM" ]; then
+    FPM=`which fpm`
+fi
+
+GO_VERSION="go1.4.3"
+GOPATH_INSTALL=
+BINS=(
+    influxd
+    influx
+    influx_stress
+    influx_tsm
+    influx_inspect
+    )
+
+###########################################################################
+# Helper functions.
+
+# usage prints simple usage information.
+usage() {
+    cat << EOF >&2
+$0 [-h] [-p|-w] [-t <dist>] [-r <number>] <version>
+
+    <version> should be a dotted version such as 0.9.5.
+
+    -r release candidate number, if any.
+       Example: -r 7
+    -p just build packages
+    -x build with race-detection enabled
+    -w build packages for current working directory
+       implies -p
+    -t <dist>
+       build package for <dist>
+       <dist> can be rpm, tar or deb
+       can have multiple -t
+
+    Examples:
+
+        $0 0.9.5 -r 9 # Creates 0.9.5-rc9
+        $0 0.9.4      # Creates 0.9.4
+
+EOF
+    cleanup_exit $1
+}
+
+# full_version echoes the full version string, given a version and an optional
+# RC number. If just the version is present, that is echoed. If the RC is
+# also provided, then "-rc" and the number are appended to the version.
+# For example, 0.9.4-rc4 would be returned if the version was 0.9.4 and the
+# RC number was 4.
+full_version() {
+    version=$1
+    rc=$2
+    if [ -z "$rc" ]; then
+        echo $version
+    else
+        echo ${version}-rc${rc}
+    fi
+}
+
+# rpm_release echoes the RPM release or "iteration" given an RC number.
+rpm_release() {
+    rc=$1
+    if [ -z "$rc" ]; then
+        echo 1
+    else
+        echo 0.1.rc${rc}
+    fi
+}
+
+# cleanup_exit removes all resources created during the process and exits with
+# the supplied returned code.
+cleanup_exit() {
+    rm -r $TMP_WORK_DIR
+    rm $POST_INSTALL_PATH
+    rm $POST_UNINSTALL_PATH
+    exit $1
+}
+
+# current_branch echoes the current git branch.
+current_branch() {
+    echo `git rev-parse --abbrev-ref HEAD`
+}
+
+# check_gopath sanity checks the value of the GOPATH env variable, and determines
+# the path where build artifacts are installed. GOPATH may be a colon-delimited
+# list of directories.
+check_gopath() {
+    [ -z "$GOPATH" ] && echo "GOPATH is not set." && cleanup_exit 1
+    GOPATH_INSTALL=`echo $GOPATH | cut -d ':' -f 1`
+    [ ! -d "$GOPATH_INSTALL" ] && echo "GOPATH_INSTALL is not a directory." && cleanup_exit 1
+    echo "GOPATH ($GOPATH) looks sane, using $GOPATH_INSTALL for installation."
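+    # Only the first element of a colon-delimited GOPATH receives build
+    # artifacts, so that is the directory used for packaging.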
+} + +check_gvm() { + if [ -n "$GOPATH" ]; then + existing_gopath=$GOPATH + fi + + source $HOME/.gvm/scripts/gvm + which gvm + if [ $? -ne 0 ]; then + echo "gvm not found -- aborting." + cleanup_exit $1 + fi + gvm use $GO_VERSION + if [ $? -ne 0 ]; then + echo "gvm cannot find Go version $GO_VERSION -- aborting." + cleanup_exit $1 + fi + + # Keep any existing GOPATH set. + if [ -n "$existing_gopath" ]; then + GOPATH=$existing_gopath + fi +} + +# check_clean_tree ensures that no source file is locally modified. +check_clean_tree() { + modified=$(git ls-files --modified | wc -l) + if [ $modified -ne 0 ]; then + echo "The source tree is not clean -- aborting." + cleanup_exit 1 + fi + echo "Git tree is clean." +} + +# update_tree ensures the tree is in-sync with the repo. +update_tree() { + git pull origin $TARGET_BRANCH + if [ $? -ne 0 ]; then + echo "Failed to pull latest code -- aborting." + cleanup_exit 1 + fi + git fetch --tags + if [ $? -ne 0 ]; then + echo "Failed to fetch tags -- aborting." + cleanup_exit 1 + fi + echo "Git tree updated successfully." +} + +# check_tag_exists checks if the existing release already exists in the tags. +check_tag_exists () { + version=$1 + git tag | grep -q "^v$version$" + if [ $? -eq 0 ]; then + echo "Proposed version $version already exists as a tag -- aborting." + cleanup_exit 1 + fi +} + +# make_dir_tree creates the directory structure within the packages. +make_dir_tree() { + work_dir=$1 + version=$2 + + mkdir -p $work_dir/$INSTALL_ROOT_DIR + mkdir -p $work_dir/$INFLUXDB_SCRIPT_DIR/scripts + if [ $? -ne 0 ]; then + echo "Failed to create script directory -- aborting." + cleanup_exit 1 + fi + mkdir -p $work_dir/$CONFIG_ROOT_DIR + if [ $? -ne 0 ]; then + echo "Failed to create configuration directory -- aborting." + cleanup_exit 1 + fi + mkdir -p $work_dir/$LOGROTATE_DIR + if [ $? -ne 0 ]; then + echo "Failed to create logrotate directory -- aborting." + cleanup_exit 1 + fi +} + +# do_build builds the code. The version and commit must be passed in. +do_build() { + for b in ${BINS[*]}; do + rm -f $GOPATH_INSTALL/bin/$b + done + + if [ -n "$WORKING_DIR" ]; then + STASH=`git stash create -a` + if [ $? -ne 0 ]; then + echo "WARNING: failed to stash uncommited local changes" + fi + git reset --hard + fi + + go get -u -f -d ./... + if [ $? -ne 0 ]; then + echo "WARNING: failed to 'go get' packages." + fi + + git checkout $TARGET_BRANCH # go get switches to master, so ensure we're back. + + if [ -n "$WORKING_DIR" ]; then + git stash apply $STASH + if [ $? -ne 0 ]; then #and apply previous uncommited local changes + echo "WARNING: failed to restore uncommited local changes" + fi + fi + + version=$1 + commit=`git rev-parse HEAD` + branch=`current_branch` + if [ $? -ne 0 ]; then + echo "Unable to retrieve current commit -- aborting" + cleanup_exit 1 + fi + + date=`date -u --iso-8601=seconds` + go install $RACE -a -ldflags="-X main.version=$version -X main.branch=$branch -X main.commit=$commit -X main.buildTime=$date" ./... + if [ $? -ne 0 ]; then + echo "Build failed, unable to create package -- aborting" + cleanup_exit 1 + fi + echo "Build completed successfully." 
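+    # All binaries named in BINS are now installed under $GOPATH_INSTALL/bin.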
+} + +########################################################################### +# Process options +while : +do + case $1 in + -h | --help) + usage 0 + ;; + + -p | --packages-only) + PACKAGES_ONLY="PACKAGES_ONLY" + shift + ;; + + -t | --target) + case "$2" in + 'tar') TAR_WANTED="gz" + ;; + 'deb') DEB_WANTED="deb" + ;; + 'rpm') RPM_WANTED="rpm" + ;; + *) + echo "Unknown target distribution $2" + usage 1 + ;; + esac + shift 2 + ;; + + -r) + RC=$2 + if [ -z "$RC" ]; then + echo "RC number required" + fi + shift 2 + ;; + + -x) + RACE="-race" + shift + ;; + + -w | --working-directory) + PACKAGES_ONLY="PACKAGES_ONLY" + WORKING_DIR="WORKING_DIR" + shift + ;; + + -*) + echo "Unknown option $1" + usage 1 + ;; + + ?*) + if [ -z $VERSION ]; then + VERSION=$1 + shift + else + echo "$1 : aborting version already set to $VERSION" + usage 1 + fi + + + echo $VERSION | grep -i '[r|rc]' 2>&1 >/dev/null + if [ $? -ne 1 -a -z "$NIGHTLY_BUILD" ]; then + echo + echo "$VERSION contains reference to RC - specify RC separately" + echo + usage 1 + fi + ;; + + *) break + esac +done + +if [ -z "$DEB_WANTED$RPM_WANTED$TAR_WANTED" ]; then + TAR_WANTED="gz" + DEB_WANTED="deb" + RPM_WANTED="rpm" +fi + +if [ -z "$VERSION" ]; then + echo -e "Missing version" + usage 1 +fi + +########################################################################### +# Start the packaging process. + +echo -e "\nStarting package process...\n" + +# Ensure the current is correct. +TARGET_BRANCH=`current_branch` +if [ -z "$NIGHTLY_BUILD" -a -z "$PACKAGES_ONLY" ]; then +echo -n "Current branch is $TARGET_BRANCH. Start packaging this branch? [Y/n] " + read response + response=`echo $response | tr 'A-Z' 'a-z'` + if [ "x$response" == "xn" ]; then + echo "Packaging aborted." + cleanup_exit 1 + fi +fi + +check_gvm +check_gopath +if [ -z "$NIGHTLY_BUILD" -a -z "$PACKAGES_ONLY" ]; then + check_clean_tree + update_tree + check_tag_exists `full_version $VERSION $RC` +fi + +do_build `full_version $VERSION $RC` +make_dir_tree $TMP_WORK_DIR `full_version $VERSION $RC` + +########################################################################### +# Copy the assets to the installation directories. + +for b in ${BINS[*]}; do + cp $GOPATH_INSTALL/bin/$b $TMP_WORK_DIR/$INSTALL_ROOT_DIR/ + if [ $? -ne 0 ]; then + echo "Failed to copy binaries to packaging directory ($TMP_WORK_DIR/$INSTALL_ROOT_DIR/) -- aborting." + cleanup_exit 1 + fi +done +echo "${BINS[*]} copied to $TMP_WORK_DIR/$INSTALL_ROOT_DIR/" + +cp $INITD_SCRIPT $TMP_WORK_DIR/$INFLUXDB_SCRIPT_DIR/$INITD_SCRIPT +if [ $? -ne 0 ]; then + echo "Failed to copy init.d script to packaging directory ($TMP_WORK_DIR/$INFLUXDB_SCRIPT_DIR/) -- aborting." + cleanup_exit 1 +fi +echo "$INITD_SCRIPT copied to $TMP_WORK_DIR/$INFLUXDB_SCRIPT_DIR" + +cp $SYSTEMD_SCRIPT $TMP_WORK_DIR/$INFLUXDB_SCRIPT_DIR/$SYSTEMD_SCRIPT +if [ $? -ne 0 ]; then + echo "Failed to copy systemd script to packaging directory -- aborting." + cleanup_exit 1 +fi +echo "$SYSTEMD_SCRIPT copied to $TMP_WORK_DIR/$INFLUXDB_SCRIPT_DIR" + +cp $SAMPLE_CONFIGURATION $TMP_WORK_DIR/$CONFIG_ROOT_DIR/influxdb.conf +if [ $? -ne 0 ]; then + echo "Failed to copy $SAMPLE_CONFIGURATION to packaging directory -- aborting." + cleanup_exit 1 +fi + +install -m 644 $LOGROTATE $TMP_WORK_DIR/$LOGROTATE_DIR/influxdb +if [ $? -ne 0 ]; then + echo "Failed to copy logrotate configuration to packaging directory -- aborting." + cleanup_exit 1 +fi + +########################################################################### +# Create the actual packages. 
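+# Each requested format is produced by fpm from the staged tree in
+# $TMP_WORK_DIR, using the shared metadata assembled in COMMON_FPM_ARGS.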
+ +if [ -z "$NIGHTLY_BUILD" -a -z "$PACKAGES_ONLY" ]; then + echo -n "Commence creation of $ARCH packages, version `full_version $VERSION $RC`? [Y/n] " + read response + response=`echo $response | tr 'A-Z' 'a-z'` + if [ "x$response" == "xn" ]; then + echo "Packaging aborted." + cleanup_exit 1 + fi +fi + +COMMON_FPM_ARGS="\ +--log error \ +-C $TMP_WORK_DIR \ +--vendor $VENDOR \ +--url $URL \ +--license $LICENSE \ +--maintainer $MAINTAINER \ +--after-install $POSTINSTALL_SCRIPT \ +--before-install $PREINSTALL_SCRIPT \ +--after-remove $POSTUNINSTALL_SCRIPT \ +--name influxdb${RACE} \ +--config-files $CONFIG_ROOT_DIR \ +--config-files $LOGROTATE_DIR" + +if [ -n "$DEB_WANTED" ]; then + $FPM -s dir -t deb $deb_args --description "$DESCRIPTION" $COMMON_FPM_ARGS --version `full_version $VERSION $RC` . + if [ $? -ne 0 ]; then + echo "Failed to create Debian package -- aborting." + cleanup_exit 1 + fi + echo "Debian package created successfully." +fi + +if [ -n "$TAR_WANTED" ]; then + if [ -n "$RACE" ]; then + # Tweak race prefix for tarball. + race="race_" + fi + $FPM -s dir -t tar --prefix influxdb_$race`full_version $VERSION $RC`_${ARCH} -p influxdb_$race`full_version $VERSION $RC`_${ARCH}.tar.gz --description "$DESCRIPTION" $COMMON_FPM_ARGS --version `full_version $VERSION $RC ` . + if [ $? -ne 0 ]; then + echo "Failed to create Tar package -- aborting." + cleanup_exit 1 + fi + echo "Tar package created successfully." +fi + +if [ -n "$RPM_WANTED" ]; then + $rpm_args $FPM -s dir -t rpm --description "$DESCRIPTION" $COMMON_FPM_ARGS --depends coreutils --version $VERSION --iteration `rpm_release $RC` . + if [ $? -ne 0 ]; then + echo "Failed to create RPM package -- aborting." + cleanup_exit 1 + fi + echo "RPM package created successfully." +fi + +########################################################################### +# Offer to tag the repo. + +if [ -z "$NIGHTLY_BUILD" -a -z "$PACKAGES_ONLY" ]; then + echo -n "Tag source tree with v`full_version $VERSION $RC` and push to repo? [y/N] " + read response + response=`echo $response | tr 'A-Z' 'a-z'` + if [ "x$response" == "xy" ]; then + echo "Creating tag v`full_version $VERSION $RC` and pushing to repo" + git tag v`full_version $VERSION $RC` + if [ $? -ne 0 ]; then + echo "Failed to create tag v`full_version $VERSION $RC` -- aborting" + cleanup_exit 1 + fi + echo "Tag v`full_version $VERSION $RC` created" + git push origin v`full_version $VERSION $RC` + if [ $? -ne 0 ]; then + echo "Failed to push tag v`full_version $VERSION $RC` to repo -- aborting" + cleanup_exit 1 + fi + echo "Tag v`full_version $VERSION $RC` pushed to repo" + else + echo "Not creating tag v`full_version $VERSION $RC`." + fi +fi + +########################################################################### +# Offer to publish the packages. + +if [ -z "$NIGHTLY_BUILD" -a -z "$PACKAGES_ONLY" ]; then + echo -n "Publish packages to S3? [y/N] " + read response + response=`echo $response | tr 'A-Z' 'a-z'` +fi + +if [ "x$response" == "xy" -o -n "$NIGHTLY_BUILD" ]; then + echo "Publishing packages to S3." + if [ ! -e "$AWS_FILE" ]; then + echo "$AWS_FILE does not exist -- aborting." + cleanup_exit 1 + fi + + for filepath in `ls *.{$DEB_WANTED,$RPM_WANTED,$TAR_WANTED} 2> /dev/null`; do + filename=`basename $filepath` + sum=`md5sum $filename | cut -d ' ' -f 1` + + if [ -n "$NIGHTLY_BUILD" ]; then + # Replace the version string in the filename with "nightly". 
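+            # Deb and tar filenames embed the version verbatim or with dashes
+            # converted to underscores, while RPM filenames embed
+            # version-iteration, hence the three substitutions below.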
+ v=`full_version $VERSION $RC` + v_underscored=`echo "$v" | tr - _` + v_rpm=$VERSION-`rpm_release $RC` + + # It's ok to run each of these since only 1 will match, leaving + # filename untouched otherwise. + filename=`echo $filename | sed s/$v/nightly/` + filename=`echo $filename | sed s/$v_underscored/nightly/` + filename=`echo $filename | sed s/$v_rpm/nightly-1/` + fi + + AWS_CONFIG_FILE=$AWS_FILE aws s3 cp $filepath s3://influxdb/$filename --acl public-read --region us-east-1 + if [ $? -ne 0 ]; then + echo "Upload failed ($filename) -- aborting". + cleanup_exit 1 + fi + echo "$filename uploaded, MD5 checksum is $sum" + done +else + echo "Not publishing packages to S3." +fi + +########################################################################### +# All done. + +echo -e "\nPackaging process complete." +cleanup_exit 0 diff --git a/vendor/github.com/influxdata/influxdb/pkg/README.md b/vendor/github.com/influxdata/influxdb/pkg/README.md new file mode 100644 index 000000000000..1c73a9e85878 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/pkg/README.md @@ -0,0 +1,5 @@ +pkg/ is a collection of utility packages used by the InfluxDB project without being specific to its internals. + +Utility packages are kept separate from the InfluxDB core codebase to keep it as small and concise as possible. If some utilities grow larger and their APIs stabilize, they may be moved to their own repository under the InfluxDB organization, to facilitate re-use by other projects. However that is not the priority. + +Because utility packages are small and neatly separated from the rest of the codebase, they are a good place to start for aspiring maintainers and contributors. Get in touch if you want to help maintain them! diff --git a/vendor/github.com/influxdata/influxdb/pkg/escape/BUILD b/vendor/github.com/influxdata/influxdb/pkg/escape/BUILD index 26233e328472..8d13f889fcf2 100644 --- a/vendor/github.com/influxdata/influxdb/pkg/escape/BUILD +++ b/vendor/github.com/influxdata/influxdb/pkg/escape/BUILD @@ -1,9 +1,4 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "go_default_library", @@ -12,6 +7,14 @@ go_library( "strings.go", ], importpath = "github.com/influxdata/influxdb/pkg/escape", + visibility = ["//visibility:public"], +) + +go_test( + name = "go_default_test", + srcs = ["bytes_test.go"], + importpath = "github.com/influxdata/influxdb/pkg/escape", + library = ":go_default_library", ) filegroup( @@ -25,4 +28,5 @@ filegroup( name = "all-srcs", srcs = [":package-srcs"], tags = ["automanaged"], + visibility = ["//visibility:public"], ) diff --git a/vendor/github.com/influxdata/influxdb/pkg/escape/bytes.go b/vendor/github.com/influxdata/influxdb/pkg/escape/bytes.go index 5b6ba15aabc2..a3c219d2d18e 100644 --- a/vendor/github.com/influxdata/influxdb/pkg/escape/bytes.go +++ b/vendor/github.com/influxdata/influxdb/pkg/escape/bytes.go @@ -1,4 +1,4 @@ -package escape +package escape // import "github.com/influxdata/influxdb/pkg/escape" import ( "bytes" diff --git a/vendor/github.com/influxdata/influxdb/pkg/escape/bytes_test.go b/vendor/github.com/influxdata/influxdb/pkg/escape/bytes_test.go new file mode 100644 index 000000000000..e9418f659f78 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/pkg/escape/bytes_test.go @@ -0,0 +1,68 @@ +package escape + +import ( + "bytes" + "reflect" + "strings" + "testing" +) + +func TestUnescape(t 
*testing.T) {
+	tests := []struct {
+		in  []byte
+		out []byte
+	}{
+		{
+			[]byte(nil),
+			[]byte(nil),
+		},
+
+		{
+			[]byte(""),
+			[]byte(nil),
+		},
+
+		{
+			[]byte("\\,\\\"\\ \\="),
+			[]byte(",\" ="),
+		},
+
+		{
+			[]byte("\\\\"),
+			[]byte("\\\\"),
+		},
+
+		{
+			[]byte("plain and simple"),
+			[]byte("plain and simple"),
+		},
+	}
+
+	for ii, tt := range tests {
+		got := Unescape(tt.in)
+		if !reflect.DeepEqual(got, tt.out) {
+			t.Errorf("[%d] Unescape(%#v) = %#v, expected %#v", ii, string(tt.in), string(got), string(tt.out))
+		}
+	}
+}
+
+func TestAppendUnescaped(t *testing.T) {
+	cases := strings.Split(strings.TrimSpace(`
+normal
+inv\alid
+goo\"d
+sp\ ace
+\,\"\ \=
+f\\\ x
+`), "\n")
+
+	for _, c := range cases {
+		exp := Unescape([]byte(c))
+		got := AppendUnescaped(nil, []byte(c))
+
+		if !bytes.Equal(got, exp) {
+			t.Errorf("AppendUnescaped failed for %#q: got %#q, exp %#q", c, got, exp)
+		}
+	}
+
+}
diff --git a/vendor/github.com/influxdata/influxdb/test.sh b/vendor/github.com/influxdata/influxdb/test.sh
new file mode 100755
index 000000000000..15524a16f45d
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/test.sh
@@ -0,0 +1,206 @@
+#!/bin/bash
+#
+# This is the InfluxDB test script.
+# This script can run tests in different environments.
+#
+# Usage: ./test.sh <environment_index>
+# Corresponding environments for environment_index:
+#    0: normal 64bit tests
+#    1: race enabled 64bit tests
+#    2: normal 32bit tests
+#    save: build the docker images and save them to DOCKER_SAVE_DIR. Do not run tests.
+#    count: print the number of test environments
+#    *: to run all tests in parallel containers
+#
+# Logs from the test runs will be saved in OUTPUT_DIR, which defaults to ./test-logs
+#
+
+# Get dir of script and make it our working directory.
+DIR=$(cd $(dirname "${BASH_SOURCE[0]}") && pwd)
+cd $DIR
+
+ENVIRONMENT_INDEX=$1
+# Set the default OUTPUT_DIR
+OUTPUT_DIR=${OUTPUT_DIR-./test-logs}
+# Set the default DOCKER_SAVE_DIR
+DOCKER_SAVE_DIR=${DOCKER_SAVE_DIR-$HOME/docker}
+# Set default parallelism
+PARALLELISM=${PARALLELISM-1}
+# Set default timeout
+TIMEOUT=${TIMEOUT-960s}
+
+# Default to deleting the container
+DOCKER_RM=${DOCKER_RM-true}
+
+# Update this value if you add a new test environment.
+ENV_COUNT=3
+
+# Default return code 0
+rc=0
+
+# Executes the given statement, and exits if the command returns a non-zero code.
+function exit_if_fail {
+    command=$@
+    echo "Executing '$command'"
+    $command
+    rc=$?
+    if [ $rc -ne 0 ]; then
+        echo "'$command' returned $rc."
+        exit $rc
+    fi
+}
+
+# Convert dockerfile name to valid docker image tag name.
+function filename2imagename {
+    echo ${1/Dockerfile/influxdb}
+}
+
+# Run a test in a docker container
+# Usage: run_test_docker <dockerfile> <test_name> [arg ...]
+function run_test_docker {
+    local dockerfile=$1
+    local imagename=$(filename2imagename "$dockerfile")
+    shift
+    local name=$1
+    shift
+    local logfile="$OUTPUT_DIR/${name}.log"
+
+    build_docker_image "$dockerfile" "$imagename"
+    echo "Running test in docker $name with args $@"
+
+    docker run \
+        --rm=$DOCKER_RM \
+        -v "$DIR:/root/go/src/github.com/influxdata/influxdb" \
+        -e "INFLUXDB_DATA_ENGINE=$INFLUXDB_DATA_ENGINE" \
+        -e "GORACE=$GORACE" \
+        -e "GO_CHECKOUT=$GO_CHECKOUT" \
+        -e "AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID" \
+        -e "AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY" \
+        "$imagename" \
+        "--parallel=$PARALLELISM" \
+        "--timeout=$TIMEOUT" \
+        "$@" \
+        2>&1 | tee "$logfile"
+    return "${PIPESTATUS[0]}"
+
+}
+
+# Build the docker image defined by given dockerfile.
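+# Usage: build_docker_image <dockerfile> <imagename>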
+function build_docker_image { + local dockerfile=$1 + local imagename=$2 + + echo "Building docker image $imagename" + exit_if_fail docker build -f "$dockerfile" -t "$imagename" . +} + + +# Saves a docker image to $DOCKER_SAVE_DIR +function save_docker_image { + local dockerfile=$1 + local imagename=$(filename2imagename "$dockerfile") + local imagefile="$DOCKER_SAVE_DIR/${imagename}.tar.gz" + + if [ ! -d "$DOCKER_SAVE_DIR" ] + then + mkdir -p "$DOCKER_SAVE_DIR" + fi + + if [[ -e "$imagefile" ]] + then + zcat $imagefile | docker load + fi + imageid=$(docker images -q --no-trunc "$imagename") + build_docker_image "$dockerfile" "$imagename" + newimageid=$(docker images -q --no-trunc "$imagename") + rc=0 + if [ "$imageid" != "$newimageid" ] + then + docker save "$imagename" | gzip > "$imagefile" + rc="${PIPESTATUS[0]}" + fi + return "$rc" +} + +if [ ! -d "$OUTPUT_DIR" ] +then + mkdir -p "$OUTPUT_DIR" +fi + +# Run the tests. +case $ENVIRONMENT_INDEX in + 0) + # 64 bit tests + run_test_docker Dockerfile_build_ubuntu64 test_64bit --generate --test + rc=$? + ;; + 1) + # 64 bit race tests + GORACE="halt_on_error=1" + run_test_docker Dockerfile_build_ubuntu64 test_64bit_race --generate --test --race + rc=$? + ;; + 2) + # 32 bit tests + run_test_docker Dockerfile_build_ubuntu32 test_32bit --generate --test --arch=i386 + rc=$? + ;; + "save") + # Save docker images for every Dockerfile_build* file. + # Useful for creating an external cache. + pids=() + for d in Dockerfile_build* + do + echo "Building and saving $d ..." + save_docker_image "$d" > $OUTPUT_DIR/${d}.log 2>&1 & + pids+=($!) + done + echo "Waiting..." + # Wait for all saves to finish + for pid in "${pids[@]}" + do + wait $pid + rc=$(($? + $rc)) + done + # Check if all saves passed + if [ $rc -eq 0 ] + then + echo "All saves succeeded" + else + echo "Some saves failed, check logs in $OUTPUT_DIR" + fi + ;; + "count") + echo $ENV_COUNT + ;; + *) + echo "No individual test environment specified running tests for all $ENV_COUNT environments." + # Run all test environments + pids=() + for t in $(seq 0 "$(($ENV_COUNT - 1))") + do + $0 $t 2>&1 > /dev/null & + # add PID to list + pids+=($!) + done + + echo "Started all tests. Follow logs in ${OUTPUT_DIR}. Waiting..." + + # Wait for all tests to finish + for pid in "${pids[@]}" + do + wait $pid + rc=$(($? 
+ $rc)) + done + + # Check if all tests passed + if [ $rc -eq 0 ] + then + echo "All test have passed" + else + echo "Some tests failed check logs in $OUTPUT_DIR for results" + fi + ;; +esac + +exit $rc diff --git a/vendor/github.com/jinzhu/gorm/BUILD b/vendor/github.com/jinzhu/gorm/BUILD index 7a83b7f2407d..6beba3d632f3 100644 --- a/vendor/github.com/jinzhu/gorm/BUILD +++ b/vendor/github.com/jinzhu/gorm/BUILD @@ -1,9 +1,4 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "go_default_library", @@ -35,9 +30,56 @@ go_library( "utils.go", ], importpath = "github.com/jinzhu/gorm", + visibility = ["//visibility:public"], deps = ["//vendor/github.com/jinzhu/inflection:go_default_library"], ) +go_test( + name = "go_default_test", + srcs = [ + "callback_system_test.go", + "search_test.go", + ], + importpath = "github.com/jinzhu/gorm", + library = ":go_default_library", +) + +go_test( + name = "go_default_xtest", + srcs = [ + "association_test.go", + "callbacks_test.go", + "create_test.go", + "customize_column_test.go", + "delete_test.go", + "embedded_struct_test.go", + "errors_test.go", + "field_test.go", + "join_table_test.go", + "main_test.go", + "migration_test.go", + "multi_primary_keys_test.go", + "pointer_test.go", + "polymorphic_test.go", + "preload_test.go", + "query_test.go", + "scaner_test.go", + "scope_test.go", + "update_test.go", + "utils_test.go", + ], + importpath = "github.com/jinzhu/gorm_test", + deps = [ + ":go_default_library", + "//vendor/github.com/erikstmartin/go-testdb:go_default_library", + "//vendor/github.com/jinzhu/gorm/dialects/mssql:go_default_library", + "//vendor/github.com/jinzhu/gorm/dialects/mysql:go_default_library", + "//vendor/github.com/jinzhu/gorm/dialects/postgres:go_default_library", + "//vendor/github.com/jinzhu/gorm/dialects/sqlite:go_default_library", + "//vendor/github.com/jinzhu/now:go_default_library", + ], +) + filegroup( name = "package-srcs", srcs = glob(["**"]), @@ -53,4 +95,5 @@ filegroup( "//vendor/github.com/jinzhu/gorm/dialects/sqlite:all-srcs", ], tags = ["automanaged"], + visibility = ["//visibility:public"], ) diff --git a/vendor/github.com/jinzhu/gorm/association_test.go b/vendor/github.com/jinzhu/gorm/association_test.go new file mode 100644 index 000000000000..c84f84ed610b --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/association_test.go @@ -0,0 +1,907 @@ +package gorm_test + +import ( + "fmt" + "os" + "reflect" + "sort" + "testing" + + "github.com/jinzhu/gorm" +) + +func TestBelongsTo(t *testing.T) { + post := Post{ + Title: "post belongs to", + Body: "body belongs to", + Category: Category{Name: "Category 1"}, + MainCategory: Category{Name: "Main Category 1"}, + } + + if err := DB.Save(&post).Error; err != nil { + t.Error("Got errors when save post", err) + } + + if post.Category.ID == 0 || post.MainCategory.ID == 0 { + t.Errorf("Category's primary key should be updated") + } + + if post.CategoryId.Int64 == 0 || post.MainCategoryId == 0 { + t.Errorf("post's foreign key should be updated") + } + + // Query + var category1 Category + DB.Model(&post).Association("Category").Find(&category1) + if category1.Name != "Category 1" { + t.Errorf("Query belongs to relations with Association") + } + + var mainCategory1 Category + DB.Model(&post).Association("MainCategory").Find(&mainCategory1) + if mainCategory1.Name != "Main Category 1" { + t.Errorf("Query belongs to relations with 
Association") + } + + var category11 Category + DB.Model(&post).Related(&category11) + if category11.Name != "Category 1" { + t.Errorf("Query belongs to relations with Related") + } + + if DB.Model(&post).Association("Category").Count() != 1 { + t.Errorf("Post's category count should be 1") + } + + if DB.Model(&post).Association("MainCategory").Count() != 1 { + t.Errorf("Post's main category count should be 1") + } + + // Append + var category2 = Category{ + Name: "Category 2", + } + DB.Model(&post).Association("Category").Append(&category2) + + if category2.ID == 0 { + t.Errorf("Category should has ID when created with Append") + } + + var category21 Category + DB.Model(&post).Related(&category21) + + if category21.Name != "Category 2" { + t.Errorf("Category should be updated with Append") + } + + if DB.Model(&post).Association("Category").Count() != 1 { + t.Errorf("Post's category count should be 1") + } + + // Replace + var category3 = Category{ + Name: "Category 3", + } + DB.Model(&post).Association("Category").Replace(&category3) + + if category3.ID == 0 { + t.Errorf("Category should has ID when created with Replace") + } + + var category31 Category + DB.Model(&post).Related(&category31) + if category31.Name != "Category 3" { + t.Errorf("Category should be updated with Replace") + } + + if DB.Model(&post).Association("Category").Count() != 1 { + t.Errorf("Post's category count should be 1") + } + + // Delete + DB.Model(&post).Association("Category").Delete(&category2) + if DB.Model(&post).Related(&Category{}).RecordNotFound() { + t.Errorf("Should not delete any category when Delete a unrelated Category") + } + + if post.Category.Name == "" { + t.Errorf("Post's category should not be reseted when Delete a unrelated Category") + } + + DB.Model(&post).Association("Category").Delete(&category3) + + if post.Category.Name != "" { + t.Errorf("Post's category should be reseted after Delete") + } + + var category41 Category + DB.Model(&post).Related(&category41) + if category41.Name != "" { + t.Errorf("Category should be deleted with Delete") + } + + if count := DB.Model(&post).Association("Category").Count(); count != 0 { + t.Errorf("Post's category count should be 0 after Delete, but got %v", count) + } + + // Clear + DB.Model(&post).Association("Category").Append(&Category{ + Name: "Category 2", + }) + + if DB.Model(&post).Related(&Category{}).RecordNotFound() { + t.Errorf("Should find category after append") + } + + if post.Category.Name == "" { + t.Errorf("Post's category should has value after Append") + } + + DB.Model(&post).Association("Category").Clear() + + if post.Category.Name != "" { + t.Errorf("Post's category should be cleared after Clear") + } + + if !DB.Model(&post).Related(&Category{}).RecordNotFound() { + t.Errorf("Should not find any category after Clear") + } + + if count := DB.Model(&post).Association("Category").Count(); count != 0 { + t.Errorf("Post's category count should be 0 after Clear, but got %v", count) + } + + // Check Association mode with soft delete + category6 := Category{ + Name: "Category 6", + } + DB.Model(&post).Association("Category").Append(&category6) + + if count := DB.Model(&post).Association("Category").Count(); count != 1 { + t.Errorf("Post's category count should be 1 after Append, but got %v", count) + } + + DB.Delete(&category6) + + if count := DB.Model(&post).Association("Category").Count(); count != 0 { + t.Errorf("Post's category count should be 0 after the category has been deleted, but got %v", count) + } + + if err := 
DB.Model(&post).Association("Category").Find(&Category{}).Error; err == nil { + t.Errorf("Post's category is not findable after Delete") + } + + if count := DB.Unscoped().Model(&post).Association("Category").Count(); count != 1 { + t.Errorf("Post's category count should be 1 when query with Unscoped, but got %v", count) + } + + if err := DB.Unscoped().Model(&post).Association("Category").Find(&Category{}).Error; err != nil { + t.Errorf("Post's category should be findable when query with Unscoped, got %v", err) + } +} + +func TestBelongsToOverrideForeignKey1(t *testing.T) { + type Profile struct { + gorm.Model + Name string + } + + type User struct { + gorm.Model + Profile Profile `gorm:"ForeignKey:ProfileRefer"` + ProfileRefer int + } + + if relation, ok := DB.NewScope(&User{}).FieldByName("Profile"); ok { + if relation.Relationship.Kind != "belongs_to" || + !reflect.DeepEqual(relation.Relationship.ForeignFieldNames, []string{"ProfileRefer"}) || + !reflect.DeepEqual(relation.Relationship.AssociationForeignFieldNames, []string{"ID"}) { + t.Errorf("Override belongs to foreign key with tag") + } + } +} + +func TestBelongsToOverrideForeignKey2(t *testing.T) { + type Profile struct { + gorm.Model + Refer string + Name string + } + + type User struct { + gorm.Model + Profile Profile `gorm:"ForeignKey:ProfileID;AssociationForeignKey:Refer"` + ProfileID int + } + + if relation, ok := DB.NewScope(&User{}).FieldByName("Profile"); ok { + if relation.Relationship.Kind != "belongs_to" || + !reflect.DeepEqual(relation.Relationship.ForeignFieldNames, []string{"ProfileID"}) || + !reflect.DeepEqual(relation.Relationship.AssociationForeignFieldNames, []string{"Refer"}) { + t.Errorf("Override belongs to foreign key with tag") + } + } +} + +func TestHasOne(t *testing.T) { + user := User{ + Name: "has one", + CreditCard: CreditCard{Number: "411111111111"}, + } + + if err := DB.Save(&user).Error; err != nil { + t.Error("Got errors when save user", err.Error()) + } + + if user.CreditCard.UserId.Int64 == 0 { + t.Errorf("CreditCard's foreign key should be updated") + } + + // Query + var creditCard1 CreditCard + DB.Model(&user).Related(&creditCard1) + + if creditCard1.Number != "411111111111" { + t.Errorf("Query has one relations with Related") + } + + var creditCard11 CreditCard + DB.Model(&user).Association("CreditCard").Find(&creditCard11) + + if creditCard11.Number != "411111111111" { + t.Errorf("Query has one relations with Related") + } + + if DB.Model(&user).Association("CreditCard").Count() != 1 { + t.Errorf("User's credit card count should be 1") + } + + // Append + var creditcard2 = CreditCard{ + Number: "411111111112", + } + DB.Model(&user).Association("CreditCard").Append(&creditcard2) + + if creditcard2.ID == 0 { + t.Errorf("Creditcard should has ID when created with Append") + } + + var creditcard21 CreditCard + DB.Model(&user).Related(&creditcard21) + if creditcard21.Number != "411111111112" { + t.Errorf("CreditCard should be updated with Append") + } + + if DB.Model(&user).Association("CreditCard").Count() != 1 { + t.Errorf("User's credit card count should be 1") + } + + // Replace + var creditcard3 = CreditCard{ + Number: "411111111113", + } + DB.Model(&user).Association("CreditCard").Replace(&creditcard3) + + if creditcard3.ID == 0 { + t.Errorf("Creditcard should has ID when created with Replace") + } + + var creditcard31 CreditCard + DB.Model(&user).Related(&creditcard31) + if creditcard31.Number != "411111111113" { + t.Errorf("CreditCard should be updated with Replace") + } + + if 
DB.Model(&user).Association("CreditCard").Count() != 1 { + t.Errorf("User's credit card count should be 1") + } + + // Delete + DB.Model(&user).Association("CreditCard").Delete(&creditcard2) + var creditcard4 CreditCard + DB.Model(&user).Related(&creditcard4) + if creditcard4.Number != "411111111113" { + t.Errorf("Should not delete credit card when Delete a unrelated CreditCard") + } + + if DB.Model(&user).Association("CreditCard").Count() != 1 { + t.Errorf("User's credit card count should be 1") + } + + DB.Model(&user).Association("CreditCard").Delete(&creditcard3) + if !DB.Model(&user).Related(&CreditCard{}).RecordNotFound() { + t.Errorf("Should delete credit card with Delete") + } + + if DB.Model(&user).Association("CreditCard").Count() != 0 { + t.Errorf("User's credit card count should be 0 after Delete") + } + + // Clear + var creditcard5 = CreditCard{ + Number: "411111111115", + } + DB.Model(&user).Association("CreditCard").Append(&creditcard5) + + if DB.Model(&user).Related(&CreditCard{}).RecordNotFound() { + t.Errorf("Should added credit card with Append") + } + + if DB.Model(&user).Association("CreditCard").Count() != 1 { + t.Errorf("User's credit card count should be 1") + } + + DB.Model(&user).Association("CreditCard").Clear() + if !DB.Model(&user).Related(&CreditCard{}).RecordNotFound() { + t.Errorf("Credit card should be deleted with Clear") + } + + if DB.Model(&user).Association("CreditCard").Count() != 0 { + t.Errorf("User's credit card count should be 0 after Clear") + } + + // Check Association mode with soft delete + var creditcard6 = CreditCard{ + Number: "411111111116", + } + DB.Model(&user).Association("CreditCard").Append(&creditcard6) + + if count := DB.Model(&user).Association("CreditCard").Count(); count != 1 { + t.Errorf("User's credit card count should be 1 after Append, but got %v", count) + } + + DB.Delete(&creditcard6) + + if count := DB.Model(&user).Association("CreditCard").Count(); count != 0 { + t.Errorf("User's credit card count should be 0 after credit card deleted, but got %v", count) + } + + if err := DB.Model(&user).Association("CreditCard").Find(&CreditCard{}).Error; err == nil { + t.Errorf("User's creditcard is not findable after Delete") + } + + if count := DB.Unscoped().Model(&user).Association("CreditCard").Count(); count != 1 { + t.Errorf("User's credit card count should be 1 when query with Unscoped, but got %v", count) + } + + if err := DB.Unscoped().Model(&user).Association("CreditCard").Find(&CreditCard{}).Error; err != nil { + t.Errorf("User's creditcard should be findable when query with Unscoped, got %v", err) + } +} + +func TestHasOneOverrideForeignKey1(t *testing.T) { + type Profile struct { + gorm.Model + Name string + UserRefer uint + } + + type User struct { + gorm.Model + Profile Profile `gorm:"ForeignKey:UserRefer"` + } + + if relation, ok := DB.NewScope(&User{}).FieldByName("Profile"); ok { + if relation.Relationship.Kind != "has_one" || + !reflect.DeepEqual(relation.Relationship.ForeignFieldNames, []string{"UserRefer"}) || + !reflect.DeepEqual(relation.Relationship.AssociationForeignFieldNames, []string{"ID"}) { + t.Errorf("Override belongs to foreign key with tag") + } + } +} + +func TestHasOneOverrideForeignKey2(t *testing.T) { + type Profile struct { + gorm.Model + Name string + UserID uint + } + + type User struct { + gorm.Model + Refer string + Profile Profile `gorm:"ForeignKey:UserID;AssociationForeignKey:Refer"` + } + + if relation, ok := DB.NewScope(&User{}).FieldByName("Profile"); ok { + if relation.Relationship.Kind != 
"has_one" || + !reflect.DeepEqual(relation.Relationship.ForeignFieldNames, []string{"UserID"}) || + !reflect.DeepEqual(relation.Relationship.AssociationForeignFieldNames, []string{"Refer"}) { + t.Errorf("Override belongs to foreign key with tag") + } + } +} + +func TestHasMany(t *testing.T) { + post := Post{ + Title: "post has many", + Body: "body has many", + Comments: []*Comment{{Content: "Comment 1"}, {Content: "Comment 2"}}, + } + + if err := DB.Save(&post).Error; err != nil { + t.Error("Got errors when save post", err) + } + + for _, comment := range post.Comments { + if comment.PostId == 0 { + t.Errorf("comment's PostID should be updated") + } + } + + var compareComments = func(comments []Comment, contents []string) bool { + var commentContents []string + for _, comment := range comments { + commentContents = append(commentContents, comment.Content) + } + sort.Strings(commentContents) + sort.Strings(contents) + return reflect.DeepEqual(commentContents, contents) + } + + // Query + if DB.First(&Comment{}, "content = ?", "Comment 1").Error != nil { + t.Errorf("Comment 1 should be saved") + } + + var comments1 []Comment + DB.Model(&post).Association("Comments").Find(&comments1) + if !compareComments(comments1, []string{"Comment 1", "Comment 2"}) { + t.Errorf("Query has many relations with Association") + } + + var comments11 []Comment + DB.Model(&post).Related(&comments11) + if !compareComments(comments11, []string{"Comment 1", "Comment 2"}) { + t.Errorf("Query has many relations with Related") + } + + if DB.Model(&post).Association("Comments").Count() != 2 { + t.Errorf("Post's comments count should be 2") + } + + // Append + DB.Model(&post).Association("Comments").Append(&Comment{Content: "Comment 3"}) + + var comments2 []Comment + DB.Model(&post).Related(&comments2) + if !compareComments(comments2, []string{"Comment 1", "Comment 2", "Comment 3"}) { + t.Errorf("Append new record to has many relations") + } + + if DB.Model(&post).Association("Comments").Count() != 3 { + t.Errorf("Post's comments count should be 3 after Append") + } + + // Delete + DB.Model(&post).Association("Comments").Delete(comments11) + + var comments3 []Comment + DB.Model(&post).Related(&comments3) + if !compareComments(comments3, []string{"Comment 3"}) { + t.Errorf("Delete an existing resource for has many relations") + } + + if DB.Model(&post).Association("Comments").Count() != 1 { + t.Errorf("Post's comments count should be 1 after Delete 2") + } + + // Replace + DB.Model(&Post{Id: 999}).Association("Comments").Replace() + + var comments4 []Comment + DB.Model(&post).Related(&comments4) + if len(comments4) == 0 { + t.Errorf("Replace for other resource should not clear all comments") + } + + DB.Model(&post).Association("Comments").Replace(&Comment{Content: "Comment 4"}, &Comment{Content: "Comment 5"}) + + var comments41 []Comment + DB.Model(&post).Related(&comments41) + if !compareComments(comments41, []string{"Comment 4", "Comment 5"}) { + t.Errorf("Replace has many relations") + } + + // Clear + DB.Model(&Post{Id: 999}).Association("Comments").Clear() + + var comments5 []Comment + DB.Model(&post).Related(&comments5) + if len(comments5) == 0 { + t.Errorf("Clear should not clear all comments") + } + + DB.Model(&post).Association("Comments").Clear() + + var comments51 []Comment + DB.Model(&post).Related(&comments51) + if len(comments51) != 0 { + t.Errorf("Clear has many relations") + } + + // Check Association mode with soft delete + var comment6 = Comment{ + Content: "comment 6", + } + 
DB.Model(&post).Association("Comments").Append(&comment6) + + if count := DB.Model(&post).Association("Comments").Count(); count != 1 { + t.Errorf("post's comments count should be 1 after Append, but got %v", count) + } + + DB.Delete(&comment6) + + if count := DB.Model(&post).Association("Comments").Count(); count != 0 { + t.Errorf("post's comments count should be 0 after comment been deleted, but got %v", count) + } + + var comments6 []Comment + if DB.Model(&post).Association("Comments").Find(&comments6); len(comments6) != 0 { + t.Errorf("post's comments count should be 0 when find with Find, but got %v", len(comments6)) + } + + if count := DB.Unscoped().Model(&post).Association("Comments").Count(); count != 1 { + t.Errorf("post's comments count should be 1 when query with Unscoped, but got %v", count) + } + + var comments61 []Comment + if DB.Unscoped().Model(&post).Association("Comments").Find(&comments61); len(comments61) != 1 { + t.Errorf("post's comments count should be 1 when query with Unscoped, but got %v", len(comments61)) + } +} + +func TestHasManyOverrideForeignKey1(t *testing.T) { + type Profile struct { + gorm.Model + Name string + UserRefer uint + } + + type User struct { + gorm.Model + Profile []Profile `gorm:"ForeignKey:UserRefer"` + } + + if relation, ok := DB.NewScope(&User{}).FieldByName("Profile"); ok { + if relation.Relationship.Kind != "has_many" || + !reflect.DeepEqual(relation.Relationship.ForeignFieldNames, []string{"UserRefer"}) || + !reflect.DeepEqual(relation.Relationship.AssociationForeignFieldNames, []string{"ID"}) { + t.Errorf("Override belongs to foreign key with tag") + } + } +} + +func TestHasManyOverrideForeignKey2(t *testing.T) { + type Profile struct { + gorm.Model + Name string + UserID uint + } + + type User struct { + gorm.Model + Refer string + Profile []Profile `gorm:"ForeignKey:UserID;AssociationForeignKey:Refer"` + } + + if relation, ok := DB.NewScope(&User{}).FieldByName("Profile"); ok { + if relation.Relationship.Kind != "has_many" || + !reflect.DeepEqual(relation.Relationship.ForeignFieldNames, []string{"UserID"}) || + !reflect.DeepEqual(relation.Relationship.AssociationForeignFieldNames, []string{"Refer"}) { + t.Errorf("Override belongs to foreign key with tag") + } + } +} + +func TestManyToMany(t *testing.T) { + DB.Raw("delete from languages") + var languages = []Language{{Name: "ZH"}, {Name: "EN"}} + user := User{Name: "Many2Many", Languages: languages} + DB.Save(&user) + + // Query + var newLanguages []Language + DB.Model(&user).Related(&newLanguages, "Languages") + if len(newLanguages) != len([]string{"ZH", "EN"}) { + t.Errorf("Query many to many relations") + } + + DB.Model(&user).Association("Languages").Find(&newLanguages) + if len(newLanguages) != len([]string{"ZH", "EN"}) { + t.Errorf("Should be able to find many to many relations") + } + + if DB.Model(&user).Association("Languages").Count() != len([]string{"ZH", "EN"}) { + t.Errorf("Count should return correct result") + } + + // Append + DB.Model(&user).Association("Languages").Append(&Language{Name: "DE"}) + if DB.Where("name = ?", "DE").First(&Language{}).RecordNotFound() { + t.Errorf("New record should be saved when append") + } + + languageA := Language{Name: "AA"} + DB.Save(&languageA) + DB.Model(&User{Id: user.Id}).Association("Languages").Append(&languageA) + + languageC := Language{Name: "CC"} + DB.Save(&languageC) + DB.Model(&user).Association("Languages").Append(&[]Language{{Name: "BB"}, languageC}) + + DB.Model(&User{Id: 
user.Id}).Association("Languages").Append(&[]Language{{Name: "DD"}, {Name: "EE"}}) + + totalLanguages := []string{"ZH", "EN", "DE", "AA", "BB", "CC", "DD", "EE"} + + if DB.Model(&user).Association("Languages").Count() != len(totalLanguages) { + t.Errorf("All appended languages should be saved") + } + + // Delete + user.Languages = []Language{} + DB.Model(&user).Association("Languages").Find(&user.Languages) + + var language Language + DB.Where("name = ?", "EE").First(&language) + DB.Model(&user).Association("Languages").Delete(language, &language) + + if DB.Model(&user).Association("Languages").Count() != len(totalLanguages)-1 || len(user.Languages) != len(totalLanguages)-1 { + t.Errorf("Relations should be deleted with Delete") + } + if DB.Where("name = ?", "EE").First(&Language{}).RecordNotFound() { + t.Errorf("Language EE should not be deleted") + } + + DB.Where("name IN (?)", []string{"CC", "DD"}).Find(&languages) + + user2 := User{Name: "Many2Many_User2", Languages: languages} + DB.Save(&user2) + + DB.Model(&user).Association("Languages").Delete(languages, &languages) + if DB.Model(&user).Association("Languages").Count() != len(totalLanguages)-3 || len(user.Languages) != len(totalLanguages)-3 { + t.Errorf("Relations should be deleted with Delete") + } + + if DB.Model(&user2).Association("Languages").Count() == 0 { + t.Errorf("Other user's relations should not be deleted") + } + + // Replace + var languageB Language + DB.Where("name = ?", "BB").First(&languageB) + DB.Model(&user).Association("Languages").Replace(languageB) + if len(user.Languages) != 1 || DB.Model(&user).Association("Languages").Count() != 1 { + t.Errorf("Relations should be replaced") + } + + DB.Model(&user).Association("Languages").Replace() + if len(user.Languages) != 0 || DB.Model(&user).Association("Languages").Count() != 0 { + t.Errorf("Relations should be replaced with empty") + } + + DB.Model(&user).Association("Languages").Replace(&[]Language{{Name: "FF"}, {Name: "JJ"}}) + if len(user.Languages) != 2 || DB.Model(&user).Association("Languages").Count() != len([]string{"FF", "JJ"}) { + t.Errorf("Relations should be replaced") + } + + // Clear + DB.Model(&user).Association("Languages").Clear() + if len(user.Languages) != 0 || DB.Model(&user).Association("Languages").Count() != 0 { + t.Errorf("Relations should be cleared") + } + + // Check Association mode with soft delete + var language6 = Language{ + Name: "language 6", + } + DB.Model(&user).Association("Languages").Append(&language6) + + if count := DB.Model(&user).Association("Languages").Count(); count != 1 { + t.Errorf("user's languages count should be 1 after Append, but got %v", count) + } + + DB.Delete(&language6) + + if count := DB.Model(&user).Association("Languages").Count(); count != 0 { + t.Errorf("user's languages count should be 0 after language been deleted, but got %v", count) + } + + var languages6 []Language + if DB.Model(&user).Association("Languages").Find(&languages6); len(languages6) != 0 { + t.Errorf("user's languages count should be 0 when find with Find, but got %v", len(languages6)) + } + + if count := DB.Unscoped().Model(&user).Association("Languages").Count(); count != 1 { + t.Errorf("user's languages count should be 1 when query with Unscoped, but got %v", count) + } + + var languages61 []Language + if DB.Unscoped().Model(&user).Association("Languages").Find(&languages61); len(languages61) != 1 { + t.Errorf("user's languages count should be 1 when query with Unscoped, but got %v", len(languages61)) + } +} + +func TestRelated(t 
*testing.T) { + user := User{ + Name: "jinzhu", + BillingAddress: Address{Address1: "Billing Address - Address 1"}, + ShippingAddress: Address{Address1: "Shipping Address - Address 1"}, + Emails: []Email{{Email: "jinzhu@example.com"}, {Email: "jinzhu-2@example@example.com"}}, + CreditCard: CreditCard{Number: "1234567890"}, + Company: Company{Name: "company1"}, + } + + if err := DB.Save(&user).Error; err != nil { + t.Errorf("No error should happen when saving user") + } + + if user.CreditCard.ID == 0 { + t.Errorf("After user save, credit card should have id") + } + + if user.BillingAddress.ID == 0 { + t.Errorf("After user save, billing address should have id") + } + + if user.Emails[0].Id == 0 { + t.Errorf("After user save, billing address should have id") + } + + var emails []Email + DB.Model(&user).Related(&emails) + if len(emails) != 2 { + t.Errorf("Should have two emails") + } + + var emails2 []Email + DB.Model(&user).Where("email = ?", "jinzhu@example.com").Related(&emails2) + if len(emails2) != 1 { + t.Errorf("Should have two emails") + } + + var emails3 []*Email + DB.Model(&user).Related(&emails3) + if len(emails3) != 2 { + t.Errorf("Should have two emails") + } + + var user1 User + DB.Model(&user).Related(&user1.Emails) + if len(user1.Emails) != 2 { + t.Errorf("Should have only one email match related condition") + } + + var address1 Address + DB.Model(&user).Related(&address1, "BillingAddressId") + if address1.Address1 != "Billing Address - Address 1" { + t.Errorf("Should get billing address from user correctly") + } + + user1 = User{} + DB.Model(&address1).Related(&user1, "BillingAddressId") + if DB.NewRecord(user1) { + t.Errorf("Should get user from address correctly") + } + + var user2 User + DB.Model(&emails[0]).Related(&user2) + if user2.Id != user.Id || user2.Name != user.Name { + t.Errorf("Should get user from email correctly") + } + + var creditcard CreditCard + var user3 User + DB.First(&creditcard, "number = ?", "1234567890") + DB.Model(&creditcard).Related(&user3) + if user3.Id != user.Id || user3.Name != user.Name { + t.Errorf("Should get user from credit card correctly") + } + + if !DB.Model(&CreditCard{}).Related(&User{}).RecordNotFound() { + t.Errorf("RecordNotFound for Related") + } + + var company Company + if DB.Model(&user).Related(&company, "Company").RecordNotFound() || company.Name != "company1" { + t.Errorf("RecordNotFound for Related") + } +} + +func TestForeignKey(t *testing.T) { + for _, structField := range DB.NewScope(&User{}).GetStructFields() { + for _, foreignKey := range []string{"BillingAddressID", "ShippingAddressId", "CompanyID"} { + if structField.Name == foreignKey && !structField.IsForeignKey { + t.Errorf(fmt.Sprintf("%v should be foreign key", foreignKey)) + } + } + } + + for _, structField := range DB.NewScope(&Email{}).GetStructFields() { + for _, foreignKey := range []string{"UserId"} { + if structField.Name == foreignKey && !structField.IsForeignKey { + t.Errorf(fmt.Sprintf("%v should be foreign key", foreignKey)) + } + } + } + + for _, structField := range DB.NewScope(&Post{}).GetStructFields() { + for _, foreignKey := range []string{"CategoryId", "MainCategoryId"} { + if structField.Name == foreignKey && !structField.IsForeignKey { + t.Errorf(fmt.Sprintf("%v should be foreign key", foreignKey)) + } + } + } + + for _, structField := range DB.NewScope(&Comment{}).GetStructFields() { + for _, foreignKey := range []string{"PostId"} { + if structField.Name == foreignKey && !structField.IsForeignKey { + t.Errorf(fmt.Sprintf("%v should be 
foreign key", foreignKey)) + } + } + } +} + +func testForeignKey(t *testing.T, source interface{}, sourceFieldName string, target interface{}, targetFieldName string) { + if dialect := os.Getenv("GORM_DIALECT"); dialect == "" || dialect == "sqlite" { + // sqlite does not support ADD CONSTRAINT in ALTER TABLE + return + } + targetScope := DB.NewScope(target) + targetTableName := targetScope.TableName() + modelScope := DB.NewScope(source) + modelField, ok := modelScope.FieldByName(sourceFieldName) + if !ok { + t.Fatalf(fmt.Sprintf("Failed to get field by name: %v", sourceFieldName)) + } + targetField, ok := targetScope.FieldByName(targetFieldName) + if !ok { + t.Fatalf(fmt.Sprintf("Failed to get field by name: %v", targetFieldName)) + } + dest := fmt.Sprintf("%v(%v)", targetTableName, targetField.DBName) + err := DB.Model(source).AddForeignKey(modelField.DBName, dest, "CASCADE", "CASCADE").Error + if err != nil { + t.Fatalf(fmt.Sprintf("Failed to create foreign key: %v", err)) + } +} + +func TestLongForeignKey(t *testing.T) { + testForeignKey(t, &NotSoLongTableName{}, "ReallyLongThingID", &ReallyLongTableNameToTestMySQLNameLengthLimit{}, "ID") +} + +func TestLongForeignKeyWithShortDest(t *testing.T) { + testForeignKey(t, &ReallyLongThingThatReferencesShort{}, "ShortID", &Short{}, "ID") +} + +func TestHasManyChildrenWithOneStruct(t *testing.T) { + category := Category{ + Name: "main", + Categories: []Category{ + {Name: "sub1"}, + {Name: "sub2"}, + }, + } + + DB.Save(&category) +} + +func TestSkipSaveAssociation(t *testing.T) { + type Company struct { + gorm.Model + Name string + } + + type User struct { + gorm.Model + Name string + CompanyID uint + Company Company `gorm:"save_associations:false"` + } + DB.AutoMigrate(&Company{}, &User{}) + + DB.Save(&User{Name: "jinzhu", Company: Company{Name: "skip_save_association"}}) + + if !DB.Where("name = ?", "skip_save_association").First(&Company{}).RecordNotFound() { + t.Errorf("Company skip_save_association should not been saved") + } +} diff --git a/vendor/github.com/jinzhu/gorm/callback_system_test.go b/vendor/github.com/jinzhu/gorm/callback_system_test.go new file mode 100644 index 000000000000..13ca3f428305 --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/callback_system_test.go @@ -0,0 +1,112 @@ +package gorm + +import ( + "reflect" + "runtime" + "strings" + "testing" +) + +func equalFuncs(funcs []*func(s *Scope), fnames []string) bool { + var names []string + for _, f := range funcs { + fnames := strings.Split(runtime.FuncForPC(reflect.ValueOf(*f).Pointer()).Name(), ".") + names = append(names, fnames[len(fnames)-1]) + } + return reflect.DeepEqual(names, fnames) +} + +func create(s *Scope) {} +func beforeCreate1(s *Scope) {} +func beforeCreate2(s *Scope) {} +func afterCreate1(s *Scope) {} +func afterCreate2(s *Scope) {} + +func TestRegisterCallback(t *testing.T) { + var callback = &Callback{} + + callback.Create().Register("before_create1", beforeCreate1) + callback.Create().Register("before_create2", beforeCreate2) + callback.Create().Register("create", create) + callback.Create().Register("after_create1", afterCreate1) + callback.Create().Register("after_create2", afterCreate2) + + if !equalFuncs(callback.creates, []string{"beforeCreate1", "beforeCreate2", "create", "afterCreate1", "afterCreate2"}) { + t.Errorf("register callback") + } +} + +func TestRegisterCallbackWithOrder(t *testing.T) { + var callback1 = &Callback{} + callback1.Create().Register("before_create1", beforeCreate1) + callback1.Create().Register("create", create) + 
callback1.Create().Register("after_create1", afterCreate1) + callback1.Create().Before("after_create1").Register("after_create2", afterCreate2) + if !equalFuncs(callback1.creates, []string{"beforeCreate1", "create", "afterCreate2", "afterCreate1"}) { + t.Errorf("register callback with order") + } + + var callback2 = &Callback{} + + callback2.Update().Register("create", create) + callback2.Update().Before("create").Register("before_create1", beforeCreate1) + callback2.Update().After("after_create2").Register("after_create1", afterCreate1) + callback2.Update().Before("before_create1").Register("before_create2", beforeCreate2) + callback2.Update().Register("after_create2", afterCreate2) + + if !equalFuncs(callback2.updates, []string{"beforeCreate2", "beforeCreate1", "create", "afterCreate2", "afterCreate1"}) { + t.Errorf("register callback with order") + } +} + +func TestRegisterCallbackWithComplexOrder(t *testing.T) { + var callback1 = &Callback{} + + callback1.Query().Before("after_create1").After("before_create1").Register("create", create) + callback1.Query().Register("before_create1", beforeCreate1) + callback1.Query().Register("after_create1", afterCreate1) + + if !equalFuncs(callback1.queries, []string{"beforeCreate1", "create", "afterCreate1"}) { + t.Errorf("register callback with order") + } + + var callback2 = &Callback{} + + callback2.Delete().Before("after_create1").After("before_create1").Register("create", create) + callback2.Delete().Before("create").Register("before_create1", beforeCreate1) + callback2.Delete().After("before_create1").Register("before_create2", beforeCreate2) + callback2.Delete().Register("after_create1", afterCreate1) + callback2.Delete().After("after_create1").Register("after_create2", afterCreate2) + + if !equalFuncs(callback2.deletes, []string{"beforeCreate1", "beforeCreate2", "create", "afterCreate1", "afterCreate2"}) { + t.Errorf("register callback with order") + } +} + +func replaceCreate(s *Scope) {} + +func TestReplaceCallback(t *testing.T) { + var callback = &Callback{} + + callback.Create().Before("after_create1").After("before_create1").Register("create", create) + callback.Create().Register("before_create1", beforeCreate1) + callback.Create().Register("after_create1", afterCreate1) + callback.Create().Replace("create", replaceCreate) + + if !equalFuncs(callback.creates, []string{"beforeCreate1", "replaceCreate", "afterCreate1"}) { + t.Errorf("replace callback") + } +} + +func TestRemoveCallback(t *testing.T) { + var callback = &Callback{} + + callback.Create().Before("after_create1").After("before_create1").Register("create", create) + callback.Create().Register("before_create1", beforeCreate1) + callback.Create().Register("after_create1", afterCreate1) + callback.Create().Remove("create") + + if !equalFuncs(callback.creates, []string{"beforeCreate1", "afterCreate1"}) { + t.Errorf("remove callback") + } +} diff --git a/vendor/github.com/jinzhu/gorm/callbacks_test.go b/vendor/github.com/jinzhu/gorm/callbacks_test.go new file mode 100644 index 000000000000..a58913d767de --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/callbacks_test.go @@ -0,0 +1,177 @@ +package gorm_test + +import ( + "errors" + + "github.com/jinzhu/gorm" + + "reflect" + "testing" +) + +func (s *Product) BeforeCreate() (err error) { + if s.Code == "Invalid" { + err = errors.New("invalid product") + } + s.BeforeCreateCallTimes = s.BeforeCreateCallTimes + 1 + return +} + +func (s *Product) BeforeUpdate() (err error) { + if s.Code == "dont_update" { + err = errors.New("can't update") 
+ } + s.BeforeUpdateCallTimes = s.BeforeUpdateCallTimes + 1 + return +} + +func (s *Product) BeforeSave() (err error) { + if s.Code == "dont_save" { + err = errors.New("can't save") + } + s.BeforeSaveCallTimes = s.BeforeSaveCallTimes + 1 + return +} + +func (s *Product) AfterFind() { + s.AfterFindCallTimes = s.AfterFindCallTimes + 1 +} + +func (s *Product) AfterCreate(tx *gorm.DB) { + tx.Model(s).UpdateColumn(Product{AfterCreateCallTimes: s.AfterCreateCallTimes + 1}) +} + +func (s *Product) AfterUpdate() { + s.AfterUpdateCallTimes = s.AfterUpdateCallTimes + 1 +} + +func (s *Product) AfterSave() (err error) { + if s.Code == "after_save_error" { + err = errors.New("can't save") + } + s.AfterSaveCallTimes = s.AfterSaveCallTimes + 1 + return +} + +func (s *Product) BeforeDelete() (err error) { + if s.Code == "dont_delete" { + err = errors.New("can't delete") + } + s.BeforeDeleteCallTimes = s.BeforeDeleteCallTimes + 1 + return +} + +func (s *Product) AfterDelete() (err error) { + if s.Code == "after_delete_error" { + err = errors.New("can't delete") + } + s.AfterDeleteCallTimes = s.AfterDeleteCallTimes + 1 + return +} + +func (s *Product) GetCallTimes() []int64 { + return []int64{s.BeforeCreateCallTimes, s.BeforeSaveCallTimes, s.BeforeUpdateCallTimes, s.AfterCreateCallTimes, s.AfterSaveCallTimes, s.AfterUpdateCallTimes, s.BeforeDeleteCallTimes, s.AfterDeleteCallTimes, s.AfterFindCallTimes} +} + +func TestRunCallbacks(t *testing.T) { + p := Product{Code: "unique_code", Price: 100} + DB.Save(&p) + + if !reflect.DeepEqual(p.GetCallTimes(), []int64{1, 1, 0, 1, 1, 0, 0, 0, 0}) { + t.Errorf("Callbacks should be invoked successfully, %v", p.GetCallTimes()) + } + + DB.Where("Code = ?", "unique_code").First(&p) + if !reflect.DeepEqual(p.GetCallTimes(), []int64{1, 1, 0, 1, 0, 0, 0, 0, 1}) { + t.Errorf("After callbacks values are not saved, %v", p.GetCallTimes()) + } + + p.Price = 200 + DB.Save(&p) + if !reflect.DeepEqual(p.GetCallTimes(), []int64{1, 2, 1, 1, 1, 1, 0, 0, 1}) { + t.Errorf("After update callbacks should be invoked successfully, %v", p.GetCallTimes()) + } + + var products []Product + DB.Find(&products, "code = ?", "unique_code") + if products[0].AfterFindCallTimes != 2 { + t.Errorf("AfterFind callbacks should work with slice") + } + + DB.Where("Code = ?", "unique_code").First(&p) + if !reflect.DeepEqual(p.GetCallTimes(), []int64{1, 2, 1, 1, 0, 0, 0, 0, 2}) { + t.Errorf("After update callbacks values are not saved, %v", p.GetCallTimes()) + } + + DB.Delete(&p) + if !reflect.DeepEqual(p.GetCallTimes(), []int64{1, 2, 1, 1, 0, 0, 1, 1, 2}) { + t.Errorf("After delete callbacks should be invoked successfully, %v", p.GetCallTimes()) + } + + if DB.Where("Code = ?", "unique_code").First(&p).Error == nil { + t.Errorf("Should not find a deleted record") + } +} + +func TestCallbacksWithErrors(t *testing.T) { + p := Product{Code: "Invalid", Price: 100} + if DB.Save(&p).Error == nil { + t.Errorf("An error from before create callbacks should happen when creating with an invalid value") + } + + if DB.Where("code = ?", "Invalid").First(&Product{}).Error == nil { + t.Errorf("Should not save a record that has errors") + } + + if DB.Save(&Product{Code: "dont_save", Price: 100}).Error == nil { + t.Errorf("An error from before save callbacks should happen when creating with an invalid value") + } + + p2 := Product{Code: "update_callback", Price: 100} + DB.Save(&p2) + + p2.Code = "dont_update" + if DB.Save(&p2).Error == nil { + t.Errorf("An error from before update callbacks should happen when updating with an invalid value") + } + + if
DB.Where("code = ?", "update_callback").First(&Product{}).Error != nil { + t.Errorf("Record Should not be updated due to errors happened in before update callback") + } + + if DB.Where("code = ?", "dont_update").First(&Product{}).Error == nil { + t.Errorf("Record Should not be updated due to errors happened in before update callback") + } + + p2.Code = "dont_save" + if DB.Save(&p2).Error == nil { + t.Errorf("An error from before save callbacks happened when update with invalid value") + } + + p3 := Product{Code: "dont_delete", Price: 100} + DB.Save(&p3) + if DB.Delete(&p3).Error == nil { + t.Errorf("An error from before delete callbacks happened when delete") + } + + if DB.Where("Code = ?", "dont_delete").First(&p3).Error != nil { + t.Errorf("An error from before delete callbacks happened") + } + + p4 := Product{Code: "after_save_error", Price: 100} + DB.Save(&p4) + if err := DB.First(&Product{}, "code = ?", "after_save_error").Error; err == nil { + t.Errorf("Record should be reverted if get an error in after save callback") + } + + p5 := Product{Code: "after_delete_error", Price: 100} + DB.Save(&p5) + if err := DB.First(&Product{}, "code = ?", "after_delete_error").Error; err != nil { + t.Errorf("Record should be found") + } + + DB.Delete(&p5) + if err := DB.First(&Product{}, "code = ?", "after_delete_error").Error; err != nil { + t.Errorf("Record shouldn't be deleted because of an error happened in after delete callback") + } +} diff --git a/vendor/github.com/jinzhu/gorm/create_test.go b/vendor/github.com/jinzhu/gorm/create_test.go new file mode 100644 index 000000000000..d67d34fc782a --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/create_test.go @@ -0,0 +1,180 @@ +package gorm_test + +import ( + "os" + "reflect" + "testing" + "time" +) + +func TestCreate(t *testing.T) { + float := 35.03554004971999 + now := time.Now() + user := User{Name: "CreateUser", Age: 18, Birthday: &now, UserNum: Num(111), PasswordHash: []byte{'f', 'a', 'k', '4'}, Latitude: float} + + if !DB.NewRecord(user) || !DB.NewRecord(&user) { + t.Error("User should be new record before create") + } + + if count := DB.Save(&user).RowsAffected; count != 1 { + t.Error("There should be one record be affected when create record") + } + + if DB.NewRecord(user) || DB.NewRecord(&user) { + t.Error("User should not new record after save") + } + + var newUser User + DB.First(&newUser, user.Id) + + if !reflect.DeepEqual(newUser.PasswordHash, []byte{'f', 'a', 'k', '4'}) { + t.Errorf("User's PasswordHash should be saved ([]byte)") + } + + if newUser.Age != 18 { + t.Errorf("User's Age should be saved (int)") + } + + if newUser.UserNum != Num(111) { + t.Errorf("User's UserNum should be saved (custom type)") + } + + if newUser.Latitude != float { + t.Errorf("Float64 should not be changed after save") + } + + if user.CreatedAt.IsZero() { + t.Errorf("Should have created_at after create") + } + + if newUser.CreatedAt.IsZero() { + t.Errorf("Should have created_at after create") + } + + DB.Model(user).Update("name", "create_user_new_name") + DB.First(&user, user.Id) + if user.CreatedAt.Format(time.RFC3339Nano) != newUser.CreatedAt.Format(time.RFC3339Nano) { + t.Errorf("CreatedAt should not be changed after update") + } +} + +func TestCreateWithAutoIncrement(t *testing.T) { + if dialect := os.Getenv("GORM_DIALECT"); dialect != "postgres" { + t.Skip("Skipping this because only postgres properly support auto_increment on a non-primary_key column") + } + user1 := User{} + user2 := User{} + + DB.Create(&user1) + DB.Create(&user2) + + if 
user2.Sequence-user1.Sequence != 1 { + t.Errorf("Auto increment should apply on Sequence") + } +} + +func TestCreateWithNoGORMPrimaryKey(t *testing.T) { + if dialect := os.Getenv("GORM_DIALECT"); dialect == "mssql" { + t.Skip("Skipping this because MSSQL will return identity only if the table has an Id column") + } + + jt := JoinTable{From: 1, To: 2} + err := DB.Create(&jt).Error + if err != nil { + t.Errorf("No error should happen when creating a record without a GORM primary key. In the database this primary key exists and is the union of 2 or more fields\n But got: %s", err) + } +} + +func TestCreateWithNoStdPrimaryKeyAndDefaultValues(t *testing.T) { + animal := Animal{Name: "Ferdinand"} + if DB.Save(&animal).Error != nil { + t.Errorf("No error should happen when creating a record without a std primary key") + } + + if animal.Counter == 0 { + t.Errorf("Non-std primary key should be filled after create") + } + + if animal.Name != "Ferdinand" { + t.Errorf("Default value should be overridden") + } + + // Test create with default value not overridden + an := Animal{From: "nerdz"} + + if DB.Save(&an).Error != nil { + t.Errorf("No error should happen when creating a record without a std primary key") + } + + // We must fetch the record again to get the default fields updated + // (we can't read them from the update statements, since SQL defaults can be expressions + // whose type differs from the field's type, e.g. a time.Time field with a default of "now()") + DB.Model(Animal{}).Where(&Animal{Counter: an.Counter}).First(&an) + + if an.Name != "galeone" { + t.Errorf("Default value should fill the field. But got %v", an.Name) + } +} + +func TestAnonymousScanner(t *testing.T) { + user := User{Name: "anonymous_scanner", Role: Role{Name: "admin"}} + DB.Save(&user) + + var user2 User + DB.First(&user2, "name = ?", "anonymous_scanner") + if user2.Role.Name != "admin" { + t.Errorf("Should be able to get anonymous scanner") + } + + if !user2.IsAdmin() { + t.Errorf("Should be able to get anonymous scanner") + } +} + +func TestAnonymousField(t *testing.T) { + user := User{Name: "anonymous_field", Company: Company{Name: "company"}} + DB.Save(&user) + + var user2 User + DB.First(&user2, "name = ?", "anonymous_field") + DB.Model(&user2).Related(&user2.Company) + if user2.Company.Name != "company" { + t.Errorf("Should be able to get anonymous field") + } +} + +func TestSelectWithCreate(t *testing.T) { + user := getPreparedUser("select_user", "select_with_create") + DB.Select("Name", "BillingAddress", "CreditCard", "Company", "Emails").Create(user) + + var queryuser User + DB.Preload("BillingAddress").Preload("ShippingAddress"). + Preload("CreditCard").Preload("Emails").Preload("Company").First(&queryuser, user.Id) + + if queryuser.Name != user.Name || queryuser.Age == user.Age { + t.Errorf("Should only create users with name column") + } + + if queryuser.BillingAddressID.Int64 == 0 || queryuser.ShippingAddressId != 0 || + queryuser.CreditCard.ID == 0 || len(queryuser.Emails) == 0 { + t.Errorf("Should only create selected relationships") + } +} + +func TestOmitWithCreate(t *testing.T) { + user := getPreparedUser("omit_user", "omit_with_create") + DB.Omit("Name", "BillingAddress", "CreditCard", "Company", "Emails").Create(user) + + var queryuser User + DB.Preload("BillingAddress").Preload("ShippingAddress").
+ Preload("CreditCard").Preload("Emails").Preload("Company").First(&queryuser, user.Id) + + if queryuser.Name == user.Name || queryuser.Age != user.Age { + t.Errorf("Should only create users with age column") + } + + if queryuser.BillingAddressID.Int64 != 0 || queryuser.ShippingAddressId == 0 || + queryuser.CreditCard.ID != 0 || len(queryuser.Emails) != 0 { + t.Errorf("Should not create omitted relationships") + } +} diff --git a/vendor/github.com/jinzhu/gorm/customize_column_test.go b/vendor/github.com/jinzhu/gorm/customize_column_test.go new file mode 100644 index 000000000000..ddb536b870c8 --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/customize_column_test.go @@ -0,0 +1,281 @@ +package gorm_test + +import ( + "testing" + "time" + + "github.com/jinzhu/gorm" +) + +type CustomizeColumn struct { + ID int64 `gorm:"column:mapped_id; primary_key:yes"` + Name string `gorm:"column:mapped_name"` + Date *time.Time `gorm:"column:mapped_time"` +} + +// Make sure an ignored field does not interfere with another field's custom +// column name that matches the ignored field. +type CustomColumnAndIgnoredFieldClash struct { + Body string `sql:"-"` + RawBody string `gorm:"column:body"` +} + +func TestCustomizeColumn(t *testing.T) { + col := "mapped_name" + DB.DropTable(&CustomizeColumn{}) + DB.AutoMigrate(&CustomizeColumn{}) + + scope := DB.NewScope(&CustomizeColumn{}) + if !scope.Dialect().HasColumn(scope.TableName(), col) { + t.Errorf("CustomizeColumn should have column %s", col) + } + + col = "mapped_id" + if scope.PrimaryKey() != col { + t.Errorf("CustomizeColumn should have primary key %s, but got %q", col, scope.PrimaryKey()) + } + + expected := "foo" + now := time.Now() + cc := CustomizeColumn{ID: 666, Name: expected, Date: &now} + + if count := DB.Create(&cc).RowsAffected; count != 1 { + t.Error("There should be one record be affected when create record") + } + + var cc1 CustomizeColumn + DB.First(&cc1, 666) + + if cc1.Name != expected { + t.Errorf("Failed to query CustomizeColumn") + } + + cc.Name = "bar" + DB.Save(&cc) + + var cc2 CustomizeColumn + DB.First(&cc2, 666) + if cc2.Name != "bar" { + t.Errorf("Failed to query CustomizeColumn") + } +} + +func TestCustomColumnAndIgnoredFieldClash(t *testing.T) { + DB.DropTable(&CustomColumnAndIgnoredFieldClash{}) + if err := DB.AutoMigrate(&CustomColumnAndIgnoredFieldClash{}).Error; err != nil { + t.Errorf("Should not raise error: %s", err) + } +} + +type CustomizePerson struct { + IdPerson string `gorm:"column:idPerson;primary_key:true"` + Accounts []CustomizeAccount `gorm:"many2many:PersonAccount;associationforeignkey:idAccount;foreignkey:idPerson"` +} + +type CustomizeAccount struct { + IdAccount string `gorm:"column:idAccount;primary_key:true"` + Name string +} + +func TestManyToManyWithCustomizedColumn(t *testing.T) { + DB.DropTable(&CustomizePerson{}, &CustomizeAccount{}, "PersonAccount") + DB.AutoMigrate(&CustomizePerson{}, &CustomizeAccount{}) + + account := CustomizeAccount{IdAccount: "account", Name: "id1"} + person := CustomizePerson{ + IdPerson: "person", + Accounts: []CustomizeAccount{account}, + } + + if err := DB.Create(&account).Error; err != nil { + t.Errorf("no error should happen, but got %v", err) + } + + if err := DB.Create(&person).Error; err != nil { + t.Errorf("no error should happen, but got %v", err) + } + + var person1 CustomizePerson + scope := DB.NewScope(nil) + if err := DB.Preload("Accounts").First(&person1, scope.Quote("idPerson")+" = ?", person.IdPerson).Error; err != nil { + t.Errorf("no error should happen when 
preloading customized column many2many relations, but got %v", err) + } + + if len(person1.Accounts) != 1 || person1.Accounts[0].IdAccount != "account" { + t.Errorf("should preload correct accounts") + } +} + +type CustomizeUser struct { + gorm.Model + Email string `sql:"column:email_address"` +} + +type CustomizeInvitation struct { + gorm.Model + Address string `sql:"column:invitation"` + Person *CustomizeUser `gorm:"foreignkey:Email;associationforeignkey:invitation"` +} + +func TestOneToOneWithCustomizedColumn(t *testing.T) { + DB.DropTable(&CustomizeUser{}, &CustomizeInvitation{}) + DB.AutoMigrate(&CustomizeUser{}, &CustomizeInvitation{}) + + user := CustomizeUser{ + Email: "hello@example.com", + } + invitation := CustomizeInvitation{ + Address: "hello@example.com", + } + + DB.Create(&user) + DB.Create(&invitation) + + var invitation2 CustomizeInvitation + if err := DB.Preload("Person").Find(&invitation2, invitation.ID).Error; err != nil { + t.Errorf("no error should happen, but got %v", err) + } + + if invitation2.Person.Email != user.Email { + t.Errorf("Should preload one to one relation with customize foreign keys") + } +} + +type PromotionDiscount struct { + gorm.Model + Name string + Coupons []*PromotionCoupon `gorm:"ForeignKey:discount_id"` + Rule *PromotionRule `gorm:"ForeignKey:discount_id"` + Benefits []PromotionBenefit `gorm:"ForeignKey:promotion_id"` +} + +type PromotionBenefit struct { + gorm.Model + Name string + PromotionID uint + Discount PromotionDiscount `gorm:"ForeignKey:promotion_id"` +} + +type PromotionCoupon struct { + gorm.Model + Code string + DiscountID uint + Discount PromotionDiscount +} + +type PromotionRule struct { + gorm.Model + Name string + Begin *time.Time + End *time.Time + DiscountID uint + Discount *PromotionDiscount +} + +func TestOneToManyWithCustomizedColumn(t *testing.T) { + DB.DropTable(&PromotionDiscount{}, &PromotionCoupon{}) + DB.AutoMigrate(&PromotionDiscount{}, &PromotionCoupon{}) + + discount := PromotionDiscount{ + Name: "Happy New Year", + Coupons: []*PromotionCoupon{ + {Code: "newyear1"}, + {Code: "newyear2"}, + }, + } + + if err := DB.Create(&discount).Error; err != nil { + t.Errorf("no error should happen but got %v", err) + } + + var discount1 PromotionDiscount + if err := DB.Preload("Coupons").First(&discount1, "id = ?", discount.ID).Error; err != nil { + t.Errorf("no error should happen but got %v", err) + } + + if len(discount.Coupons) != 2 { + t.Errorf("should find two coupons") + } + + var coupon PromotionCoupon + if err := DB.Preload("Discount").First(&coupon, "code = ?", "newyear1").Error; err != nil { + t.Errorf("no error should happen but got %v", err) + } + + if coupon.Discount.Name != "Happy New Year" { + t.Errorf("should preload discount from coupon") + } +} + +func TestHasOneWithPartialCustomizedColumn(t *testing.T) { + DB.DropTable(&PromotionDiscount{}, &PromotionRule{}) + DB.AutoMigrate(&PromotionDiscount{}, &PromotionRule{}) + + var begin = time.Now() + var end = time.Now().Add(24 * time.Hour) + discount := PromotionDiscount{ + Name: "Happy New Year 2", + Rule: &PromotionRule{ + Name: "time_limited", + Begin: &begin, + End: &end, + }, + } + + if err := DB.Create(&discount).Error; err != nil { + t.Errorf("no error should happen but got %v", err) + } + + var discount1 PromotionDiscount + if err := DB.Preload("Rule").First(&discount1, "id = ?", discount.ID).Error; err != nil { + t.Errorf("no error should happen but got %v", err) + } + + if discount.Rule.Begin.Format(time.RFC3339Nano) != begin.Format(time.RFC3339Nano) { + 
t.Errorf("Should be able to preload Rule") + } + + var rule PromotionRule + if err := DB.Preload("Discount").First(&rule, "name = ?", "time_limited").Error; err != nil { + t.Errorf("no error should happen but got %v", err) + } + + if rule.Discount.Name != "Happy New Year 2" { + t.Errorf("should preload discount from rule") + } +} + +func TestBelongsToWithPartialCustomizedColumn(t *testing.T) { + DB.DropTable(&PromotionDiscount{}, &PromotionBenefit{}) + DB.AutoMigrate(&PromotionDiscount{}, &PromotionBenefit{}) + + discount := PromotionDiscount{ + Name: "Happy New Year 3", + Benefits: []PromotionBenefit{ + {Name: "free cod"}, + {Name: "free shipping"}, + }, + } + + if err := DB.Create(&discount).Error; err != nil { + t.Errorf("no error should happen but got %v", err) + } + + var discount1 PromotionDiscount + if err := DB.Preload("Benefits").First(&discount1, "id = ?", discount.ID).Error; err != nil { + t.Errorf("no error should happen but got %v", err) + } + + if len(discount.Benefits) != 2 { + t.Errorf("should find two benefits") + } + + var benefit PromotionBenefit + if err := DB.Preload("Discount").First(&benefit, "name = ?", "free cod").Error; err != nil { + t.Errorf("no error should happen but got %v", err) + } + + if benefit.Discount.Name != "Happy New Year 3" { + t.Errorf("should preload discount from coupon") + } +} diff --git a/vendor/github.com/jinzhu/gorm/delete_test.go b/vendor/github.com/jinzhu/gorm/delete_test.go new file mode 100644 index 000000000000..043641f70e09 --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/delete_test.go @@ -0,0 +1,91 @@ +package gorm_test + +import ( + "testing" + "time" +) + +func TestDelete(t *testing.T) { + user1, user2 := User{Name: "delete1"}, User{Name: "delete2"} + DB.Save(&user1) + DB.Save(&user2) + + if err := DB.Delete(&user1).Error; err != nil { + t.Errorf("No error should happen when delete a record, err=%s", err) + } + + if !DB.Where("name = ?", user1.Name).First(&User{}).RecordNotFound() { + t.Errorf("User can't be found after delete") + } + + if DB.Where("name = ?", user2.Name).First(&User{}).RecordNotFound() { + t.Errorf("Other users that not deleted should be found-able") + } +} + +func TestInlineDelete(t *testing.T) { + user1, user2 := User{Name: "inline_delete1"}, User{Name: "inline_delete2"} + DB.Save(&user1) + DB.Save(&user2) + + if DB.Delete(&User{}, user1.Id).Error != nil { + t.Errorf("No error should happen when delete a record") + } else if !DB.Where("name = ?", user1.Name).First(&User{}).RecordNotFound() { + t.Errorf("User can't be found after delete") + } + + if err := DB.Delete(&User{}, "name = ?", user2.Name).Error; err != nil { + t.Errorf("No error should happen when delete a record, err=%s", err) + } else if !DB.Where("name = ?", user2.Name).First(&User{}).RecordNotFound() { + t.Errorf("User can't be found after delete") + } +} + +func TestSoftDelete(t *testing.T) { + type User struct { + Id int64 + Name string + DeletedAt *time.Time + } + DB.AutoMigrate(&User{}) + + user := User{Name: "soft_delete"} + DB.Save(&user) + DB.Delete(&user) + + if DB.First(&User{}, "name = ?", user.Name).Error == nil { + t.Errorf("Can't find a soft deleted record") + } + + if err := DB.Unscoped().First(&User{}, "name = ?", user.Name).Error; err != nil { + t.Errorf("Should be able to find soft deleted record with Unscoped, but err=%s", err) + } + + DB.Unscoped().Delete(&user) + if !DB.Unscoped().First(&User{}, "name = ?", user.Name).RecordNotFound() { + t.Errorf("Can't find permanently deleted record") + } +} + +func 
TestSoftDeleteWithCustomizedDeletedAtColumnName(t *testing.T) { + creditCard := CreditCard{Number: "411111111234567"} + DB.Save(&creditCard) + DB.Delete(&creditCard) + + if deletedAtField, ok := DB.NewScope(&CreditCard{}).FieldByName("DeletedAt"); !ok || deletedAtField.DBName != "deleted_time" { + t.Errorf("CreditCard's DeletedAt's column name should be `deleted_time`") + } + + if DB.First(&CreditCard{}, "number = ?", creditCard.Number).Error == nil { + t.Errorf("Soft deleted record should not be found") + } + + if err := DB.Unscoped().First(&CreditCard{}, "number = ?", creditCard.Number).Error; err != nil { + t.Errorf("Should be able to find soft deleted record with Unscoped, but err=%s", err) + } + + DB.Unscoped().Delete(&creditCard) + if !DB.Unscoped().First(&CreditCard{}, "number = ?", creditCard.Number).RecordNotFound() { + t.Errorf("Permanently deleted record should not be found") + } +} diff --git a/vendor/github.com/jinzhu/gorm/dialects/mysql/BUILD b/vendor/github.com/jinzhu/gorm/dialects/mysql/BUILD index c0c2bbfe574b..8e4d81006564 100644 --- a/vendor/github.com/jinzhu/gorm/dialects/mysql/BUILD +++ b/vendor/github.com/jinzhu/gorm/dialects/mysql/BUILD @@ -1,14 +1,10 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) +load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", srcs = ["mysql.go"], importpath = "github.com/jinzhu/gorm/dialects/mysql", + visibility = ["//visibility:public"], deps = ["//vendor/github.com/go-sql-driver/mysql:go_default_library"], ) @@ -23,4 +19,5 @@ filegroup( name = "all-srcs", srcs = [":package-srcs"], tags = ["automanaged"], + visibility = ["//visibility:public"], ) diff --git a/vendor/github.com/jinzhu/gorm/dialects/sqlite/BUILD b/vendor/github.com/jinzhu/gorm/dialects/sqlite/BUILD index 8a83747a8d63..96bca8c1422c 100644 --- a/vendor/github.com/jinzhu/gorm/dialects/sqlite/BUILD +++ b/vendor/github.com/jinzhu/gorm/dialects/sqlite/BUILD @@ -1,14 +1,10 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) +load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", srcs = ["sqlite.go"], importpath = "github.com/jinzhu/gorm/dialects/sqlite", + visibility = ["//visibility:public"], deps = ["//vendor/github.com/mattn/go-sqlite3:go_default_library"], ) @@ -23,4 +19,5 @@ filegroup( name = "all-srcs", srcs = [":package-srcs"], tags = ["automanaged"], + visibility = ["//visibility:public"], ) diff --git a/vendor/github.com/jinzhu/gorm/embedded_struct_test.go b/vendor/github.com/jinzhu/gorm/embedded_struct_test.go new file mode 100644 index 000000000000..91dd05633df8 --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/embedded_struct_test.go @@ -0,0 +1,73 @@ +package gorm_test + +import "testing" + +type BasePost struct { + Id int64 + Title string + URL string +} + +type Author struct { + ID string + Name string + Email string +} + +type HNPost struct { + BasePost + Author `gorm:"embedded_prefix:user_"` // Embedded struct + Upvotes int32 +} + +type EngadgetPost struct { + BasePost BasePost `gorm:"embedded"` + Author Author `gorm:"embedded;embedded_prefix:author_"` // Embedded struct + ImageUrl string +} + +func TestPrefixColumnNameForEmbeddedStruct(t *testing.T) { + dialect := DB.NewScope(&EngadgetPost{}).Dialect() + engadgetPostScope := DB.NewScope(&EngadgetPost{}) + if !dialect.HasColumn(engadgetPostScope.TableName(), "author_id") ||
!dialect.HasColumn(engadgetPostScope.TableName(), "author_name") || !dialect.HasColumn(engadgetPostScope.TableName(), "author_email") { + t.Errorf("should have prefix for embedded columns") + } + + if len(engadgetPostScope.PrimaryFields()) != 1 { + t.Errorf("should have only one primary field with embedded struct, but got %v", len(engadgetPostScope.PrimaryFields())) + } + + hnScope := DB.NewScope(&HNPost{}) + if !dialect.HasColumn(hnScope.TableName(), "user_id") || !dialect.HasColumn(hnScope.TableName(), "user_name") || !dialect.HasColumn(hnScope.TableName(), "user_email") { + t.Errorf("should have prefix for embedded columns") + } +} + +func TestSaveAndQueryEmbeddedStruct(t *testing.T) { + DB.Save(&HNPost{BasePost: BasePost{Title: "news"}}) + DB.Save(&HNPost{BasePost: BasePost{Title: "hn_news"}}) + var news HNPost + if err := DB.First(&news, "title = ?", "hn_news").Error; err != nil { + t.Errorf("no error should happen when querying with embedded struct, but got %v", err) + } else if news.Title != "hn_news" { + t.Errorf("embedded struct's value should be scanned correctly") + } + + DB.Save(&EngadgetPost{BasePost: BasePost{Title: "engadget_news"}}) + var egNews EngadgetPost + if err := DB.First(&egNews, "title = ?", "engadget_news").Error; err != nil { + t.Errorf("no error should happen when querying with embedded struct, but got %v", err) + } else if egNews.BasePost.Title != "engadget_news" { + t.Errorf("embedded struct's value should be scanned correctly") + } + + if DB.NewScope(&HNPost{}).PrimaryField() == nil { + t.Errorf("primary key with embedded struct should work") + } + + for _, field := range DB.NewScope(&HNPost{}).Fields() { + if field.Name == "BasePost" { + t.Errorf("scope Fields should not contain embedded struct") + } + } +} diff --git a/vendor/github.com/jinzhu/gorm/errors_test.go b/vendor/github.com/jinzhu/gorm/errors_test.go new file mode 100644 index 000000000000..9a428dec01fb --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/errors_test.go @@ -0,0 +1,20 @@ +package gorm_test + +import ( + "errors" + "testing" + + "github.com/jinzhu/gorm" +) + +func TestErrorsCanBeUsedOutsideGorm(t *testing.T) { + errs := []error{errors.New("First"), errors.New("Second")} + + gErrs := gorm.Errors(errs) + gErrs = gErrs.Add(errors.New("Third")) + gErrs = gErrs.Add(gErrs) + + if gErrs.Error() != "First; Second; Third" { + t.Fatalf("Gave wrong error, got %s", gErrs.Error()) + } +} diff --git a/vendor/github.com/jinzhu/gorm/field_test.go b/vendor/github.com/jinzhu/gorm/field_test.go new file mode 100644 index 000000000000..30e9a778d209 --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/field_test.go @@ -0,0 +1,49 @@ +package gorm_test + +import ( + "testing" + + "github.com/jinzhu/gorm" +) + +type CalculateField struct { + gorm.Model + Name string + Children []CalculateFieldChild + Category CalculateFieldCategory + EmbeddedField +} + +type EmbeddedField struct { + EmbeddedName string `sql:"NOT NULL;DEFAULT:'hello'"` +} + +type CalculateFieldChild struct { + gorm.Model + CalculateFieldID uint + Name string +} + +type CalculateFieldCategory struct { + gorm.Model + CalculateFieldID uint + Name string +} + +func TestCalculateField(t *testing.T) { + var field CalculateField + var scope = DB.NewScope(&field) + if field, ok := scope.FieldByName("Children"); !ok || field.Relationship == nil { + t.Errorf("Should calculate fields correctly for the first time") + } + + if field, ok := scope.FieldByName("Category"); !ok || field.Relationship == nil { + t.Errorf("Should calculate fields correctly for the first
time") + } + + if field, ok := scope.FieldByName("embedded_name"); !ok { + t.Errorf("should find embedded field") + } else if _, ok := field.TagSettings["NOT NULL"]; !ok { + t.Errorf("should find embedded field's tag settings") + } +} diff --git a/vendor/github.com/jinzhu/gorm/join_table_test.go b/vendor/github.com/jinzhu/gorm/join_table_test.go new file mode 100644 index 000000000000..1a83a9c87034 --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/join_table_test.go @@ -0,0 +1,72 @@ +package gorm_test + +import ( + "fmt" + "testing" + "time" + + "github.com/jinzhu/gorm" +) + +type Person struct { + Id int + Name string + Addresses []*Address `gorm:"many2many:person_addresses;"` +} + +type PersonAddress struct { + gorm.JoinTableHandler + PersonID int + AddressID int + DeletedAt *time.Time + CreatedAt time.Time +} + +func (*PersonAddress) Add(handler gorm.JoinTableHandlerInterface, db *gorm.DB, foreignValue interface{}, associationValue interface{}) error { + return db.Where(map[string]interface{}{ + "person_id": db.NewScope(foreignValue).PrimaryKeyValue(), + "address_id": db.NewScope(associationValue).PrimaryKeyValue(), + }).Assign(map[string]interface{}{ + "person_id": foreignValue, + "address_id": associationValue, + "deleted_at": gorm.Expr("NULL"), + }).FirstOrCreate(&PersonAddress{}).Error +} + +func (*PersonAddress) Delete(handler gorm.JoinTableHandlerInterface, db *gorm.DB, sources ...interface{}) error { + return db.Delete(&PersonAddress{}).Error +} + +func (pa *PersonAddress) JoinWith(handler gorm.JoinTableHandlerInterface, db *gorm.DB, source interface{}) *gorm.DB { + table := pa.Table(db) + return db.Joins("INNER JOIN person_addresses ON person_addresses.address_id = addresses.id").Where(fmt.Sprintf("%v.deleted_at IS NULL OR %v.deleted_at <= '0001-01-02'", table, table)) +} + +func TestJoinTable(t *testing.T) { + DB.Exec("drop table person_addresses;") + DB.AutoMigrate(&Person{}) + DB.SetJoinTableHandler(&Person{}, "Addresses", &PersonAddress{}) + + address1 := &Address{Address1: "address 1"} + address2 := &Address{Address1: "address 2"} + person := &Person{Name: "person", Addresses: []*Address{address1, address2}} + DB.Save(person) + + DB.Model(person).Association("Addresses").Delete(address1) + + if DB.Find(&[]PersonAddress{}, "person_id = ?", person.Id).RowsAffected != 1 { + t.Errorf("Should found one address") + } + + if DB.Model(person).Association("Addresses").Count() != 1 { + t.Errorf("Should found one address") + } + + if DB.Unscoped().Find(&[]PersonAddress{}, "person_id = ?", person.Id).RowsAffected != 2 { + t.Errorf("Found two addresses with Unscoped") + } + + if DB.Model(person).Association("Addresses").Clear(); DB.Model(person).Association("Addresses").Count() != 0 { + t.Errorf("Should deleted all addresses") + } +} diff --git a/vendor/github.com/jinzhu/gorm/main_test.go b/vendor/github.com/jinzhu/gorm/main_test.go new file mode 100644 index 000000000000..f76988d2aea6 --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/main_test.go @@ -0,0 +1,860 @@ +package gorm_test + +import ( + "database/sql" + "database/sql/driver" + "fmt" + "os" + "path/filepath" + "reflect" + "strconv" + "testing" + "time" + + "github.com/erikstmartin/go-testdb" + "github.com/jinzhu/gorm" + _ "github.com/jinzhu/gorm/dialects/mssql" + _ "github.com/jinzhu/gorm/dialects/mysql" + "github.com/jinzhu/gorm/dialects/postgres" + _ "github.com/jinzhu/gorm/dialects/sqlite" + "github.com/jinzhu/now" +) + +var ( + DB *gorm.DB + t1, t2, t3, t4, t5 time.Time +) + +func init() { + var err error + + if DB, err = 
OpenTestConnection(); err != nil { + panic(fmt.Sprintf("No error should happen when connecting to test database, but got err=%+v", err)) + } + + runMigration() +} + +func OpenTestConnection() (db *gorm.DB, err error) { + switch os.Getenv("GORM_DIALECT") { + case "mysql": + // CREATE USER 'gorm'@'localhost' IDENTIFIED BY 'gorm'; + // CREATE DATABASE gorm; + // GRANT ALL ON gorm.* TO 'gorm'@'localhost'; + fmt.Println("testing mysql...") + dbhost := os.Getenv("GORM_DBADDRESS") + if dbhost != "" { + dbhost = fmt.Sprintf("tcp(%v)", dbhost) + } + db, err = gorm.Open("mysql", fmt.Sprintf("gorm:gorm@%v/gorm?charset=utf8&parseTime=True", dbhost)) + case "postgres": + fmt.Println("testing postgres...") + dbhost := os.Getenv("GORM_DBHOST") + if dbhost != "" { + dbhost = fmt.Sprintf("host=%v ", dbhost) + } + db, err = gorm.Open("postgres", fmt.Sprintf("%vuser=gorm password=gorm dbname=gorm sslmode=disable", dbhost)) + case "foundation": + fmt.Println("testing foundation...") + db, err = gorm.Open("foundation", "dbname=gorm port=15432 sslmode=disable") + case "mssql": + fmt.Println("testing mssql...") + db, err = gorm.Open("mssql", "server=SERVER_HERE;database=rogue;user id=USER_HERE;password=PW_HERE;port=1433") + default: + fmt.Println("testing sqlite3...") + db, err = gorm.Open("sqlite3", filepath.Join(os.TempDir(), "gorm.db")) + } + + // db.SetLogger(Logger{log.New(os.Stdout, "\r\n", 0)}) + // db.SetLogger(log.New(os.Stdout, "\r\n", 0)) + if os.Getenv("DEBUG") == "true" { + db.LogMode(true) + } + + db.DB().SetMaxIdleConns(10) + + return +} + +func TestStringPrimaryKey(t *testing.T) { + type UUIDStruct struct { + ID string `gorm:"primary_key"` + Name string + } + DB.DropTable(&UUIDStruct{}) + DB.AutoMigrate(&UUIDStruct{}) + + data := UUIDStruct{ID: "uuid", Name: "hello"} + if err := DB.Save(&data).Error; err != nil || data.ID != "uuid" || data.Name != "hello" { + t.Errorf("record with string primary key should be saved correctly") + } + + data = UUIDStruct{ID: "uuid", Name: "hello world"} + if err := DB.Save(&data).Error; err != nil || data.ID != "uuid" || data.Name != "hello world" { + t.Errorf("record with string primary key should be saved correctly") + } +} + +func TestExceptionsWithInvalidSql(t *testing.T) { + var columns []string + if DB.Where("sdsd.zaaa = ?", "sd;;;aa").Pluck("aaa", &columns).Error == nil { + t.Errorf("Should get an error with invalid SQL") + } + + if DB.Model(&User{}).Where("sdsd.zaaa = ?", "sd;;;aa").Pluck("aaa", &columns).Error == nil { + t.Errorf("Should get an error with invalid SQL") + } + + if DB.Where("sdsd.zaaa = ?", "sd;;;aa").Find(&User{}).Error == nil { + t.Errorf("Should get an error with invalid SQL") + } + + var count1, count2 int64 + DB.Model(&User{}).Count(&count1) + if count1 <= 0 { + t.Errorf("Should find some users") + } + + if DB.Where("name = ?", "jinzhu; delete * from users").First(&User{}).Error == nil { + t.Errorf("Should get an error with invalid SQL") + } + + DB.Model(&User{}).Count(&count2) + if count1 != count2 { + t.Errorf("No users should be deleted by invalid SQL") + } +} + +func TestSetTable(t *testing.T) { + DB.Create(getPreparedUser("pluck_user1", "pluck_user")) + DB.Create(getPreparedUser("pluck_user2", "pluck_user")) + DB.Create(getPreparedUser("pluck_user3", "pluck_user")) + + if err := DB.Table("users").Where("role = ?", "pluck_user").Pluck("age", &[]int{}).Error; err != nil { + t.Error("No errors should happen if table is set for pluck", err) + } + + var users []User + if DB.Table("users").Find(&[]User{}).Error != nil { + t.Errorf("No errors should happen if table is set for
find") + } + + if DB.Table("invalid_table").Find(&users).Error == nil { + t.Errorf("Should got error when table is set to an invalid table") + } + + DB.Exec("drop table deleted_users;") + if DB.Table("deleted_users").CreateTable(&User{}).Error != nil { + t.Errorf("Create table with specified table") + } + + DB.Table("deleted_users").Save(&User{Name: "DeletedUser"}) + + var deletedUsers []User + DB.Table("deleted_users").Find(&deletedUsers) + if len(deletedUsers) != 1 { + t.Errorf("Query from specified table") + } + + DB.Save(getPreparedUser("normal_user", "reset_table")) + DB.Table("deleted_users").Save(getPreparedUser("deleted_user", "reset_table")) + var user1, user2, user3 User + DB.Where("role = ?", "reset_table").First(&user1).Table("deleted_users").First(&user2).Table("").First(&user3) + if (user1.Name != "normal_user") || (user2.Name != "deleted_user") || (user3.Name != "normal_user") { + t.Errorf("unset specified table with blank string") + } +} + +type Order struct { +} + +type Cart struct { +} + +func (c Cart) TableName() string { + return "shopping_cart" +} + +func TestHasTable(t *testing.T) { + type Foo struct { + Id int + Stuff string + } + DB.DropTable(&Foo{}) + + // Table should not exist at this point, HasTable should return false + if ok := DB.HasTable("foos"); ok { + t.Errorf("Table should not exist, but does") + } + if ok := DB.HasTable(&Foo{}); ok { + t.Errorf("Table should not exist, but does") + } + + // We create the table + if err := DB.CreateTable(&Foo{}).Error; err != nil { + t.Errorf("Table should be created") + } + + // And now it should exits, and HasTable should return true + if ok := DB.HasTable("foos"); !ok { + t.Errorf("Table should exist, but HasTable informs it does not") + } + if ok := DB.HasTable(&Foo{}); !ok { + t.Errorf("Table should exist, but HasTable informs it does not") + } +} + +func TestTableName(t *testing.T) { + DB := DB.Model("") + if DB.NewScope(Order{}).TableName() != "orders" { + t.Errorf("Order's table name should be orders") + } + + if DB.NewScope(&Order{}).TableName() != "orders" { + t.Errorf("&Order's table name should be orders") + } + + if DB.NewScope([]Order{}).TableName() != "orders" { + t.Errorf("[]Order's table name should be orders") + } + + if DB.NewScope(&[]Order{}).TableName() != "orders" { + t.Errorf("&[]Order's table name should be orders") + } + + DB.SingularTable(true) + if DB.NewScope(Order{}).TableName() != "order" { + t.Errorf("Order's singular table name should be order") + } + + if DB.NewScope(&Order{}).TableName() != "order" { + t.Errorf("&Order's singular table name should be order") + } + + if DB.NewScope([]Order{}).TableName() != "order" { + t.Errorf("[]Order's singular table name should be order") + } + + if DB.NewScope(&[]Order{}).TableName() != "order" { + t.Errorf("&[]Order's singular table name should be order") + } + + if DB.NewScope(&Cart{}).TableName() != "shopping_cart" { + t.Errorf("&Cart's singular table name should be shopping_cart") + } + + if DB.NewScope(Cart{}).TableName() != "shopping_cart" { + t.Errorf("Cart's singular table name should be shopping_cart") + } + + if DB.NewScope(&[]Cart{}).TableName() != "shopping_cart" { + t.Errorf("&[]Cart's singular table name should be shopping_cart") + } + + if DB.NewScope([]Cart{}).TableName() != "shopping_cart" { + t.Errorf("[]Cart's singular table name should be shopping_cart") + } + DB.SingularTable(false) +} + +func TestNullValues(t *testing.T) { + DB.DropTable(&NullValue{}) + DB.AutoMigrate(&NullValue{}) + + if err := DB.Save(&NullValue{ + Name: 
sql.NullString{String: "hello", Valid: true}, + Gender: &sql.NullString{String: "M", Valid: true}, + Age: sql.NullInt64{Int64: 18, Valid: true}, + Male: sql.NullBool{Bool: true, Valid: true}, + Height: sql.NullFloat64{Float64: 100.11, Valid: true}, + AddedAt: NullTime{Time: time.Now(), Valid: true}, + }).Error; err != nil { + t.Errorf("Not error should raise when test null value") + } + + var nv NullValue + DB.First(&nv, "name = ?", "hello") + + if nv.Name.String != "hello" || nv.Gender.String != "M" || nv.Age.Int64 != 18 || nv.Male.Bool != true || nv.Height.Float64 != 100.11 || nv.AddedAt.Valid != true { + t.Errorf("Should be able to fetch null value") + } + + if err := DB.Save(&NullValue{ + Name: sql.NullString{String: "hello-2", Valid: true}, + Gender: &sql.NullString{String: "F", Valid: true}, + Age: sql.NullInt64{Int64: 18, Valid: false}, + Male: sql.NullBool{Bool: true, Valid: true}, + Height: sql.NullFloat64{Float64: 100.11, Valid: true}, + AddedAt: NullTime{Time: time.Now(), Valid: false}, + }).Error; err != nil { + t.Errorf("Not error should raise when test null value") + } + + var nv2 NullValue + DB.First(&nv2, "name = ?", "hello-2") + if nv2.Name.String != "hello-2" || nv2.Gender.String != "F" || nv2.Age.Int64 != 0 || nv2.Male.Bool != true || nv2.Height.Float64 != 100.11 || nv2.AddedAt.Valid != false { + t.Errorf("Should be able to fetch null value") + } + + if err := DB.Save(&NullValue{ + Name: sql.NullString{String: "hello-3", Valid: false}, + Gender: &sql.NullString{String: "M", Valid: true}, + Age: sql.NullInt64{Int64: 18, Valid: false}, + Male: sql.NullBool{Bool: true, Valid: true}, + Height: sql.NullFloat64{Float64: 100.11, Valid: true}, + AddedAt: NullTime{Time: time.Now(), Valid: false}, + }).Error; err == nil { + t.Errorf("Can't save because of name can't be null") + } +} + +func TestNullValuesWithFirstOrCreate(t *testing.T) { + var nv1 = NullValue{ + Name: sql.NullString{String: "first_or_create", Valid: true}, + Gender: &sql.NullString{String: "M", Valid: true}, + } + + var nv2 NullValue + result := DB.Where(nv1).FirstOrCreate(&nv2) + + if result.RowsAffected != 1 { + t.Errorf("RowsAffected should be 1 after create some record") + } + + if result.Error != nil { + t.Errorf("Should not raise any error, but got %v", result.Error) + } + + if nv2.Name.String != "first_or_create" || nv2.Gender.String != "M" { + t.Errorf("first or create with nullvalues") + } + + if err := DB.Where(nv1).Assign(NullValue{Age: sql.NullInt64{Int64: 18, Valid: true}}).FirstOrCreate(&nv2).Error; err != nil { + t.Errorf("Should not raise any error, but got %v", err) + } + + if nv2.Age.Int64 != 18 { + t.Errorf("should update age to 18") + } +} + +func TestTransaction(t *testing.T) { + tx := DB.Begin() + u := User{Name: "transcation"} + if err := tx.Save(&u).Error; err != nil { + t.Errorf("No error should raise") + } + + if err := tx.First(&User{}, "name = ?", "transcation").Error; err != nil { + t.Errorf("Should find saved record") + } + + if sqlTx, ok := tx.CommonDB().(*sql.Tx); !ok || sqlTx == nil { + t.Errorf("Should return the underlying sql.Tx") + } + + tx.Rollback() + + if err := tx.First(&User{}, "name = ?", "transcation").Error; err == nil { + t.Errorf("Should not find record after rollback") + } + + tx2 := DB.Begin() + u2 := User{Name: "transcation-2"} + if err := tx2.Save(&u2).Error; err != nil { + t.Errorf("No error should raise") + } + + if err := tx2.First(&User{}, "name = ?", "transcation-2").Error; err != nil { + t.Errorf("Should find saved record") + } + + tx2.Commit() + + if err := 
DB.First(&User{}, "name = ?", "transcation-2").Error; err != nil { + t.Errorf("Should be able to find committed record") + } +} + +func TestRow(t *testing.T) { + user1 := User{Name: "RowUser1", Age: 1, Birthday: parseTime("2000-1-1")} + user2 := User{Name: "RowUser2", Age: 10, Birthday: parseTime("2010-1-1")} + user3 := User{Name: "RowUser3", Age: 20, Birthday: parseTime("2020-1-1")} + DB.Save(&user1).Save(&user2).Save(&user3) + + row := DB.Table("users").Where("name = ?", user2.Name).Select("age").Row() + var age int64 + row.Scan(&age) + if age != 10 { + t.Errorf("Scan with Row") + } +} + +func TestRows(t *testing.T) { + user1 := User{Name: "RowsUser1", Age: 1, Birthday: parseTime("2000-1-1")} + user2 := User{Name: "RowsUser2", Age: 10, Birthday: parseTime("2010-1-1")} + user3 := User{Name: "RowsUser3", Age: 20, Birthday: parseTime("2020-1-1")} + DB.Save(&user1).Save(&user2).Save(&user3) + + rows, err := DB.Table("users").Where("name = ? or name = ?", user2.Name, user3.Name).Select("name, age").Rows() + if err != nil { + t.Errorf("Not error should happen, got %v", err) + } + + count := 0 + for rows.Next() { + var name string + var age int64 + rows.Scan(&name, &age) + count++ + } + + if count != 2 { + t.Errorf("Should found two records") + } +} + +func TestScanRows(t *testing.T) { + user1 := User{Name: "ScanRowsUser1", Age: 1, Birthday: parseTime("2000-1-1")} + user2 := User{Name: "ScanRowsUser2", Age: 10, Birthday: parseTime("2010-1-1")} + user3 := User{Name: "ScanRowsUser3", Age: 20, Birthday: parseTime("2020-1-1")} + DB.Save(&user1).Save(&user2).Save(&user3) + + rows, err := DB.Table("users").Where("name = ? or name = ?", user2.Name, user3.Name).Select("name, age").Rows() + if err != nil { + t.Errorf("Not error should happen, got %v", err) + } + + type Result struct { + Name string + Age int + } + + var results []Result + for rows.Next() { + var result Result + if err := DB.ScanRows(rows, &result); err != nil { + t.Errorf("should get no error, but got %v", err) + } + results = append(results, result) + } + + if !reflect.DeepEqual(results, []Result{{Name: "ScanRowsUser2", Age: 10}, {Name: "ScanRowsUser3", Age: 20}}) { + t.Errorf("Should find expected results") + } +} + +func TestScan(t *testing.T) { + user1 := User{Name: "ScanUser1", Age: 1, Birthday: parseTime("2000-1-1")} + user2 := User{Name: "ScanUser2", Age: 10, Birthday: parseTime("2010-1-1")} + user3 := User{Name: "ScanUser3", Age: 20, Birthday: parseTime("2020-1-1")} + DB.Save(&user1).Save(&user2).Save(&user3) + + type result struct { + Name string + Age int + } + + var res result + DB.Table("users").Select("name, age").Where("name = ?", user3.Name).Scan(&res) + if res.Name != user3.Name { + t.Errorf("Scan into struct should work") + } + + var doubleAgeRes = &result{} + if err := DB.Table("users").Select("age + age as age").Where("name = ?", user3.Name).Scan(&doubleAgeRes).Error; err != nil { + t.Errorf("Scan to pointer of pointer") + } + if doubleAgeRes.Age != res.Age*2 { + t.Errorf("Scan double age as age") + } + + var ress []result + DB.Table("users").Select("name, age").Where("name in (?)", []string{user2.Name, user3.Name}).Scan(&ress) + if len(ress) != 2 || ress[0].Name != user2.Name || ress[1].Name != user3.Name { + t.Errorf("Scan into struct map") + } +} + +func TestRaw(t *testing.T) { + user1 := User{Name: "ExecRawSqlUser1", Age: 1, Birthday: parseTime("2000-1-1")} + user2 := User{Name: "ExecRawSqlUser2", Age: 10, Birthday: parseTime("2010-1-1")} + user3 := User{Name: "ExecRawSqlUser3", Age: 20, Birthday: 
parseTime("2020-1-1")} + DB.Save(&user1).Save(&user2).Save(&user3) + + type result struct { + Name string + Email string + } + + var ress []result + DB.Raw("SELECT name, age FROM users WHERE name = ? or name = ?", user2.Name, user3.Name).Scan(&ress) + if len(ress) != 2 || ress[0].Name != user2.Name || ress[1].Name != user3.Name { + t.Errorf("Raw with scan") + } + + rows, _ := DB.Raw("select name, age from users where name = ?", user3.Name).Rows() + count := 0 + for rows.Next() { + count++ + } + if count != 1 { + t.Errorf("Raw with Rows should find one record with name 3") + } + + DB.Exec("update users set name=? where name in (?)", "jinzhu", []string{user1.Name, user2.Name, user3.Name}) + if DB.Where("name in (?)", []string{user1.Name, user2.Name, user3.Name}).First(&User{}).Error != gorm.ErrRecordNotFound { + t.Error("Raw sql to update records") + } +} + +func TestGroup(t *testing.T) { + rows, err := DB.Select("name").Table("users").Group("name").Rows() + + if err == nil { + defer rows.Close() + for rows.Next() { + var name string + rows.Scan(&name) + } + } else { + t.Errorf("Should not raise any error") + } +} + +func TestJoins(t *testing.T) { + var user = User{ + Name: "joins", + CreditCard: CreditCard{Number: "411111111111"}, + Emails: []Email{{Email: "join1@example.com"}, {Email: "join2@example.com"}}, + } + DB.Save(&user) + + var users1 []User + DB.Joins("left join emails on emails.user_id = users.id").Where("name = ?", "joins").Find(&users1) + if len(users1) != 2 { + t.Errorf("should find two users using left join") + } + + var users2 []User + DB.Joins("left join emails on emails.user_id = users.id AND emails.email = ?", "join1@example.com").Where("name = ?", "joins").First(&users2) + if len(users2) != 1 { + t.Errorf("should find one users using left join with conditions") + } + + var users3 []User + DB.Joins("join emails on emails.user_id = users.id AND emails.email = ?", "join1@example.com").Joins("join credit_cards on credit_cards.user_id = users.id AND credit_cards.number = ?", "411111111111").Where("name = ?", "joins").First(&users3) + if len(users3) != 1 { + t.Errorf("should find one users using multiple left join conditions") + } + + var users4 []User + DB.Joins("join emails on emails.user_id = users.id AND emails.email = ?", "join1@example.com").Joins("join credit_cards on credit_cards.user_id = users.id AND credit_cards.number = ?", "422222222222").Where("name = ?", "joins").First(&users4) + if len(users4) != 0 { + t.Errorf("should find no user when searching with unexisting credit card") + } + + var users5 []User + db5 := DB.Joins("join emails on emails.user_id = users.id AND emails.email = ?", "join1@example.com").Joins("join credit_cards on credit_cards.user_id = users.id AND credit_cards.number = ?", "411111111111").Where(User{Id: 1}).Where(Email{Id: 1}).Not(Email{Id: 10}).First(&users5) + if db5.Error != nil { + t.Errorf("Should not raise error for join where identical fields in different tables. 
Error: %s", db5.Error.Error()) + } +} + +func TestJoinsWithSelect(t *testing.T) { + type result struct { + Name string + Email string + } + + user := User{ + Name: "joins_with_select", + Emails: []Email{{Email: "join1@example.com"}, {Email: "join2@example.com"}}, + } + DB.Save(&user) + + var results []result + DB.Table("users").Select("name, emails.email").Joins("left join emails on emails.user_id = users.id").Where("name = ?", "joins_with_select").Scan(&results) + if len(results) != 2 || results[0].Email != "join1@example.com" || results[1].Email != "join2@example.com" { + t.Errorf("Should find all two emails with Join select") + } +} + +func TestHaving(t *testing.T) { + rows, err := DB.Select("name, count(*) as total").Table("users").Group("name").Having("name IN (?)", []string{"2", "3"}).Rows() + + if err == nil { + defer rows.Close() + for rows.Next() { + var name string + var total int64 + rows.Scan(&name, &total) + + if name == "2" && total != 1 { + t.Errorf("Should have one user having name 2") + } + if name == "3" && total != 2 { + t.Errorf("Should have two users having name 3") + } + } + } else { + t.Errorf("Should not raise any error") + } +} + +func DialectHasTzSupport() bool { + // NB: mssql and FoundationDB do not support time zones. + if dialect := os.Getenv("GORM_DIALECT"); dialect == "mssql" || dialect == "foundation" { + return false + } + return true +} + +func TestTimeWithZone(t *testing.T) { + var format = "2006-01-02 15:04:05 -0700" + var times []time.Time + GMT8, _ := time.LoadLocation("Asia/Shanghai") + times = append(times, time.Date(2013, 02, 19, 1, 51, 49, 123456789, GMT8)) + times = append(times, time.Date(2013, 02, 18, 17, 51, 49, 123456789, time.UTC)) + + for index, vtime := range times { + name := "time_with_zone_" + strconv.Itoa(index) + user := User{Name: name, Birthday: &vtime} + + if !DialectHasTzSupport() { + // If our driver dialect doesn't support TZ's, just use UTC for everything here. + utcBirthday := user.Birthday.UTC() + user.Birthday = &utcBirthday + } + + DB.Save(&user) + expectedBirthday := "2013-02-18 17:51:49 +0000" + foundBirthday := user.Birthday.UTC().Format(format) + if foundBirthday != expectedBirthday { + t.Errorf("User's birthday should not be changed after save for name=%s, expected bday=%+v but actual value=%+v", name, expectedBirthday, foundBirthday) + } + + var findUser, findUser2, findUser3 User + DB.First(&findUser, "name = ?", name) + foundBirthday = findUser.Birthday.UTC().Format(format) + if foundBirthday != expectedBirthday { + t.Errorf("User's birthday should not be changed after find for name=%s, expected bday=%+v but actual value=%+v", name, expectedBirthday, foundBirthday) + } + + if DB.Where("id = ? AND birthday >= ?", findUser.Id, user.Birthday.Add(-time.Minute)).First(&findUser2).RecordNotFound() { + t.Errorf("User should be found") + } + + if !DB.Where("id = ? 
+		if !DB.Where("id = ? AND birthday >= ?", findUser.Id, user.Birthday.Add(time.Minute)).First(&findUser3).RecordNotFound() {
+			t.Errorf("User should not be found")
+		}
+	}
+}
+
+func TestHstore(t *testing.T) {
+	type Details struct {
+		Id   int64
+		Bulk postgres.Hstore
+	}
+
+	if dialect := os.Getenv("GORM_DIALECT"); dialect != "postgres" {
+		t.Skip()
+	}
+
+	if err := DB.Exec("CREATE EXTENSION IF NOT EXISTS hstore").Error; err != nil {
+		fmt.Println("\033[31mHINT: Must be superuser to create hstore extension (ALTER USER gorm WITH SUPERUSER;)\033[0m")
+		panic(fmt.Sprintf("No error should happen when creating hstore extension, but got %+v", err))
+	}
+
+	DB.Exec("drop table details")
+
+	if err := DB.CreateTable(&Details{}).Error; err != nil {
+		panic(fmt.Sprintf("No error should happen when creating table, but got %+v", err))
+	}
+
+	bankAccountId, phoneNumber, opinion := "123456", "14151321232", "sharkbait"
+	bulk := map[string]*string{
+		"bankAccountId": &bankAccountId,
+		"phoneNumber":   &phoneNumber,
+		"opinion":       &opinion,
+	}
+	d := Details{Bulk: bulk}
+	DB.Save(&d)
+
+	var d2 Details
+	if err := DB.First(&d2).Error; err != nil {
+		t.Errorf("Got error when tried to fetch details: %+v", err)
+	}
+
+	for k := range bulk {
+		if r, ok := d2.Bulk[k]; ok {
+			if res := bulk[k]; *res != *r {
+				t.Errorf("Hstore value for key %v should survive a round trip", k)
+			}
+		} else {
+			t.Errorf("Hstore key %v should exist after fetch", k)
+		}
+	}
+}
+
+func TestSetAndGet(t *testing.T) {
+	if value, ok := DB.Set("hello", "world").Get("hello"); !ok {
+		t.Errorf("Should be able to get setting after set")
+	} else {
+		if value.(string) != "world" {
+			t.Errorf("Set value should not be changed")
+		}
+	}
+
+	if _, ok := DB.Get("non_existing"); ok {
+		t.Errorf("Get should report a non-existing key as not found")
+	}
+}
+
+func TestCompatibilityMode(t *testing.T) {
+	DB, _ := gorm.Open("testdb", "")
+	testdb.SetQueryFunc(func(query string) (driver.Rows, error) {
+		columns := []string{"id", "name", "age"}
+		result := `
+		1,Tim,20
+		2,Joe,25
+		3,Bob,30
+		`
+		return testdb.RowsFromCSVString(columns, result), nil
+	})
+
+	var users []User
+	DB.Find(&users)
+	if (users[0].Name != "Tim") || len(users) != 3 {
+		t.Errorf("Unexpected result returned")
+	}
+}
+
+func TestOpenExistingDB(t *testing.T) {
+	DB.Save(&User{Name: "jnfeinstein"})
+	dialect := os.Getenv("GORM_DIALECT")
+
+	db, err := gorm.Open(dialect, DB.DB())
+	if err != nil {
+		t.Errorf("Should have wrapped the existing DB connection")
+	}
+
+	var user User
+	if db.Where("name = ?", "jnfeinstein").First(&user).Error == gorm.ErrRecordNotFound {
+		t.Errorf("Should have found existing record")
+	}
+}
+
+func TestDdlErrors(t *testing.T) {
+	var err error
+
+	if err = DB.Close(); err != nil {
+		t.Errorf("Closing DDL test db connection err=%s", err)
+	}
+	defer func() {
+		// Reopen DB connection.
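+		// NB: DB is the package-level handle shared by the whole suite, so
+		// every test that runs after this one depends on the reopen succeeding.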
+		if DB, err = OpenTestConnection(); err != nil {
+			t.Fatalf("Failed re-opening db connection: %s", err)
+		}
+	}()
+
+	if err := DB.Find(&User{}).Error; err == nil {
+		t.Errorf("Expected operation on closed db to produce an error, but err was nil")
+	}
+}
+
+func TestOpenWithOneParameter(t *testing.T) {
+	db, err := gorm.Open("dialect")
+	if db != nil {
+		t.Error("Open with one parameter returned non nil for db")
+	}
+	if err == nil {
+		t.Error("Open with one parameter returned err as nil")
+	}
+}
+
+func TestBlockGlobalUpdate(t *testing.T) {
+	db := DB.New()
+	db.Create(&Toy{Name: "Stuffed Animal", OwnerType: "Nobody"})
+
+	err := db.Model(&Toy{}).Update("OwnerType", "Human").Error
+	if err != nil {
+		t.Error("Unexpected error on global update")
+	}
+
+	err = db.Delete(&Toy{}).Error
+	if err != nil {
+		t.Error("Unexpected error on global delete")
+	}
+
+	db.BlockGlobalUpdate(true)
+
+	db.Create(&Toy{Name: "Stuffed Animal", OwnerType: "Nobody"})
+
+	err = db.Model(&Toy{}).Update("OwnerType", "Human").Error
+	if err == nil {
+		t.Error("Expected error on global update")
+	}
+
+	err = db.Model(&Toy{}).Where(&Toy{OwnerType: "Martian"}).Update("OwnerType", "Astronaut").Error
+	if err != nil {
+		t.Error("Unexpected error on conditional update")
+	}
+
+	err = db.Delete(&Toy{}).Error
+	if err == nil {
+		t.Error("Expected error on global delete")
+	}
+	err = db.Where(&Toy{OwnerType: "Martian"}).Delete(&Toy{}).Error
+	if err != nil {
+		t.Error("Unexpected error on conditional delete")
+	}
+}
+
+func BenchmarkGorm(b *testing.B) {
+	b.N = 2000
+	for x := 0; x < b.N; x++ {
+		e := strconv.Itoa(x) + "benchmark@example.org"
+		now := time.Now()
+		email := BigEmail{Email: e, UserAgent: "pc", RegisteredAt: &now}
+		// Insert
+		DB.Save(&email)
+		// Query
+		DB.First(&BigEmail{}, "email = ?", e)
+		// Update
+		DB.Model(&email).UpdateColumn("email", "new-"+e)
+		// Delete
+		DB.Delete(&email)
+	}
+}
+
+func BenchmarkRawSql(b *testing.B) {
+	DB, _ := sql.Open("postgres", "user=gorm dbname=gorm sslmode=disable")
+	DB.SetMaxIdleConns(10)
+	insertSql := "INSERT INTO emails (user_id,email,user_agent,registered_at,created_at,updated_at) VALUES ($1,$2,$3,$4,$5,$6) RETURNING id"
+	querySql := "SELECT * FROM emails WHERE email = $1 ORDER BY id LIMIT 1"
+	updateSql := "UPDATE emails SET email = $1, updated_at = $2 WHERE id = $3"
+	deleteSql := "DELETE FROM emails WHERE id = $1"
+
+	b.N = 2000
+	for x := 0; x < b.N; x++ {
+		var id int64
+		e := strconv.Itoa(x) + "benchmark@example.org"
+		now := time.Now()
+		email := BigEmail{Email: e, UserAgent: "pc", RegisteredAt: &now}
+		// Insert
+		DB.QueryRow(insertSql, email.UserId, email.Email, email.UserAgent, email.RegisteredAt, time.Now(), time.Now()).Scan(&id)
+		// Query
+		rows, _ := DB.Query(querySql, email.Email)
+		rows.Close()
+		// Update
+		DB.Exec(updateSql, "new-"+e, time.Now(), id)
+		// Delete
+		DB.Exec(deleteSql, id)
+	}
+}
+
+func parseTime(str string) *time.Time {
+	t := now.MustParse(str)
+	return &t
+}
diff --git a/vendor/github.com/jinzhu/gorm/migration_test.go b/vendor/github.com/jinzhu/gorm/migration_test.go
new file mode 100644
index 000000000000..95c2c57146de
--- /dev/null
+++ b/vendor/github.com/jinzhu/gorm/migration_test.go
@@ -0,0 +1,438 @@
+package gorm_test
+
+import (
+	"database/sql"
+	"database/sql/driver"
+	"errors"
+	"fmt"
+	"reflect"
+	"strconv"
+	"testing"
+	"time"
+
+	"github.com/jinzhu/gorm"
+)
+
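+// User is the suite's kitchen-sink fixture: plain columns, has-many and
+// belongs-to associations, a many2many relation, an embedded Role, and
+// ignored fields all hang off one model so a single AutoMigrate exercises
+// most of a dialect's features.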
+type User struct {
+	Id                int64
+	Age               int64
+	UserNum           Num
+	Name              string `sql:"size:255"`
+	Email             string
+	Birthday          *time.Time // Time
+	CreatedAt         time.Time  // CreatedAt: time the record was created; filled in automatically on insert
+	UpdatedAt         time.Time  // UpdatedAt: time the record was last updated; refreshed automatically on update
+	Emails            []Email    // has-many association
+	BillingAddress    Address    // belongs-to association
+	BillingAddressID  sql.NullInt64 // BillingAddress foreign key
+	ShippingAddress   Address    // belongs-to association
+	ShippingAddressId int64      // ShippingAddress foreign key
+	CreditCard        CreditCard
+	Latitude          float64
+	Languages         []Language `gorm:"many2many:user_languages;"`
+	CompanyID         *int
+	Company           Company
+	Role
+	PasswordHash      []byte
+	Sequence          uint     `gorm:"AUTO_INCREMENT"`
+	IgnoreMe          int64    `sql:"-"`
+	IgnoreStringSlice []string `sql:"-"`
+	Ignored           struct{ Name string } `sql:"-"`
+	IgnoredPointer    *User `sql:"-"`
+}
+
+type NotSoLongTableName struct {
+	Id                int64
+	ReallyLongThingID int64
+	ReallyLongThing   ReallyLongTableNameToTestMySQLNameLengthLimit
+}
+
+type ReallyLongTableNameToTestMySQLNameLengthLimit struct {
+	Id int64
+}
+
+type ReallyLongThingThatReferencesShort struct {
+	Id      int64
+	ShortID int64
+	Short   Short
+}
+
+type Short struct {
+	Id int64
+}
+
+type CreditCard struct {
+	ID        int8
+	Number    string
+	UserId    sql.NullInt64
+	CreatedAt time.Time `sql:"not null"`
+	UpdatedAt time.Time
+	DeletedAt *time.Time `sql:"column:deleted_time"`
+}
+
+type Email struct {
+	Id        int16
+	UserId    int
+	Email     string `sql:"type:varchar(100);"`
+	CreatedAt time.Time
+	UpdatedAt time.Time
+}
+
+type Address struct {
+	ID        int
+	Address1  string
+	Address2  string
+	Post      string
+	CreatedAt time.Time
+	UpdatedAt time.Time
+	DeletedAt *time.Time
+}
+
+type Language struct {
+	gorm.Model
+	Name  string
+	Users []User `gorm:"many2many:user_languages;"`
+}
+
+type Product struct {
+	Id                    int64
+	Code                  string
+	Price                 int64
+	CreatedAt             time.Time
+	UpdatedAt             time.Time
+	AfterFindCallTimes    int64
+	BeforeCreateCallTimes int64
+	AfterCreateCallTimes  int64
+	BeforeUpdateCallTimes int64
+	AfterUpdateCallTimes  int64
+	BeforeSaveCallTimes   int64
+	AfterSaveCallTimes    int64
+	BeforeDeleteCallTimes int64
+	AfterDeleteCallTimes  int64
+}
+
+type Company struct {
+	Id    int64
+	Name  string
+	Owner *User `sql:"-"`
+}
+
+type Role struct {
+	Name string `gorm:"size:256"`
+}
+
+func (role *Role) Scan(value interface{}) error {
+	if b, ok := value.([]uint8); ok {
+		role.Name = string(b)
+	} else {
+		role.Name = value.(string)
+	}
+	return nil
+}
+
+func (role Role) Value() (driver.Value, error) {
+	return role.Name, nil
+}
+
+func (role Role) IsAdmin() bool {
+	return role.Name == "admin"
+}
+
+type Num int64
+
+func (i *Num) Scan(src interface{}) error {
+	switch s := src.(type) {
+	case []byte:
+		// Some drivers hand numeric columns back as raw bytes.
+		n, err := strconv.ParseInt(string(s), 10, 64)
+		if err != nil {
+			return err
+		}
+		*i = Num(n)
+	case int64:
+		*i = Num(s)
+	default:
+		return errors.New("Cannot scan Num from " + reflect.ValueOf(src).String())
+	}
+	return nil
+}
+
+type Animal struct {
+	Counter    uint64 `gorm:"primary_key:yes"`
+	Name       string `sql:"DEFAULT:'galeone'"`
+	From       string // test reserved sql keyword as field name
+	Age        time.Time `sql:"DEFAULT:current_timestamp"`
+	unexported string // unexported value
+	CreatedAt  time.Time
+	UpdatedAt  time.Time
+}
+
+type JoinTable struct {
+	From uint64
+	To   uint64
+	Time time.Time `sql:"default: null"`
+}
+
+type Post struct {
+	Id             int64
+	CategoryId     sql.NullInt64
+	MainCategoryId int64
+	Title          string
+	Body           string
+	Comments       []*Comment
+	Category       Category
+	MainCategory   Category
+}
+
+type Category struct {
+	gorm.Model
+	Name string
+
+	Categories []Category
+	CategoryID *uint
+}
+
+type Comment struct {
+	gorm.Model
+	PostId  int64
+	Content string
+	Post    Post
+}
+
+// Scanner
+type NullValue struct {
+	Id      int64
+	Name    sql.NullString  `sql:"not null"`
+	Gender  *sql.NullString `sql:"not null"`
+	Age     sql.NullInt64
+	Male    sql.NullBool
+	Height  sql.NullFloat64
+	AddedAt NullTime
+}
+
+type NullTime struct {
+	Time  time.Time
+	Valid bool
+}
+
+func (nt *NullTime) Scan(value interface{}) error {
+	if value == nil {
+		nt.Valid = false
+		return nil
+	}
+	nt.Time, nt.Valid = value.(time.Time), true
+	return nil
+}
+
+func (nt NullTime) Value() (driver.Value, error) {
+	if !nt.Valid {
+		return nil, nil
+	}
+	return nt.Time, nil
+}
+
+func getPreparedUser(name string, role string) *User {
+	var company Company
+	DB.Where(Company{Name: role}).FirstOrCreate(&company)
+
+	return &User{
+		Name:            name,
+		Age:             20,
+		Role:            Role{role},
+		BillingAddress:  Address{Address1: fmt.Sprintf("Billing Address %v", name)},
+		ShippingAddress: Address{Address1: fmt.Sprintf("Shipping Address %v", name)},
+		CreditCard:      CreditCard{Number: fmt.Sprintf("123456%v", name)},
+		Emails: []Email{
+			{Email: fmt.Sprintf("user_%v@example1.com", name)}, {Email: fmt.Sprintf("user_%v@example2.com", name)},
+		},
+		Company: company,
+		Languages: []Language{
+			{Name: fmt.Sprintf("lang_1_%v", name)},
+			{Name: fmt.Sprintf("lang_2_%v", name)},
+		},
+	}
+}
+
+func runMigration() {
+	if err := DB.DropTableIfExists(&User{}).Error; err != nil {
+		fmt.Printf("Got error when trying to delete table users, %+v\n", err)
+	}
+
+	for _, table := range []string{"animals", "user_languages"} {
+		DB.Exec(fmt.Sprintf("drop table %v;", table))
+	}
+
+	values := []interface{}{&Short{}, &ReallyLongThingThatReferencesShort{}, &ReallyLongTableNameToTestMySQLNameLengthLimit{}, &NotSoLongTableName{}, &Product{}, &Email{}, &Address{}, &CreditCard{}, &Company{}, &Role{}, &Language{}, &HNPost{}, &EngadgetPost{}, &Animal{}, &User{}, &JoinTable{}, &Post{}, &Category{}, &Comment{}, &Cat{}, &Dog{}, &Hamster{}, &Toy{}, &ElementWithIgnoredField{}}
+	for _, value := range values {
+		DB.DropTable(value)
+	}
+	if err := DB.AutoMigrate(values...).Error; err != nil {
+		panic(fmt.Sprintf("No error should happen when creating tables, but got %+v", err))
+	}
+}
+
+func TestIndexes(t *testing.T) {
+	if err := DB.Model(&Email{}).AddIndex("idx_email_email", "email").Error; err != nil {
+		t.Errorf("Got error when tried to create index: %+v", err)
+	}
+
+	scope := DB.NewScope(&Email{})
+	if !scope.Dialect().HasIndex(scope.TableName(), "idx_email_email") {
+		t.Errorf("Email should have index idx_email_email")
+	}
+
+	if err := DB.Model(&Email{}).RemoveIndex("idx_email_email").Error; err != nil {
+		t.Errorf("Got error when tried to remove index: %+v", err)
+	}
+
+	if scope.Dialect().HasIndex(scope.TableName(), "idx_email_email") {
+		t.Errorf("Email's index idx_email_email should be deleted")
+	}
+
+	if err := DB.Model(&Email{}).AddIndex("idx_email_email_and_user_id", "user_id", "email").Error; err != nil {
+		t.Errorf("Got error when tried to create index: %+v", err)
+	}
+
+	if !scope.Dialect().HasIndex(scope.TableName(), "idx_email_email_and_user_id") {
+		t.Errorf("Email should have index idx_email_email_and_user_id")
+	}
+
+	if err := DB.Model(&Email{}).RemoveIndex("idx_email_email_and_user_id").Error; err != nil {
+		t.Errorf("Got error when tried to remove index: %+v", err)
+	}
+
+	if scope.Dialect().HasIndex(scope.TableName(), "idx_email_email_and_user_id") {
+		t.Errorf("Email's index idx_email_email_and_user_id should be deleted")
+	}
+
+	if err := DB.Model(&Email{}).AddUniqueIndex("idx_email_email_and_user_id", "user_id", "email").Error; err != nil {
+		t.Errorf("Got error when tried to create index: %+v", err)
+	}
+
+	if !scope.Dialect().HasIndex(scope.TableName(), "idx_email_email_and_user_id") {
+		t.Errorf("Email should have index idx_email_email_and_user_id")
+	}
+
+	if DB.Save(&User{Name: "unique_indexes", Emails: []Email{{Email: "user1@example.comiii"}, {Email: "user1@example.com"}, {Email: "user1@example.com"}}}).Error == nil {
+		t.Errorf("Should fail to create duplicate records when a unique index is present")
+	}
+
+	var user = User{Name: "sample_user"}
+	DB.Save(&user)
+	if DB.Model(&user).Association("Emails").Append(Email{Email: "not-1duplicated@gmail.com"}, Email{Email: "not-duplicated2@gmail.com"}).Error != nil {
+		t.Errorf("Should get no error when appending two distinct emails for user")
+	}
+
+	if DB.Model(&user).Association("Emails").Append(Email{Email: "duplicated@gmail.com"}, Email{Email: "duplicated@gmail.com"}).Error == nil {
+		t.Errorf("Should get duplicated email error when inserting duplicated emails for a user")
+	}
+
+	if err := DB.Model(&Email{}).RemoveIndex("idx_email_email_and_user_id").Error; err != nil {
+		t.Errorf("Got error when tried to remove index: %+v", err)
+	}
+
+	if scope.Dialect().HasIndex(scope.TableName(), "idx_email_email_and_user_id") {
+		t.Errorf("Email's index idx_email_email_and_user_id should be deleted")
+	}
+
+	if DB.Save(&User{Name: "unique_indexes", Emails: []Email{{Email: "user1@example.com"}, {Email: "user1@example.com"}}}).Error != nil {
+		t.Errorf("Should be able to create duplicated emails after removing the unique index")
+	}
+}
+
+type BigEmail struct {
+	Id           int64
+	UserId       int64
+	Email        string     `sql:"index:idx_email_agent"`
+	UserAgent    string     `sql:"index:idx_email_agent"`
+	RegisteredAt *time.Time `sql:"unique_index"`
+	CreatedAt    time.Time
+	UpdatedAt    time.Time
+}
+
+func (b BigEmail) TableName() string {
+	return "emails"
+}
+
+func TestAutoMigration(t *testing.T) {
+	DB.AutoMigrate(&Address{})
+	if err := DB.Table("emails").AutoMigrate(&BigEmail{}).Error; err != nil {
+		t.Errorf("Auto Migrate should not raise any error")
+	}
+
+	now := time.Now()
+	DB.Save(&BigEmail{Email: "jinzhu@example.org", UserAgent: "pc", RegisteredAt: &now})
+
+	scope := DB.NewScope(&BigEmail{})
+	if !scope.Dialect().HasIndex(scope.TableName(), "idx_email_agent") {
+		t.Errorf("Failed to create index idx_email_agent")
+	}
+
+	if !scope.Dialect().HasIndex(scope.TableName(), "uix_emails_registered_at") {
+		t.Errorf("Failed to create index uix_emails_registered_at")
+	}
+
+	var bigemail BigEmail
+	DB.First(&bigemail, "user_agent = ?", "pc")
+	if bigemail.Email != "jinzhu@example.org" || bigemail.UserAgent != "pc" || bigemail.RegisteredAt.IsZero() {
+		t.Error("Big Emails should be saved and fetched correctly")
+	}
+}
+
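+// A comma-separated name list in a sql tag enrolls the column in several
+// indexes at once; an empty name (as in "unique_index:,uix_...") additionally
+// creates a default-named index, which the assertions below rely on.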
+type MultipleIndexes struct {
+	ID     int64
+	UserID int64  `sql:"unique_index:uix_multipleindexes_user_name,uix_multipleindexes_user_email;index:idx_multipleindexes_user_other"`
+	Name   string `sql:"unique_index:uix_multipleindexes_user_name"`
+	Email  string `sql:"unique_index:,uix_multipleindexes_user_email"`
+	Other  string `sql:"index:,idx_multipleindexes_user_other"`
+}
+
+func TestMultipleIndexes(t *testing.T) {
+	if err := DB.DropTableIfExists(&MultipleIndexes{}).Error; err != nil {
+		fmt.Printf("Got error when trying to delete table multiple_indexes, %+v\n", err)
+	}
+
+	DB.AutoMigrate(&MultipleIndexes{})
+	if err := DB.AutoMigrate(&BigEmail{}).Error; err != nil {
+		t.Errorf("Auto Migrate should not raise any error")
+	}
+
+	DB.Save(&MultipleIndexes{UserID: 1, Name: "jinzhu", Email: "jinzhu@example.org", Other: "foo"})
+
+	scope := DB.NewScope(&MultipleIndexes{})
+	if !scope.Dialect().HasIndex(scope.TableName(), "uix_multipleindexes_user_name") {
+		t.Errorf("Failed to create index uix_multipleindexes_user_name")
+	}
+
+	if !scope.Dialect().HasIndex(scope.TableName(), "uix_multipleindexes_user_email") {
+		t.Errorf("Failed to create index uix_multipleindexes_user_email")
+	}
+
+	if !scope.Dialect().HasIndex(scope.TableName(), "uix_multiple_indexes_email") {
+		t.Errorf("Failed to create index uix_multiple_indexes_email")
+	}
+
+	if !scope.Dialect().HasIndex(scope.TableName(), "idx_multipleindexes_user_other") {
+		t.Errorf("Failed to create index idx_multipleindexes_user_other")
+	}
+
+	if !scope.Dialect().HasIndex(scope.TableName(), "idx_multiple_indexes_other") {
+		t.Errorf("Failed to create index idx_multiple_indexes_other")
+	}
+
+	var multipleIndexes MultipleIndexes
+	DB.First(&multipleIndexes, "name = ?", "jinzhu")
+	if multipleIndexes.Email != "jinzhu@example.org" || multipleIndexes.Name != "jinzhu" {
+		t.Error("MultipleIndexes should be saved and fetched correctly")
+	}
+
+	// Check unique constraints
+	if err := DB.Save(&MultipleIndexes{UserID: 1, Name: "name1", Email: "jinzhu@example.org", Other: "foo"}).Error; err == nil {
+		t.Error("MultipleIndexes unique index failed")
+	}
+
+	if err := DB.Save(&MultipleIndexes{UserID: 1, Name: "name1", Email: "foo@example.org", Other: "foo"}).Error; err != nil {
+		t.Error("MultipleIndexes unique index failed")
+	}
+
+	if err := DB.Save(&MultipleIndexes{UserID: 2, Name: "name1", Email: "jinzhu@example.org", Other: "foo"}).Error; err == nil {
+		t.Error("MultipleIndexes unique index failed")
+	}
+
+	if err := DB.Save(&MultipleIndexes{UserID: 2, Name: "name1", Email: "foo2@example.org", Other: "foo"}).Error; err != nil {
+		t.Error("MultipleIndexes unique index failed")
+	}
+}
diff --git a/vendor/github.com/jinzhu/gorm/multi_primary_keys_test.go b/vendor/github.com/jinzhu/gorm/multi_primary_keys_test.go
new file mode 100644
index 000000000000..8b275d182b27
--- /dev/null
+++ b/vendor/github.com/jinzhu/gorm/multi_primary_keys_test.go
@@ -0,0 +1,381 @@
+package gorm_test
+
+import (
+	"os"
+	"reflect"
+	"sort"
+	"testing"
+)
+
+type Blog struct {
+	ID         uint   `gorm:"primary_key"`
+	Locale     string `gorm:"primary_key"`
+	Subject    string
+	Body       string
+	Tags       []Tag `gorm:"many2many:blog_tags;"`
+	SharedTags []Tag `gorm:"many2many:shared_blog_tags;ForeignKey:id;AssociationForeignKey:id"`
+	LocaleTags []Tag `gorm:"many2many:locale_blog_tags;ForeignKey:id,locale;AssociationForeignKey:id"`
+}
+
+type Tag struct {
+	ID     uint   `gorm:"primary_key"`
+	Locale string `gorm:"primary_key"`
+	Value  string
+	Blogs  []*Blog `gorm:"many2many:blogs_tags"`
+}
+
+func compareTags(tags []Tag, contents []string) bool {
+	var tagContents []string
+	for _, tag := range tags {
+		tagContents = append(tagContents, tag.Value)
+	}
+	sort.Strings(tagContents)
+	sort.Strings(contents)
+	return reflect.DeepEqual(tagContents, contents)
+}
+
+func TestManyToManyWithMultiPrimaryKeys(t *testing.T) {
+	if dialect := os.Getenv("GORM_DIALECT"); dialect != "" && dialect != "sqlite" {
+		DB.DropTable(&Blog{}, &Tag{})
+		DB.DropTable("blog_tags")
+		DB.CreateTable(&Blog{}, &Tag{})
+		blog := Blog{
+			Locale:  "ZH",
+			Subject: "subject",
+			Body:    "body",
+			Tags: []Tag{
+				{Locale: "ZH", Value: "tag1"},
+				{Locale: "ZH", Value: "tag2"},
+			},
+		}
+
+		DB.Save(&blog)
+		if !compareTags(blog.Tags, []string{"tag1", "tag2"}) {
+			t.Errorf("Blog should have two tags")
+		}
+
+		// Append
+		var tag3 = &Tag{Locale: "ZH", Value: "tag3"}
+		DB.Model(&blog).Association("Tags").Append([]*Tag{tag3})
+		if !compareTags(blog.Tags, []string{"tag1", "tag2", "tag3"}) {
+			t.Errorf("Blog should have three tags after Append")
+		}
+
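+		// Count queries the blog_tags join table directly, so it sees the row
+		// Append just inserted without reloading blog.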
+		if DB.Model(&blog).Association("Tags").Count() != 3 {
+			t.Errorf("Blog should have three tags after Append")
+		}
+
+		var tags []Tag
+		DB.Model(&blog).Related(&tags, "Tags")
+		if !compareTags(tags, []string{"tag1", "tag2", "tag3"}) {
+			t.Errorf("Should find 3 tags with Related")
+		}
+
+		var blog1 Blog
+		DB.Preload("Tags").Find(&blog1)
+		if !compareTags(blog1.Tags, []string{"tag1", "tag2", "tag3"}) {
+			t.Errorf("Failed to preload many2many relations")
+		}
+
+		// Replace
+		var tag5 = &Tag{Locale: "ZH", Value: "tag5"}
+		var tag6 = &Tag{Locale: "ZH", Value: "tag6"}
+		DB.Model(&blog).Association("Tags").Replace(tag5, tag6)
+		var tags2 []Tag
+		DB.Model(&blog).Related(&tags2, "Tags")
+		if !compareTags(tags2, []string{"tag5", "tag6"}) {
+			t.Errorf("Should find 2 tags after Replace")
+		}
+
+		if DB.Model(&blog).Association("Tags").Count() != 2 {
+			t.Errorf("Blog should have two tags after Replace")
+		}
+
+		// Delete
+		DB.Model(&blog).Association("Tags").Delete(tag5)
+		var tags3 []Tag
+		DB.Model(&blog).Related(&tags3, "Tags")
+		if !compareTags(tags3, []string{"tag6"}) {
+			t.Errorf("Should find 1 tag after Delete")
+		}
+
+		if DB.Model(&blog).Association("Tags").Count() != 1 {
+			t.Errorf("Blog should have one tag after Delete")
+		}
+
+		DB.Model(&blog).Association("Tags").Delete(tag3)
+		var tags4 []Tag
+		DB.Model(&blog).Related(&tags4, "Tags")
+		if !compareTags(tags4, []string{"tag6"}) {
+			t.Errorf("Tag should not be deleted when Delete with an unrelated tag")
+		}
+
+		// Clear
+		DB.Model(&blog).Association("Tags").Clear()
+		if DB.Model(&blog).Association("Tags").Count() != 0 {
+			t.Errorf("All tags should be cleared")
+		}
+	}
+}
+
+func TestManyToManyWithCustomizedForeignKeys(t *testing.T) {
+	if dialect := os.Getenv("GORM_DIALECT"); dialect != "" && dialect != "sqlite" {
+		DB.DropTable(&Blog{}, &Tag{})
+		DB.DropTable("shared_blog_tags")
+		DB.CreateTable(&Blog{}, &Tag{})
+		blog := Blog{
+			Locale:  "ZH",
+			Subject: "subject",
+			Body:    "body",
+			SharedTags: []Tag{
+				{Locale: "ZH", Value: "tag1"},
+				{Locale: "ZH", Value: "tag2"},
+			},
+		}
+		DB.Save(&blog)
+
+		blog2 := Blog{
+			ID:     blog.ID,
+			Locale: "EN",
+		}
+		DB.Create(&blog2)
+
+		if !compareTags(blog.SharedTags, []string{"tag1", "tag2"}) {
+			t.Errorf("Blog should have two tags")
+		}
+
+		// Append
+		var tag3 = &Tag{Locale: "ZH", Value: "tag3"}
+		DB.Model(&blog).Association("SharedTags").Append([]*Tag{tag3})
+		if !compareTags(blog.SharedTags, []string{"tag1", "tag2", "tag3"}) {
+			t.Errorf("Blog should have three tags after Append")
+		}
+
+		if DB.Model(&blog).Association("SharedTags").Count() != 3 {
+			t.Errorf("Blog should have three tags after Append")
+		}
+
+		if DB.Model(&blog2).Association("SharedTags").Count() != 3 {
+			t.Errorf("Blog should have three tags after Append")
+		}
+
+		var tags []Tag
+		DB.Model(&blog).Related(&tags, "SharedTags")
+		if !compareTags(tags, []string{"tag1", "tag2", "tag3"}) {
+			t.Errorf("Should find 3 tags with Related")
+		}
+
+		DB.Model(&blog2).Related(&tags, "SharedTags")
+		if !compareTags(tags, []string{"tag1", "tag2", "tag3"}) {
+			t.Errorf("Should find 3 tags with Related")
+		}
+
+		var blog1 Blog
+		DB.Preload("SharedTags").Find(&blog1)
+		if !compareTags(blog1.SharedTags, []string{"tag1", "tag2", "tag3"}) {
+			t.Errorf("Failed to preload many2many relations")
+		}
+
+		var tag4 = &Tag{Locale: "ZH", Value: "tag4"}
+		DB.Model(&blog2).Association("SharedTags").Append(tag4)
+
+		DB.Model(&blog).Related(&tags, "SharedTags")
+		if !compareTags(tags, []string{"tag1", "tag2", "tag3", "tag4"}) {
+			t.Errorf("Should find 4 tags with Related")
+		}
+
+		DB.Model(&blog2).Related(&tags, "SharedTags")
+		if !compareTags(tags, []string{"tag1", "tag2", "tag3", "tag4"}) {
+			t.Errorf("Should find 4 tags with Related")
+		}
+
+		// Replace
+		var tag5 = &Tag{Locale: "ZH", Value: "tag5"}
+		var tag6 = &Tag{Locale: "ZH", Value: "tag6"}
+		DB.Model(&blog2).Association("SharedTags").Replace(tag5, tag6)
+		var tags2 []Tag
+		DB.Model(&blog).Related(&tags2, "SharedTags")
+		if !compareTags(tags2, []string{"tag5", "tag6"}) {
+			t.Errorf("Should find 2 tags after Replace")
+		}
+
+		DB.Model(&blog2).Related(&tags2, "SharedTags")
+		if !compareTags(tags2, []string{"tag5", "tag6"}) {
+			t.Errorf("Should find 2 tags after Replace")
+		}
+
+		if DB.Model(&blog).Association("SharedTags").Count() != 2 {
+			t.Errorf("Blog should have two tags after Replace")
+		}
+
+		// Delete
+		DB.Model(&blog).Association("SharedTags").Delete(tag5)
+		var tags3 []Tag
+		DB.Model(&blog).Related(&tags3, "SharedTags")
+		if !compareTags(tags3, []string{"tag6"}) {
+			t.Errorf("Should find 1 tag after Delete")
+		}
+
+		if DB.Model(&blog).Association("SharedTags").Count() != 1 {
+			t.Errorf("Blog should have one tag after Delete")
+		}
+
+		DB.Model(&blog2).Association("SharedTags").Delete(tag3)
+		var tags4 []Tag
+		DB.Model(&blog).Related(&tags4, "SharedTags")
+		if !compareTags(tags4, []string{"tag6"}) {
+			t.Errorf("Tag should not be deleted when Delete with an unrelated tag")
+		}
+
+		// Clear
+		DB.Model(&blog2).Association("SharedTags").Clear()
+		if DB.Model(&blog).Association("SharedTags").Count() != 0 {
+			t.Errorf("All tags should be cleared")
+		}
+	}
+}
+
+func TestManyToManyWithCustomizedForeignKeys2(t *testing.T) {
+	if dialect := os.Getenv("GORM_DIALECT"); dialect != "" && dialect != "sqlite" {
+		DB.DropTable(&Blog{}, &Tag{})
+		DB.DropTable("locale_blog_tags")
+		DB.CreateTable(&Blog{}, &Tag{})
+		blog := Blog{
+			Locale:  "ZH",
+			Subject: "subject",
+			Body:    "body",
+			LocaleTags: []Tag{
+				{Locale: "ZH", Value: "tag1"},
+				{Locale: "ZH", Value: "tag2"},
+			},
+		}
+		DB.Save(&blog)
+
+		blog2 := Blog{
+			ID:     blog.ID,
+			Locale: "EN",
+		}
+		DB.Create(&blog2)
+
+		// Append
+		var tag3 = &Tag{Locale: "ZH", Value: "tag3"}
+		DB.Model(&blog).Association("LocaleTags").Append([]*Tag{tag3})
+		if !compareTags(blog.LocaleTags, []string{"tag1", "tag2", "tag3"}) {
+			t.Errorf("Blog should have three tags after Append")
+		}
+
+		if DB.Model(&blog).Association("LocaleTags").Count() != 3 {
+			t.Errorf("Blog should have three tags after Append")
+		}
+
+		if DB.Model(&blog2).Association("LocaleTags").Count() != 0 {
+			t.Errorf("EN Blog should have 0 tags after ZH Blog Append")
+		}
+
+		var tags []Tag
+		DB.Model(&blog).Related(&tags, "LocaleTags")
+		if !compareTags(tags, []string{"tag1", "tag2", "tag3"}) {
+			t.Errorf("Should find 3 tags with Related")
+		}
+
+		DB.Model(&blog2).Related(&tags, "LocaleTags")
+		if len(tags) != 0 {
+			t.Errorf("Should find 0 tags with Related for EN Blog")
+		}
+
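+		// LocaleTags joins through (id, locale), so the ZH and EN blogs keep
+		// separate tag sets even though they share the same ID.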
AND id = ?", "ZH", blog.ID) + if !compareTags(blog1.LocaleTags, []string{"tag1", "tag2", "tag3"}) { + t.Errorf("Preload many2many relations") + } + + var tag4 = &Tag{Locale: "ZH", Value: "tag4"} + DB.Model(&blog2).Association("LocaleTags").Append(tag4) + + DB.Model(&blog).Related(&tags, "LocaleTags") + if !compareTags(tags, []string{"tag1", "tag2", "tag3"}) { + t.Errorf("Should find 3 tags with Related for EN Blog") + } + + DB.Model(&blog2).Related(&tags, "LocaleTags") + if !compareTags(tags, []string{"tag4"}) { + t.Errorf("Should find 1 tags with Related for EN Blog") + } + + // Replace + var tag5 = &Tag{Locale: "ZH", Value: "tag5"} + var tag6 = &Tag{Locale: "ZH", Value: "tag6"} + DB.Model(&blog2).Association("LocaleTags").Replace(tag5, tag6) + + var tags2 []Tag + DB.Model(&blog).Related(&tags2, "LocaleTags") + if !compareTags(tags2, []string{"tag1", "tag2", "tag3"}) { + t.Errorf("CN Blog's tags should not be changed after EN Blog Replace") + } + + var blog11 Blog + DB.Preload("LocaleTags").First(&blog11, "id = ? AND locale = ?", blog.ID, blog.Locale) + if !compareTags(blog11.LocaleTags, []string{"tag1", "tag2", "tag3"}) { + t.Errorf("CN Blog's tags should not be changed after EN Blog Replace") + } + + DB.Model(&blog2).Related(&tags2, "LocaleTags") + if !compareTags(tags2, []string{"tag5", "tag6"}) { + t.Errorf("Should find 2 tags after Replace") + } + + var blog21 Blog + DB.Preload("LocaleTags").First(&blog21, "id = ? AND locale = ?", blog2.ID, blog2.Locale) + if !compareTags(blog21.LocaleTags, []string{"tag5", "tag6"}) { + t.Errorf("EN Blog's tags should be changed after Replace") + } + + if DB.Model(&blog).Association("LocaleTags").Count() != 3 { + t.Errorf("ZH Blog should has three tags after Replace") + } + + if DB.Model(&blog2).Association("LocaleTags").Count() != 2 { + t.Errorf("EN Blog should has two tags after Replace") + } + + // Delete + DB.Model(&blog).Association("LocaleTags").Delete(tag5) + + if DB.Model(&blog).Association("LocaleTags").Count() != 3 { + t.Errorf("ZH Blog should has three tags after Delete with EN's tag") + } + + if DB.Model(&blog2).Association("LocaleTags").Count() != 2 { + t.Errorf("EN Blog should has two tags after ZH Blog Delete with EN's tag") + } + + DB.Model(&blog2).Association("LocaleTags").Delete(tag5) + + if DB.Model(&blog).Association("LocaleTags").Count() != 3 { + t.Errorf("ZH Blog should has three tags after EN Blog Delete with EN's tag") + } + + if DB.Model(&blog2).Association("LocaleTags").Count() != 1 { + t.Errorf("EN Blog should has 1 tags after EN Blog Delete with EN's tag") + } + + // Clear + DB.Model(&blog2).Association("LocaleTags").Clear() + if DB.Model(&blog).Association("LocaleTags").Count() != 3 { + t.Errorf("ZH Blog's tags should not be cleared when clear EN Blog's tags") + } + + if DB.Model(&blog2).Association("LocaleTags").Count() != 0 { + t.Errorf("EN Blog's tags should be cleared when clear EN Blog's tags") + } + + DB.Model(&blog).Association("LocaleTags").Clear() + if DB.Model(&blog).Association("LocaleTags").Count() != 0 { + t.Errorf("ZH Blog's tags should be cleared when clear ZH Blog's tags") + } + + if DB.Model(&blog2).Association("LocaleTags").Count() != 0 { + t.Errorf("EN Blog's tags should be cleared") + } + } +} diff --git a/vendor/github.com/jinzhu/gorm/pointer_test.go b/vendor/github.com/jinzhu/gorm/pointer_test.go new file mode 100644 index 000000000000..2a68a5ab249a --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/pointer_test.go @@ -0,0 +1,84 @@ +package gorm_test + +import "testing" + +type PointerStruct struct { 
+ ID int64 + Name *string + Num *int +} + +type NormalStruct struct { + ID int64 + Name string + Num int +} + +func TestPointerFields(t *testing.T) { + DB.DropTable(&PointerStruct{}) + DB.AutoMigrate(&PointerStruct{}) + var name = "pointer struct 1" + var num = 100 + pointerStruct := PointerStruct{Name: &name, Num: &num} + if DB.Create(&pointerStruct).Error != nil { + t.Errorf("Failed to save pointer struct") + } + + var pointerStructResult PointerStruct + if err := DB.First(&pointerStructResult, "id = ?", pointerStruct.ID).Error; err != nil || *pointerStructResult.Name != name || *pointerStructResult.Num != num { + t.Errorf("Failed to query saved pointer struct") + } + + var tableName = DB.NewScope(&PointerStruct{}).TableName() + + var normalStruct NormalStruct + DB.Table(tableName).First(&normalStruct) + if normalStruct.Name != name || normalStruct.Num != num { + t.Errorf("Failed to query saved Normal struct") + } + + var nilPointerStruct = PointerStruct{} + if err := DB.Create(&nilPointerStruct).Error; err != nil { + t.Error("Failed to save nil pointer struct", err) + } + + var pointerStruct2 PointerStruct + if err := DB.First(&pointerStruct2, "id = ?", nilPointerStruct.ID).Error; err != nil { + t.Error("Failed to query saved nil pointer struct", err) + } + + var normalStruct2 NormalStruct + if err := DB.Table(tableName).First(&normalStruct2, "id = ?", nilPointerStruct.ID).Error; err != nil { + t.Error("Failed to query saved nil pointer struct", err) + } + + var partialNilPointerStruct1 = PointerStruct{Num: &num} + if err := DB.Create(&partialNilPointerStruct1).Error; err != nil { + t.Error("Failed to save partial nil pointer struct", err) + } + + var pointerStruct3 PointerStruct + if err := DB.First(&pointerStruct3, "id = ?", partialNilPointerStruct1.ID).Error; err != nil || *pointerStruct3.Num != num { + t.Error("Failed to query saved partial nil pointer struct", err) + } + + var normalStruct3 NormalStruct + if err := DB.Table(tableName).First(&normalStruct3, "id = ?", partialNilPointerStruct1.ID).Error; err != nil || normalStruct3.Num != num { + t.Error("Failed to query saved partial pointer struct", err) + } + + var partialNilPointerStruct2 = PointerStruct{Name: &name} + if err := DB.Create(&partialNilPointerStruct2).Error; err != nil { + t.Error("Failed to save partial nil pointer struct", err) + } + + var pointerStruct4 PointerStruct + if err := DB.First(&pointerStruct4, "id = ?", partialNilPointerStruct2.ID).Error; err != nil || *pointerStruct4.Name != name { + t.Error("Failed to query saved partial nil pointer struct", err) + } + + var normalStruct4 NormalStruct + if err := DB.Table(tableName).First(&normalStruct4, "id = ?", partialNilPointerStruct2.ID).Error; err != nil || normalStruct4.Name != name { + t.Error("Failed to query saved partial pointer struct", err) + } +} diff --git a/vendor/github.com/jinzhu/gorm/polymorphic_test.go b/vendor/github.com/jinzhu/gorm/polymorphic_test.go new file mode 100644 index 000000000000..d1ecfbbb6497 --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/polymorphic_test.go @@ -0,0 +1,366 @@ +package gorm_test + +import ( + "reflect" + "sort" + "testing" +) + +type Cat struct { + Id int + Name string + Toy Toy `gorm:"polymorphic:Owner;"` +} + +type Dog struct { + Id int + Name string + Toys []Toy `gorm:"polymorphic:Owner;"` +} + +type Hamster struct { + Id int + Name string + PreferredToy Toy `gorm:"polymorphic:Owner;polymorphic_value:hamster_preferred"` + OtherToy Toy `gorm:"polymorphic:Owner;polymorphic_value:hamster_other"` +} + +type Toy 
struct { + Id int + Name string + OwnerId int + OwnerType string +} + +var compareToys = func(toys []Toy, contents []string) bool { + var toyContents []string + for _, toy := range toys { + toyContents = append(toyContents, toy.Name) + } + sort.Strings(toyContents) + sort.Strings(contents) + return reflect.DeepEqual(toyContents, contents) +} + +func TestPolymorphic(t *testing.T) { + cat := Cat{Name: "Mr. Bigglesworth", Toy: Toy{Name: "cat toy"}} + dog := Dog{Name: "Pluto", Toys: []Toy{{Name: "dog toy 1"}, {Name: "dog toy 2"}}} + DB.Save(&cat).Save(&dog) + + if DB.Model(&cat).Association("Toy").Count() != 1 { + t.Errorf("Cat's toys count should be 1") + } + + if DB.Model(&dog).Association("Toys").Count() != 2 { + t.Errorf("Dog's toys count should be 2") + } + + // Query + var catToys []Toy + if DB.Model(&cat).Related(&catToys, "Toy").RecordNotFound() { + t.Errorf("Did not find any has one polymorphic association") + } else if len(catToys) != 1 { + t.Errorf("Should have found only one polymorphic has one association") + } else if catToys[0].Name != cat.Toy.Name { + t.Errorf("Should have found the proper has one polymorphic association") + } + + var dogToys []Toy + if DB.Model(&dog).Related(&dogToys, "Toys").RecordNotFound() { + t.Errorf("Did not find any polymorphic has many associations") + } else if len(dogToys) != len(dog.Toys) { + t.Errorf("Should have found all polymorphic has many associations") + } + + var catToy Toy + DB.Model(&cat).Association("Toy").Find(&catToy) + if catToy.Name != cat.Toy.Name { + t.Errorf("Should find has one polymorphic association") + } + + var dogToys1 []Toy + DB.Model(&dog).Association("Toys").Find(&dogToys1) + if !compareToys(dogToys1, []string{"dog toy 1", "dog toy 2"}) { + t.Errorf("Should find has many polymorphic association") + } + + // Append + DB.Model(&cat).Association("Toy").Append(&Toy{ + Name: "cat toy 2", + }) + + var catToy2 Toy + DB.Model(&cat).Association("Toy").Find(&catToy2) + if catToy2.Name != "cat toy 2" { + t.Errorf("Should update has one polymorphic association with Append") + } + + if DB.Model(&cat).Association("Toy").Count() != 1 { + t.Errorf("Cat's toys count should be 1 after Append") + } + + if DB.Model(&dog).Association("Toys").Count() != 2 { + t.Errorf("Should return two polymorphic has many associations") + } + + DB.Model(&dog).Association("Toys").Append(&Toy{ + Name: "dog toy 3", + }) + + var dogToys2 []Toy + DB.Model(&dog).Association("Toys").Find(&dogToys2) + if !compareToys(dogToys2, []string{"dog toy 1", "dog toy 2", "dog toy 3"}) { + t.Errorf("Dog's toys should be updated with Append") + } + + if DB.Model(&dog).Association("Toys").Count() != 3 { + t.Errorf("Should return three polymorphic has many associations") + } + + // Replace + DB.Model(&cat).Association("Toy").Replace(&Toy{ + Name: "cat toy 3", + }) + + var catToy3 Toy + DB.Model(&cat).Association("Toy").Find(&catToy3) + if catToy3.Name != "cat toy 3" { + t.Errorf("Should update has one polymorphic association with Replace") + } + + if DB.Model(&cat).Association("Toy").Count() != 1 { + t.Errorf("Cat's toys count should be 1 after Replace") + } + + if DB.Model(&dog).Association("Toys").Count() != 3 { + t.Errorf("Should return three polymorphic has many associations") + } + + DB.Model(&dog).Association("Toys").Replace(&Toy{ + Name: "dog toy 4", + }, []Toy{ + {Name: "dog toy 5"}, {Name: "dog toy 6"}, {Name: "dog toy 7"}, + }) + + var dogToys3 []Toy + DB.Model(&dog).Association("Toys").Find(&dogToys3) + if !compareToys(dogToys3, []string{"dog toy 4", "dog toy 5", "dog 
toy 6", "dog toy 7"}) { + t.Errorf("Dog's toys should be updated with Replace") + } + + if DB.Model(&dog).Association("Toys").Count() != 4 { + t.Errorf("Should return three polymorphic has many associations") + } + + // Delete + DB.Model(&cat).Association("Toy").Delete(&catToy2) + + var catToy4 Toy + DB.Model(&cat).Association("Toy").Find(&catToy4) + if catToy4.Name != "cat toy 3" { + t.Errorf("Should not update has one polymorphic association when Delete a unrelated Toy") + } + + if DB.Model(&cat).Association("Toy").Count() != 1 { + t.Errorf("Cat's toys count should be 1") + } + + if DB.Model(&dog).Association("Toys").Count() != 4 { + t.Errorf("Dog's toys count should be 4") + } + + DB.Model(&cat).Association("Toy").Delete(&catToy3) + + if !DB.Model(&cat).Related(&Toy{}, "Toy").RecordNotFound() { + t.Errorf("Toy should be deleted with Delete") + } + + if DB.Model(&cat).Association("Toy").Count() != 0 { + t.Errorf("Cat's toys count should be 0 after Delete") + } + + if DB.Model(&dog).Association("Toys").Count() != 4 { + t.Errorf("Dog's toys count should not be changed when delete cat's toy") + } + + DB.Model(&dog).Association("Toys").Delete(&dogToys2) + + if DB.Model(&dog).Association("Toys").Count() != 4 { + t.Errorf("Dog's toys count should not be changed when delete unrelated toys") + } + + DB.Model(&dog).Association("Toys").Delete(&dogToys3) + + if DB.Model(&dog).Association("Toys").Count() != 0 { + t.Errorf("Dog's toys count should be deleted with Delete") + } + + // Clear + DB.Model(&cat).Association("Toy").Append(&Toy{ + Name: "cat toy 2", + }) + + if DB.Model(&cat).Association("Toy").Count() != 1 { + t.Errorf("Cat's toys should be added with Append") + } + + DB.Model(&cat).Association("Toy").Clear() + + if DB.Model(&cat).Association("Toy").Count() != 0 { + t.Errorf("Cat's toys should be cleared with Clear") + } + + DB.Model(&dog).Association("Toys").Append(&Toy{ + Name: "dog toy 8", + }) + + if DB.Model(&dog).Association("Toys").Count() != 1 { + t.Errorf("Dog's toys should be added with Append") + } + + DB.Model(&dog).Association("Toys").Clear() + + if DB.Model(&dog).Association("Toys").Count() != 0 { + t.Errorf("Dog's toys should be cleared with Clear") + } +} + +func TestNamedPolymorphic(t *testing.T) { + hamster := Hamster{Name: "Mr. 
Hammond", PreferredToy: Toy{Name: "bike"}, OtherToy: Toy{Name: "treadmill"}} + DB.Save(&hamster) + + hamster2 := Hamster{} + DB.Preload("PreferredToy").Preload("OtherToy").Find(&hamster2, hamster.Id) + if hamster2.PreferredToy.Id != hamster.PreferredToy.Id || hamster2.PreferredToy.Name != hamster.PreferredToy.Name { + t.Errorf("Hamster's preferred toy couldn't be preloaded") + } + if hamster2.OtherToy.Id != hamster.OtherToy.Id || hamster2.OtherToy.Name != hamster.OtherToy.Name { + t.Errorf("Hamster's other toy couldn't be preloaded") + } + + // clear to omit Toy.Id in count + hamster2.PreferredToy = Toy{} + hamster2.OtherToy = Toy{} + + if DB.Model(&hamster2).Association("PreferredToy").Count() != 1 { + t.Errorf("Hamster's preferred toy count should be 1") + } + + if DB.Model(&hamster2).Association("OtherToy").Count() != 1 { + t.Errorf("Hamster's other toy count should be 1") + } + + // Query + var hamsterToys []Toy + if DB.Model(&hamster).Related(&hamsterToys, "PreferredToy").RecordNotFound() { + t.Errorf("Did not find any has one polymorphic association") + } else if len(hamsterToys) != 1 { + t.Errorf("Should have found only one polymorphic has one association") + } else if hamsterToys[0].Name != hamster.PreferredToy.Name { + t.Errorf("Should have found the proper has one polymorphic association") + } + + if DB.Model(&hamster).Related(&hamsterToys, "OtherToy").RecordNotFound() { + t.Errorf("Did not find any has one polymorphic association") + } else if len(hamsterToys) != 1 { + t.Errorf("Should have found only one polymorphic has one association") + } else if hamsterToys[0].Name != hamster.OtherToy.Name { + t.Errorf("Should have found the proper has one polymorphic association") + } + + hamsterToy := Toy{} + DB.Model(&hamster).Association("PreferredToy").Find(&hamsterToy) + if hamsterToy.Name != hamster.PreferredToy.Name { + t.Errorf("Should find has one polymorphic association") + } + hamsterToy = Toy{} + DB.Model(&hamster).Association("OtherToy").Find(&hamsterToy) + if hamsterToy.Name != hamster.OtherToy.Name { + t.Errorf("Should find has one polymorphic association") + } + + // Append + DB.Model(&hamster).Association("PreferredToy").Append(&Toy{ + Name: "bike 2", + }) + DB.Model(&hamster).Association("OtherToy").Append(&Toy{ + Name: "treadmill 2", + }) + + hamsterToy = Toy{} + DB.Model(&hamster).Association("PreferredToy").Find(&hamsterToy) + if hamsterToy.Name != "bike 2" { + t.Errorf("Should update has one polymorphic association with Append") + } + + hamsterToy = Toy{} + DB.Model(&hamster).Association("OtherToy").Find(&hamsterToy) + if hamsterToy.Name != "treadmill 2" { + t.Errorf("Should update has one polymorphic association with Append") + } + + if DB.Model(&hamster2).Association("PreferredToy").Count() != 1 { + t.Errorf("Hamster's toys count should be 1 after Append") + } + + if DB.Model(&hamster2).Association("OtherToy").Count() != 1 { + t.Errorf("Hamster's toys count should be 1 after Append") + } + + // Replace + DB.Model(&hamster).Association("PreferredToy").Replace(&Toy{ + Name: "bike 3", + }) + DB.Model(&hamster).Association("OtherToy").Replace(&Toy{ + Name: "treadmill 3", + }) + + hamsterToy = Toy{} + DB.Model(&hamster).Association("PreferredToy").Find(&hamsterToy) + if hamsterToy.Name != "bike 3" { + t.Errorf("Should update has one polymorphic association with Replace") + } + + hamsterToy = Toy{} + DB.Model(&hamster).Association("OtherToy").Find(&hamsterToy) + if hamsterToy.Name != "treadmill 3" { + t.Errorf("Should update has one polymorphic association with Replace") 
+ } + + if DB.Model(&hamster2).Association("PreferredToy").Count() != 1 { + t.Errorf("hamster's toys count should be 1 after Replace") + } + + if DB.Model(&hamster2).Association("OtherToy").Count() != 1 { + t.Errorf("hamster's toys count should be 1 after Replace") + } + + // Clear + DB.Model(&hamster).Association("PreferredToy").Append(&Toy{ + Name: "bike 2", + }) + DB.Model(&hamster).Association("OtherToy").Append(&Toy{ + Name: "treadmill 2", + }) + + if DB.Model(&hamster).Association("PreferredToy").Count() != 1 { + t.Errorf("Hamster's toys should be added with Append") + } + if DB.Model(&hamster).Association("OtherToy").Count() != 1 { + t.Errorf("Hamster's toys should be added with Append") + } + + DB.Model(&hamster).Association("PreferredToy").Clear() + + if DB.Model(&hamster2).Association("PreferredToy").Count() != 0 { + t.Errorf("Hamster's preferred toy should be cleared with Clear") + } + if DB.Model(&hamster2).Association("OtherToy").Count() != 1 { + t.Errorf("Hamster's other toy should be still available") + } + + DB.Model(&hamster).Association("OtherToy").Clear() + if DB.Model(&hamster).Association("OtherToy").Count() != 0 { + t.Errorf("Hamster's other toy should be cleared with Clear") + } +} diff --git a/vendor/github.com/jinzhu/gorm/preload_test.go b/vendor/github.com/jinzhu/gorm/preload_test.go new file mode 100644 index 000000000000..8b8b39b82384 --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/preload_test.go @@ -0,0 +1,1606 @@ +package gorm_test + +import ( + "database/sql" + "encoding/json" + "os" + "reflect" + "testing" + + "github.com/jinzhu/gorm" +) + +func getPreloadUser(name string) *User { + return getPreparedUser(name, "Preload") +} + +func checkUserHasPreloadData(user User, t *testing.T) { + u := getPreloadUser(user.Name) + if user.BillingAddress.Address1 != u.BillingAddress.Address1 { + t.Error("Failed to preload user's BillingAddress") + } + + if user.ShippingAddress.Address1 != u.ShippingAddress.Address1 { + t.Error("Failed to preload user's ShippingAddress") + } + + if user.CreditCard.Number != u.CreditCard.Number { + t.Error("Failed to preload user's CreditCard") + } + + if user.Company.Name != u.Company.Name { + t.Error("Failed to preload user's Company") + } + + if len(user.Emails) != len(u.Emails) { + t.Error("Failed to preload user's Emails") + } else { + var found int + for _, e1 := range u.Emails { + for _, e2 := range user.Emails { + if e1.Email == e2.Email { + found++ + break + } + } + } + if found != len(u.Emails) { + t.Error("Failed to preload user's email details") + } + } +} + +func TestPreload(t *testing.T) { + user1 := getPreloadUser("user1") + DB.Save(user1) + + preloadDB := DB.Where("role = ?", "Preload").Preload("BillingAddress").Preload("ShippingAddress"). 
+ Preload("CreditCard").Preload("Emails").Preload("Company") + var user User + preloadDB.Find(&user) + checkUserHasPreloadData(user, t) + + user2 := getPreloadUser("user2") + DB.Save(user2) + + user3 := getPreloadUser("user3") + DB.Save(user3) + + var users []User + preloadDB.Find(&users) + + for _, user := range users { + checkUserHasPreloadData(user, t) + } + + var users2 []*User + preloadDB.Find(&users2) + + for _, user := range users2 { + checkUserHasPreloadData(*user, t) + } + + var users3 []*User + preloadDB.Preload("Emails", "email = ?", user3.Emails[0].Email).Find(&users3) + + for _, user := range users3 { + if user.Name == user3.Name { + if len(user.Emails) != 1 { + t.Errorf("should only preload one emails for user3 when with condition") + } + } else if len(user.Emails) != 0 { + t.Errorf("should not preload any emails for other users when with condition") + } else if user.Emails == nil { + t.Errorf("should return an empty slice to indicate zero results") + } + } +} + +func TestNestedPreload1(t *testing.T) { + type ( + Level1 struct { + ID uint + Value string + Level2ID uint + } + Level2 struct { + ID uint + Level1 Level1 + Level3ID uint + } + Level3 struct { + ID uint + Name string + Level2 Level2 + } + ) + DB.DropTableIfExists(&Level3{}) + DB.DropTableIfExists(&Level2{}) + DB.DropTableIfExists(&Level1{}) + if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}).Error; err != nil { + t.Error(err) + } + + want := Level3{Level2: Level2{Level1: Level1{Value: "value"}}} + if err := DB.Create(&want).Error; err != nil { + t.Error(err) + } + + var got Level3 + if err := DB.Preload("Level2").Preload("Level2.Level1").Find(&got).Error; err != nil { + t.Error(err) + } + + if !reflect.DeepEqual(got, want) { + t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want)) + } + + if err := DB.Preload("Level2").Preload("Level2.Level1").Find(&got, "name = ?", "not_found").Error; err != gorm.ErrRecordNotFound { + t.Error(err) + } +} + +func TestNestedPreload2(t *testing.T) { + type ( + Level1 struct { + ID uint + Value string + Level2ID uint + } + Level2 struct { + ID uint + Level1s []*Level1 + Level3ID uint + } + Level3 struct { + ID uint + Name string + Level2s []Level2 + } + ) + DB.DropTableIfExists(&Level3{}) + DB.DropTableIfExists(&Level2{}) + DB.DropTableIfExists(&Level1{}) + if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}).Error; err != nil { + t.Error(err) + } + + want := Level3{ + Level2s: []Level2{ + { + Level1s: []*Level1{ + {Value: "value1"}, + {Value: "value2"}, + }, + }, + { + Level1s: []*Level1{ + {Value: "value3"}, + }, + }, + }, + } + if err := DB.Create(&want).Error; err != nil { + t.Error(err) + } + + var got Level3 + if err := DB.Preload("Level2s.Level1s").Find(&got).Error; err != nil { + t.Error(err) + } + + if !reflect.DeepEqual(got, want) { + t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want)) + } +} + +func TestNestedPreload3(t *testing.T) { + type ( + Level1 struct { + ID uint + Value string + Level2ID uint + } + Level2 struct { + ID uint + Level1 Level1 + Level3ID uint + } + Level3 struct { + Name string + ID uint + Level2s []Level2 + } + ) + DB.DropTableIfExists(&Level3{}) + DB.DropTableIfExists(&Level2{}) + DB.DropTableIfExists(&Level1{}) + if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}).Error; err != nil { + t.Error(err) + } + + want := Level3{ + Level2s: []Level2{ + {Level1: Level1{Value: "value1"}}, + {Level1: Level1{Value: "value2"}}, + }, + } + if err := DB.Create(&want).Error; err != nil { + t.Error(err) + } + + var got 
Level3 + if err := DB.Preload("Level2s.Level1").Find(&got).Error; err != nil { + t.Error(err) + } + + if !reflect.DeepEqual(got, want) { + t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want)) + } +} + +func TestNestedPreload4(t *testing.T) { + type ( + Level1 struct { + ID uint + Value string + Level2ID uint + } + Level2 struct { + ID uint + Level1s []Level1 + Level3ID uint + } + Level3 struct { + ID uint + Name string + Level2 Level2 + } + ) + DB.DropTableIfExists(&Level3{}) + DB.DropTableIfExists(&Level2{}) + DB.DropTableIfExists(&Level1{}) + if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}).Error; err != nil { + t.Error(err) + } + + want := Level3{ + Level2: Level2{ + Level1s: []Level1{ + {Value: "value1"}, + {Value: "value2"}, + }, + }, + } + if err := DB.Create(&want).Error; err != nil { + t.Error(err) + } + + var got Level3 + if err := DB.Preload("Level2.Level1s").Find(&got).Error; err != nil { + t.Error(err) + } + + if !reflect.DeepEqual(got, want) { + t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want)) + } +} + +// Slice: []Level3 +func TestNestedPreload5(t *testing.T) { + type ( + Level1 struct { + ID uint + Value string + Level2ID uint + } + Level2 struct { + ID uint + Level1 Level1 + Level3ID uint + } + Level3 struct { + ID uint + Name string + Level2 Level2 + } + ) + DB.DropTableIfExists(&Level3{}) + DB.DropTableIfExists(&Level2{}) + DB.DropTableIfExists(&Level1{}) + if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}).Error; err != nil { + t.Error(err) + } + + want := make([]Level3, 2) + want[0] = Level3{Level2: Level2{Level1: Level1{Value: "value"}}} + if err := DB.Create(&want[0]).Error; err != nil { + t.Error(err) + } + want[1] = Level3{Level2: Level2{Level1: Level1{Value: "value2"}}} + if err := DB.Create(&want[1]).Error; err != nil { + t.Error(err) + } + + var got []Level3 + if err := DB.Preload("Level2").Preload("Level2.Level1").Find(&got).Error; err != nil { + t.Error(err) + } + + if !reflect.DeepEqual(got, want) { + t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want)) + } +} + +func TestNestedPreload6(t *testing.T) { + type ( + Level1 struct { + ID uint + Value string + Level2ID uint + } + Level2 struct { + ID uint + Level1s []Level1 + Level3ID uint + } + Level3 struct { + ID uint + Name string + Level2s []Level2 + } + ) + DB.DropTableIfExists(&Level3{}) + DB.DropTableIfExists(&Level2{}) + DB.DropTableIfExists(&Level1{}) + if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}).Error; err != nil { + t.Error(err) + } + + want := make([]Level3, 2) + want[0] = Level3{ + Level2s: []Level2{ + { + Level1s: []Level1{ + {Value: "value1"}, + {Value: "value2"}, + }, + }, + { + Level1s: []Level1{ + {Value: "value3"}, + }, + }, + }, + } + if err := DB.Create(&want[0]).Error; err != nil { + t.Error(err) + } + + want[1] = Level3{ + Level2s: []Level2{ + { + Level1s: []Level1{ + {Value: "value3"}, + {Value: "value4"}, + }, + }, + { + Level1s: []Level1{ + {Value: "value5"}, + }, + }, + }, + } + if err := DB.Create(&want[1]).Error; err != nil { + t.Error(err) + } + + var got []Level3 + if err := DB.Preload("Level2s.Level1s").Find(&got).Error; err != nil { + t.Error(err) + } + + if !reflect.DeepEqual(got, want) { + t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want)) + } +} + +func TestNestedPreload7(t *testing.T) { + type ( + Level1 struct { + ID uint + Value string + Level2ID uint + } + Level2 struct { + ID uint + Level1 Level1 + Level3ID uint + } + Level3 struct { + ID uint + Name string + Level2s []Level2 + } + ) 
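+	// "Level2s.Level1" preloads the has-many Level2s first, then the has-one
+	// Level1 on each element of that slice.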
+ DB.DropTableIfExists(&Level3{}) + DB.DropTableIfExists(&Level2{}) + DB.DropTableIfExists(&Level1{}) + if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}).Error; err != nil { + t.Error(err) + } + + want := make([]Level3, 2) + want[0] = Level3{ + Level2s: []Level2{ + {Level1: Level1{Value: "value1"}}, + {Level1: Level1{Value: "value2"}}, + }, + } + if err := DB.Create(&want[0]).Error; err != nil { + t.Error(err) + } + + want[1] = Level3{ + Level2s: []Level2{ + {Level1: Level1{Value: "value3"}}, + {Level1: Level1{Value: "value4"}}, + }, + } + if err := DB.Create(&want[1]).Error; err != nil { + t.Error(err) + } + + var got []Level3 + if err := DB.Preload("Level2s.Level1").Find(&got).Error; err != nil { + t.Error(err) + } + + if !reflect.DeepEqual(got, want) { + t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want)) + } +} + +func TestNestedPreload8(t *testing.T) { + type ( + Level1 struct { + ID uint + Value string + Level2ID uint + } + Level2 struct { + ID uint + Level1s []Level1 + Level3ID uint + } + Level3 struct { + ID uint + Name string + Level2 Level2 + } + ) + DB.DropTableIfExists(&Level3{}) + DB.DropTableIfExists(&Level2{}) + DB.DropTableIfExists(&Level1{}) + if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}).Error; err != nil { + t.Error(err) + } + + want := make([]Level3, 2) + want[0] = Level3{ + Level2: Level2{ + Level1s: []Level1{ + {Value: "value1"}, + {Value: "value2"}, + }, + }, + } + if err := DB.Create(&want[0]).Error; err != nil { + t.Error(err) + } + want[1] = Level3{ + Level2: Level2{ + Level1s: []Level1{ + {Value: "value3"}, + {Value: "value4"}, + }, + }, + } + if err := DB.Create(&want[1]).Error; err != nil { + t.Error(err) + } + + var got []Level3 + if err := DB.Preload("Level2.Level1s").Find(&got).Error; err != nil { + t.Error(err) + } + + if !reflect.DeepEqual(got, want) { + t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want)) + } +} + +func TestNestedPreload9(t *testing.T) { + type ( + Level0 struct { + ID uint + Value string + Level1ID uint + } + Level1 struct { + ID uint + Value string + Level2ID uint + Level2_1ID uint + Level0s []Level0 + } + Level2 struct { + ID uint + Level1s []Level1 + Level3ID uint + } + Level2_1 struct { + ID uint + Level1s []Level1 + Level3ID uint + } + Level3 struct { + ID uint + Name string + Level2 Level2 + Level2_1 Level2_1 + } + ) + DB.DropTableIfExists(&Level3{}) + DB.DropTableIfExists(&Level2{}) + DB.DropTableIfExists(&Level2_1{}) + DB.DropTableIfExists(&Level1{}) + DB.DropTableIfExists(&Level0{}) + if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}, &Level2_1{}, &Level0{}).Error; err != nil { + t.Error(err) + } + + want := make([]Level3, 2) + want[0] = Level3{ + Level2: Level2{ + Level1s: []Level1{ + {Value: "value1"}, + {Value: "value2"}, + }, + }, + Level2_1: Level2_1{ + Level1s: []Level1{ + { + Value: "value1-1", + Level0s: []Level0{{Value: "Level0-1"}}, + }, + { + Value: "value2-2", + Level0s: []Level0{{Value: "Level0-2"}}, + }, + }, + }, + } + if err := DB.Create(&want[0]).Error; err != nil { + t.Error(err) + } + want[1] = Level3{ + Level2: Level2{ + Level1s: []Level1{ + {Value: "value3"}, + {Value: "value4"}, + }, + }, + Level2_1: Level2_1{ + Level1s: []Level1{ + { + Value: "value3-3", + Level0s: []Level0{}, + }, + { + Value: "value4-4", + Level0s: []Level0{}, + }, + }, + }, + } + if err := DB.Create(&want[1]).Error; err != nil { + t.Error(err) + } + + var got []Level3 + if err := 
DB.Preload("Level2").Preload("Level2.Level1s").Preload("Level2_1").Preload("Level2_1.Level1s").Preload("Level2_1.Level1s.Level0s").Find(&got).Error; err != nil { + t.Error(err) + } + + if !reflect.DeepEqual(got, want) { + t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want)) + } +} + +type LevelA1 struct { + ID uint + Value string +} + +type LevelA2 struct { + ID uint + Value string + LevelA3s []*LevelA3 +} + +type LevelA3 struct { + ID uint + Value string + LevelA1ID sql.NullInt64 + LevelA1 *LevelA1 + LevelA2ID sql.NullInt64 + LevelA2 *LevelA2 +} + +func TestNestedPreload10(t *testing.T) { + DB.DropTableIfExists(&LevelA3{}) + DB.DropTableIfExists(&LevelA2{}) + DB.DropTableIfExists(&LevelA1{}) + + if err := DB.AutoMigrate(&LevelA1{}, &LevelA2{}, &LevelA3{}).Error; err != nil { + t.Error(err) + } + + levelA1 := &LevelA1{Value: "foo"} + if err := DB.Save(levelA1).Error; err != nil { + t.Error(err) + } + + want := []*LevelA2{ + { + Value: "bar", + LevelA3s: []*LevelA3{ + { + Value: "qux", + LevelA1: levelA1, + }, + }, + }, + { + Value: "bar 2", + LevelA3s: []*LevelA3{}, + }, + } + for _, levelA2 := range want { + if err := DB.Save(levelA2).Error; err != nil { + t.Error(err) + } + } + + var got []*LevelA2 + if err := DB.Preload("LevelA3s.LevelA1").Find(&got).Error; err != nil { + t.Error(err) + } + + if !reflect.DeepEqual(got, want) { + t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want)) + } +} + +type LevelB1 struct { + ID uint + Value string + LevelB3s []*LevelB3 +} + +type LevelB2 struct { + ID uint + Value string +} + +type LevelB3 struct { + ID uint + Value string + LevelB1ID sql.NullInt64 + LevelB1 *LevelB1 + LevelB2s []*LevelB2 `gorm:"many2many:levelb1_levelb3_levelb2s"` +} + +func TestNestedPreload11(t *testing.T) { + DB.DropTableIfExists(&LevelB2{}) + DB.DropTableIfExists(&LevelB3{}) + DB.DropTableIfExists(&LevelB1{}) + if err := DB.AutoMigrate(&LevelB1{}, &LevelB2{}, &LevelB3{}).Error; err != nil { + t.Error(err) + } + + levelB1 := &LevelB1{Value: "foo"} + if err := DB.Create(levelB1).Error; err != nil { + t.Error(err) + } + + levelB3 := &LevelB3{ + Value: "bar", + LevelB1ID: sql.NullInt64{Valid: true, Int64: int64(levelB1.ID)}, + } + if err := DB.Create(levelB3).Error; err != nil { + t.Error(err) + } + levelB1.LevelB3s = []*LevelB3{levelB3} + + want := []*LevelB1{levelB1} + var got []*LevelB1 + if err := DB.Preload("LevelB3s.LevelB2s").Find(&got).Error; err != nil { + t.Error(err) + } + + if !reflect.DeepEqual(got, want) { + t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want)) + } +} + +type LevelC1 struct { + ID uint + Value string + LevelC2ID uint +} + +type LevelC2 struct { + ID uint + Value string + LevelC1 LevelC1 +} + +type LevelC3 struct { + ID uint + Value string + LevelC2ID uint + LevelC2 LevelC2 +} + +func TestNestedPreload12(t *testing.T) { + DB.DropTableIfExists(&LevelC2{}) + DB.DropTableIfExists(&LevelC3{}) + DB.DropTableIfExists(&LevelC1{}) + if err := DB.AutoMigrate(&LevelC1{}, &LevelC2{}, &LevelC3{}).Error; err != nil { + t.Error(err) + } + + level2 := LevelC2{ + Value: "c2", + LevelC1: LevelC1{ + Value: "c1", + }, + } + DB.Create(&level2) + + want := []LevelC3{ + { + Value: "c3-1", + LevelC2: level2, + }, { + Value: "c3-2", + LevelC2: level2, + }, + } + + for i := range want { + if err := DB.Create(&want[i]).Error; err != nil { + t.Error(err) + } + } + + var got []LevelC3 + if err := DB.Preload("LevelC2").Preload("LevelC2.LevelC1").Find(&got).Error; err != nil { + t.Error(err) + } + + if !reflect.DeepEqual(got, want) { + 
t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want)) + } +} + +func TestManyToManyPreloadWithMultiPrimaryKeys(t *testing.T) { + if dialect := os.Getenv("GORM_DIALECT"); dialect == "" || dialect == "sqlite" { + return + } + + type ( + Level1 struct { + ID uint `gorm:"primary_key;"` + LanguageCode string `gorm:"primary_key"` + Value string + } + Level2 struct { + ID uint `gorm:"primary_key;"` + LanguageCode string `gorm:"primary_key"` + Value string + Level1s []Level1 `gorm:"many2many:levels;"` + } + ) + + DB.DropTableIfExists(&Level2{}) + DB.DropTableIfExists(&Level1{}) + DB.DropTableIfExists("levels") + + if err := DB.AutoMigrate(&Level2{}, &Level1{}).Error; err != nil { + t.Error(err) + } + + want := Level2{Value: "Bob", LanguageCode: "ru", Level1s: []Level1{ + {Value: "ru", LanguageCode: "ru"}, + {Value: "en", LanguageCode: "en"}, + }} + if err := DB.Save(&want).Error; err != nil { + t.Error(err) + } + + want2 := Level2{Value: "Tom", LanguageCode: "zh", Level1s: []Level1{ + {Value: "zh", LanguageCode: "zh"}, + {Value: "de", LanguageCode: "de"}, + }} + if err := DB.Save(&want2).Error; err != nil { + t.Error(err) + } + + var got Level2 + if err := DB.Preload("Level1s").Find(&got, "value = ?", "Bob").Error; err != nil { + t.Error(err) + } + + if !reflect.DeepEqual(got, want) { + t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want)) + } + + var got2 Level2 + if err := DB.Preload("Level1s").Find(&got2, "value = ?", "Tom").Error; err != nil { + t.Error(err) + } + + if !reflect.DeepEqual(got2, want2) { + t.Errorf("got %s; want %s", toJSONString(got2), toJSONString(want2)) + } + + var got3 []Level2 + if err := DB.Preload("Level1s").Find(&got3, "value IN (?)", []string{"Bob", "Tom"}).Error; err != nil { + t.Error(err) + } + + if !reflect.DeepEqual(got3, []Level2{got, got2}) { + t.Errorf("got %s; want %s", toJSONString(got3), toJSONString([]Level2{got, got2})) + } + + var got4 []Level2 + if err := DB.Preload("Level1s", "value IN (?)", []string{"zh", "ru"}).Find(&got4, "value IN (?)", []string{"Bob", "Tom"}).Error; err != nil { + t.Error(err) + } + + var ruLevel1 Level1 + var zhLevel1 Level1 + DB.First(&ruLevel1, "value = ?", "ru") + DB.First(&zhLevel1, "value = ?", "zh") + + got.Level1s = []Level1{ruLevel1} + got2.Level1s = []Level1{zhLevel1} + if !reflect.DeepEqual(got4, []Level2{got, got2}) { + t.Errorf("got %s; want %s", toJSONString(got4), toJSONString([]Level2{got, got2})) + } + + if err := DB.Preload("Level1s").Find(&got4, "value IN (?)", []string{"non-existing"}).Error; err != nil { + t.Error(err) + } +} + +func TestManyToManyPreloadForNestedPointer(t *testing.T) { + type ( + Level1 struct { + ID uint + Value string + } + Level2 struct { + ID uint + Value string + Level1s []*Level1 `gorm:"many2many:levels;"` + } + Level3 struct { + ID uint + Value string + Level2ID sql.NullInt64 + Level2 *Level2 + } + ) + + DB.DropTableIfExists(&Level3{}) + DB.DropTableIfExists(&Level2{}) + DB.DropTableIfExists(&Level1{}) + DB.DropTableIfExists("levels") + + if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}).Error; err != nil { + t.Error(err) + } + + want := Level3{ + Value: "Bob", + Level2: &Level2{ + Value: "Foo", + Level1s: []*Level1{ + {Value: "ru"}, + {Value: "en"}, + }, + }, + } + if err := DB.Save(&want).Error; err != nil { + t.Error(err) + } + + want2 := Level3{ + Value: "Tom", + Level2: &Level2{ + Value: "Bar", + Level1s: []*Level1{ + {Value: "zh"}, + {Value: "de"}, + }, + }, + } + if err := DB.Save(&want2).Error; err != nil { + t.Error(err) + } + + var got Level3 
+ if err := DB.Preload("Level2.Level1s").Find(&got, "value = ?", "Bob").Error; err != nil { + t.Error(err) + } + + if !reflect.DeepEqual(got, want) { + t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want)) + } + + var got2 Level3 + if err := DB.Preload("Level2.Level1s").Find(&got2, "value = ?", "Tom").Error; err != nil { + t.Error(err) + } + + if !reflect.DeepEqual(got2, want2) { + t.Errorf("got %s; want %s", toJSONString(got2), toJSONString(want2)) + } + + var got3 []Level3 + if err := DB.Preload("Level2.Level1s").Find(&got3, "value IN (?)", []string{"Bob", "Tom"}).Error; err != nil { + t.Error(err) + } + + if !reflect.DeepEqual(got3, []Level3{got, got2}) { + t.Errorf("got %s; want %s", toJSONString(got3), toJSONString([]Level3{got, got2})) + } + + var got4 []Level3 + if err := DB.Preload("Level2.Level1s", "value IN (?)", []string{"zh", "ru"}).Find(&got4, "value IN (?)", []string{"Bob", "Tom"}).Error; err != nil { + t.Error(err) + } + + var got5 Level3 + DB.Preload("Level2.Level1s").Find(&got5, "value = ?", "bogus") + + var ruLevel1 Level1 + var zhLevel1 Level1 + DB.First(&ruLevel1, "value = ?", "ru") + DB.First(&zhLevel1, "value = ?", "zh") + + got.Level2.Level1s = []*Level1{&ruLevel1} + got2.Level2.Level1s = []*Level1{&zhLevel1} + if !reflect.DeepEqual(got4, []Level3{got, got2}) { + t.Errorf("got %s; want %s", toJSONString(got4), toJSONString([]Level3{got, got2})) + } +} + +func TestNestedManyToManyPreload(t *testing.T) { + type ( + Level1 struct { + ID uint + Value string + } + Level2 struct { + ID uint + Value string + Level1s []*Level1 `gorm:"many2many:level1_level2;"` + } + Level3 struct { + ID uint + Value string + Level2s []Level2 `gorm:"many2many:level2_level3;"` + } + ) + + DB.DropTableIfExists(&Level1{}) + DB.DropTableIfExists(&Level2{}) + DB.DropTableIfExists(&Level3{}) + DB.DropTableIfExists("level1_level2") + DB.DropTableIfExists("level2_level3") + + if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}).Error; err != nil { + t.Error(err) + } + + want := Level3{ + Value: "Level3", + Level2s: []Level2{ + { + Value: "Bob", + Level1s: []*Level1{ + {Value: "ru"}, + {Value: "en"}, + }, + }, { + Value: "Tom", + Level1s: []*Level1{ + {Value: "zh"}, + {Value: "de"}, + }, + }, + }, + } + + if err := DB.Save(&want).Error; err != nil { + t.Error(err) + } + + var got Level3 + if err := DB.Preload("Level2s").Preload("Level2s.Level1s").Find(&got, "value = ?", "Level3").Error; err != nil { + t.Error(err) + } + + if !reflect.DeepEqual(got, want) { + t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want)) + } + + if err := DB.Preload("Level2s.Level1s").Find(&got, "value = ?", "not_found").Error; err != gorm.ErrRecordNotFound { + t.Error(err) + } +} + +func TestNestedManyToManyPreload2(t *testing.T) { + type ( + Level1 struct { + ID uint + Value string + } + Level2 struct { + ID uint + Value string + Level1s []*Level1 `gorm:"many2many:level1_level2;"` + } + Level3 struct { + ID uint + Value string + Level2ID sql.NullInt64 + Level2 *Level2 + } + ) + + DB.DropTableIfExists(&Level1{}) + DB.DropTableIfExists(&Level2{}) + DB.DropTableIfExists(&Level3{}) + DB.DropTableIfExists("level1_level2") + + if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}).Error; err != nil { + t.Error(err) + } + + want := Level3{ + Value: "Level3", + Level2: &Level2{ + Value: "Bob", + Level1s: []*Level1{ + {Value: "ru"}, + {Value: "en"}, + }, + }, + } + + if err := DB.Save(&want).Error; err != nil { + t.Error(err) + } + + var got Level3 + if err := DB.Preload("Level2.Level1s").Find(&got, 
"value = ?", "Level3").Error; err != nil { + t.Error(err) + } + + if !reflect.DeepEqual(got, want) { + t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want)) + } + + if err := DB.Preload("Level2.Level1s").Find(&got, "value = ?", "not_found").Error; err != gorm.ErrRecordNotFound { + t.Error(err) + } +} + +func TestNestedManyToManyPreload3(t *testing.T) { + type ( + Level1 struct { + ID uint + Value string + } + Level2 struct { + ID uint + Value string + Level1s []*Level1 `gorm:"many2many:level1_level2;"` + } + Level3 struct { + ID uint + Value string + Level2ID sql.NullInt64 + Level2 *Level2 + } + ) + + DB.DropTableIfExists(&Level1{}) + DB.DropTableIfExists(&Level2{}) + DB.DropTableIfExists(&Level3{}) + DB.DropTableIfExists("level1_level2") + + if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}).Error; err != nil { + t.Error(err) + } + + level1Zh := &Level1{Value: "zh"} + level1Ru := &Level1{Value: "ru"} + level1En := &Level1{Value: "en"} + + level21 := &Level2{ + Value: "Level2-1", + Level1s: []*Level1{level1Zh, level1Ru}, + } + + level22 := &Level2{ + Value: "Level2-2", + Level1s: []*Level1{level1Zh, level1En}, + } + + wants := []*Level3{ + { + Value: "Level3-1", + Level2: level21, + }, + { + Value: "Level3-2", + Level2: level22, + }, + { + Value: "Level3-3", + Level2: level21, + }, + } + + for _, want := range wants { + if err := DB.Save(&want).Error; err != nil { + t.Error(err) + } + } + + var gots []*Level3 + if err := DB.Preload("Level2.Level1s", func(db *gorm.DB) *gorm.DB { + return db.Order("level1.id ASC") + }).Find(&gots).Error; err != nil { + t.Error(err) + } + + if !reflect.DeepEqual(gots, wants) { + t.Errorf("got %s; want %s", toJSONString(gots), toJSONString(wants)) + } +} + +func TestNestedManyToManyPreload3ForStruct(t *testing.T) { + type ( + Level1 struct { + ID uint + Value string + } + Level2 struct { + ID uint + Value string + Level1s []Level1 `gorm:"many2many:level1_level2;"` + } + Level3 struct { + ID uint + Value string + Level2ID sql.NullInt64 + Level2 Level2 + } + ) + + DB.DropTableIfExists(&Level1{}) + DB.DropTableIfExists(&Level2{}) + DB.DropTableIfExists(&Level3{}) + DB.DropTableIfExists("level1_level2") + + if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}).Error; err != nil { + t.Error(err) + } + + level1Zh := Level1{Value: "zh"} + level1Ru := Level1{Value: "ru"} + level1En := Level1{Value: "en"} + + level21 := Level2{ + Value: "Level2-1", + Level1s: []Level1{level1Zh, level1Ru}, + } + + level22 := Level2{ + Value: "Level2-2", + Level1s: []Level1{level1Zh, level1En}, + } + + wants := []*Level3{ + { + Value: "Level3-1", + Level2: level21, + }, + { + Value: "Level3-2", + Level2: level22, + }, + { + Value: "Level3-3", + Level2: level21, + }, + } + + for _, want := range wants { + if err := DB.Save(&want).Error; err != nil { + t.Error(err) + } + } + + var gots []*Level3 + if err := DB.Preload("Level2.Level1s", func(db *gorm.DB) *gorm.DB { + return db.Order("level1.id ASC") + }).Find(&gots).Error; err != nil { + t.Error(err) + } + + if !reflect.DeepEqual(gots, wants) { + t.Errorf("got %s; want %s", toJSONString(gots), toJSONString(wants)) + } +} + +func TestNestedManyToManyPreload4(t *testing.T) { + type ( + Level4 struct { + ID uint + Value string + Level3ID uint + } + Level3 struct { + ID uint + Value string + Level4s []*Level4 + } + Level2 struct { + ID uint + Value string + Level3s []*Level3 `gorm:"many2many:level2_level3;"` + } + Level1 struct { + ID uint + Value string + Level2s []*Level2 `gorm:"many2many:level1_level2;"` + } + ) + + 
DB.DropTableIfExists(&Level1{}) + DB.DropTableIfExists(&Level2{}) + DB.DropTableIfExists(&Level3{}) + DB.DropTableIfExists(&Level4{}) + DB.DropTableIfExists("level1_level2") + DB.DropTableIfExists("level2_level3") + + dummy := Level1{ + Value: "Level1", + Level2s: []*Level2{{ + Value: "Level2", + Level3s: []*Level3{{ + Value: "Level3", + Level4s: []*Level4{{ + Value: "Level4", + }}, + }}, + }}, + } + + if err := DB.AutoMigrate(&Level4{}, &Level3{}, &Level2{}, &Level1{}).Error; err != nil { + t.Error(err) + } + + if err := DB.Save(&dummy).Error; err != nil { + t.Error(err) + } + + var level1 Level1 + if err := DB.Preload("Level2s").Preload("Level2s.Level3s").Preload("Level2s.Level3s.Level4s").First(&level1).Error; err != nil { + t.Error(err) + } +} + +func TestManyToManyPreloadForPointer(t *testing.T) { + type ( + Level1 struct { + ID uint + Value string + } + Level2 struct { + ID uint + Value string + Level1s []*Level1 `gorm:"many2many:levels;"` + } + ) + + DB.DropTableIfExists(&Level2{}) + DB.DropTableIfExists(&Level1{}) + DB.DropTableIfExists("levels") + + if err := DB.AutoMigrate(&Level2{}, &Level1{}).Error; err != nil { + t.Error(err) + } + + want := Level2{Value: "Bob", Level1s: []*Level1{ + {Value: "ru"}, + {Value: "en"}, + }} + if err := DB.Save(&want).Error; err != nil { + t.Error(err) + } + + want2 := Level2{Value: "Tom", Level1s: []*Level1{ + {Value: "zh"}, + {Value: "de"}, + }} + if err := DB.Save(&want2).Error; err != nil { + t.Error(err) + } + + var got Level2 + if err := DB.Preload("Level1s").Find(&got, "value = ?", "Bob").Error; err != nil { + t.Error(err) + } + + if !reflect.DeepEqual(got, want) { + t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want)) + } + + var got2 Level2 + if err := DB.Preload("Level1s").Find(&got2, "value = ?", "Tom").Error; err != nil { + t.Error(err) + } + + if !reflect.DeepEqual(got2, want2) { + t.Errorf("got %s; want %s", toJSONString(got2), toJSONString(want2)) + } + + var got3 []Level2 + if err := DB.Preload("Level1s").Find(&got3, "value IN (?)", []string{"Bob", "Tom"}).Error; err != nil { + t.Error(err) + } + + if !reflect.DeepEqual(got3, []Level2{got, got2}) { + t.Errorf("got %s; want %s", toJSONString(got3), toJSONString([]Level2{got, got2})) + } + + var got4 []Level2 + if err := DB.Preload("Level1s", "value IN (?)", []string{"zh", "ru"}).Find(&got4, "value IN (?)", []string{"Bob", "Tom"}).Error; err != nil { + t.Error(err) + } + + var got5 Level2 + DB.Preload("Level1s").First(&got5, "value = ?", "bogus") + + var ruLevel1 Level1 + var zhLevel1 Level1 + DB.First(&ruLevel1, "value = ?", "ru") + DB.First(&zhLevel1, "value = ?", "zh") + + got.Level1s = []*Level1{&ruLevel1} + got2.Level1s = []*Level1{&zhLevel1} + if !reflect.DeepEqual(got4, []Level2{got, got2}) { + t.Errorf("got %s; want %s", toJSONString(got4), toJSONString([]Level2{got, got2})) + } +} + +func TestNilPointerSlice(t *testing.T) { + type ( + Level3 struct { + ID uint + Value string + } + Level2 struct { + ID uint + Value string + Level3ID uint + Level3 *Level3 + } + Level1 struct { + ID uint + Value string + Level2ID uint + Level2 *Level2 + } + ) + + DB.DropTableIfExists(&Level3{}) + DB.DropTableIfExists(&Level2{}) + DB.DropTableIfExists(&Level1{}) + + if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}).Error; err != nil { + t.Error(err) + } + + want := Level1{ + Value: "Bob", + Level2: &Level2{ + Value: "en", + Level3: &Level3{ + Value: "native", + }, + }, + } + if err := DB.Save(&want).Error; err != nil { + t.Error(err) + } + + want2 := Level1{ + Value: "Tom", 
+ Level2: nil, + } + if err := DB.Save(&want2).Error; err != nil { + t.Error(err) + } + + var got []Level1 + if err := DB.Preload("Level2").Preload("Level2.Level3").Find(&got).Error; err != nil { + t.Error(err) + } + + if len(got) != 2 { + t.Errorf("got %v items, expected 2", len(got)) + } + + if !reflect.DeepEqual(got[0], want) && !reflect.DeepEqual(got[1], want) { + t.Errorf("got %s; want array containing %s", toJSONString(got), toJSONString(want)) + } + + if !reflect.DeepEqual(got[0], want2) && !reflect.DeepEqual(got[1], want2) { + t.Errorf("got %s; want array containing %s", toJSONString(got), toJSONString(want2)) + } +} + +func TestNilPointerSlice2(t *testing.T) { + type ( + Level4 struct { + ID uint + } + Level3 struct { + ID uint + Level4ID sql.NullInt64 `sql:"index"` + Level4 *Level4 + } + Level2 struct { + ID uint + Level3s []*Level3 `gorm:"many2many:level2_level3s"` + } + Level1 struct { + ID uint + Level2ID sql.NullInt64 `sql:"index"` + Level2 *Level2 + } + ) + + DB.DropTableIfExists(new(Level4)) + DB.DropTableIfExists(new(Level3)) + DB.DropTableIfExists(new(Level2)) + DB.DropTableIfExists(new(Level1)) + + if err := DB.AutoMigrate(new(Level4), new(Level3), new(Level2), new(Level1)).Error; err != nil { + t.Error(err) + } + + want := new(Level1) + if err := DB.Save(want).Error; err != nil { + t.Error(err) + } + + got := new(Level1) + err := DB.Preload("Level2.Level3s.Level4").Last(&got).Error + if err != nil { + t.Error(err) + } + + if !reflect.DeepEqual(got, want) { + t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want)) + } +} + +func TestPrefixedPreloadDuplication(t *testing.T) { + type ( + Level4 struct { + ID uint + Name string + Level3ID uint + } + Level3 struct { + ID uint + Name string + Level4s []*Level4 + } + Level2 struct { + ID uint + Name string + Level3ID sql.NullInt64 `sql:"index"` + Level3 *Level3 + } + Level1 struct { + ID uint + Name string + Level2ID sql.NullInt64 `sql:"index"` + Level2 *Level2 + } + ) + + DB.DropTableIfExists(new(Level3)) + DB.DropTableIfExists(new(Level4)) + DB.DropTableIfExists(new(Level2)) + DB.DropTableIfExists(new(Level1)) + + if err := DB.AutoMigrate(new(Level3), new(Level4), new(Level2), new(Level1)).Error; err != nil { + t.Error(err) + } + + lvl := &Level3{} + if err := DB.Save(lvl).Error; err != nil { + t.Error(err) + } + + sublvl1 := &Level4{Level3ID: lvl.ID} + if err := DB.Save(sublvl1).Error; err != nil { + t.Error(err) + } + sublvl2 := &Level4{Level3ID: lvl.ID} + if err := DB.Save(sublvl2).Error; err != nil { + t.Error(err) + } + + lvl.Level4s = []*Level4{sublvl1, sublvl2} + + want1 := Level1{ + Level2: &Level2{ + Level3: lvl, + }, + } + if err := DB.Save(&want1).Error; err != nil { + t.Error(err) + } + + want2 := Level1{ + Level2: &Level2{ + Level3: lvl, + }, + } + if err := DB.Save(&want2).Error; err != nil { + t.Error(err) + } + + want := []Level1{want1, want2} + + var got []Level1 + err := DB.Preload("Level2.Level3.Level4s").Find(&got).Error + if err != nil { + t.Error(err) + } + + if !reflect.DeepEqual(got, want) { + t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want)) + } +} + +func toJSONString(v interface{}) []byte { + r, _ := json.MarshalIndent(v, "", " ") + return r +} diff --git a/vendor/github.com/jinzhu/gorm/query_test.go b/vendor/github.com/jinzhu/gorm/query_test.go new file mode 100644 index 000000000000..d6b23ddfe375 --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/query_test.go @@ -0,0 +1,662 @@ +package gorm_test + +import ( + "fmt" + "reflect" + + "github.com/jinzhu/gorm" + + "testing" 
+ "time" +) + +func TestFirstAndLast(t *testing.T) { + DB.Save(&User{Name: "user1", Emails: []Email{{Email: "user1@example.com"}}}) + DB.Save(&User{Name: "user2", Emails: []Email{{Email: "user2@example.com"}}}) + + var user1, user2, user3, user4 User + DB.First(&user1) + DB.Order("id").Limit(1).Find(&user2) + + ptrOfUser3 := &user3 + DB.Last(&ptrOfUser3) + DB.Order("id desc").Limit(1).Find(&user4) + if user1.Id != user2.Id || user3.Id != user4.Id { + t.Errorf("First and Last should by order by primary key") + } + + var users []User + DB.First(&users) + if len(users) != 1 { + t.Errorf("Find first record as slice") + } + + var user User + if DB.Joins("left join emails on emails.user_id = users.id").First(&user).Error != nil { + t.Errorf("Should not raise any error when order with Join table") + } + + if user.Email != "" { + t.Errorf("User's Email should be blank as no one set it") + } +} + +func TestFirstAndLastWithNoStdPrimaryKey(t *testing.T) { + DB.Save(&Animal{Name: "animal1"}) + DB.Save(&Animal{Name: "animal2"}) + + var animal1, animal2, animal3, animal4 Animal + DB.First(&animal1) + DB.Order("counter").Limit(1).Find(&animal2) + + DB.Last(&animal3) + DB.Order("counter desc").Limit(1).Find(&animal4) + if animal1.Counter != animal2.Counter || animal3.Counter != animal4.Counter { + t.Errorf("First and Last should work correctly") + } +} + +func TestFirstAndLastWithRaw(t *testing.T) { + user1 := User{Name: "user", Emails: []Email{{Email: "user1@example.com"}}} + user2 := User{Name: "user", Emails: []Email{{Email: "user2@example.com"}}} + DB.Save(&user1) + DB.Save(&user2) + + var user3, user4 User + DB.Raw("select * from users WHERE name = ?", "user").First(&user3) + if user3.Id != user1.Id { + t.Errorf("Find first record with raw") + } + + DB.Raw("select * from users WHERE name = ?", "user").Last(&user4) + if user4.Id != user2.Id { + t.Errorf("Find last record with raw") + } +} + +func TestUIntPrimaryKey(t *testing.T) { + var animal Animal + DB.First(&animal, uint64(1)) + if animal.Counter != 1 { + t.Errorf("Fetch a record from with a non-int primary key should work, but failed") + } + + DB.Model(Animal{}).Where(Animal{Counter: uint64(2)}).Scan(&animal) + if animal.Counter != 2 { + t.Errorf("Fetch a record from with a non-int primary key should work, but failed") + } +} + +func TestStringPrimaryKeyForNumericValueStartingWithZero(t *testing.T) { + type AddressByZipCode struct { + ZipCode string `gorm:"primary_key"` + Address string + } + + DB.AutoMigrate(&AddressByZipCode{}) + DB.Create(&AddressByZipCode{ZipCode: "00501", Address: "Holtsville"}) + + var address AddressByZipCode + DB.First(&address, "00501") + if address.ZipCode != "00501" { + t.Errorf("Fetch a record from with a string primary key for a numeric value starting with zero should work, but failed") + } +} + +func TestFindAsSliceOfPointers(t *testing.T) { + DB.Save(&User{Name: "user"}) + + var users []User + DB.Find(&users) + + var userPointers []*User + DB.Find(&userPointers) + + if len(users) == 0 || len(users) != len(userPointers) { + t.Errorf("Find slice of pointers") + } +} + +func TestSearchWithPlainSQL(t *testing.T) { + user1 := User{Name: "PlainSqlUser1", Age: 1, Birthday: parseTime("2000-1-1")} + user2 := User{Name: "PlainSqlUser2", Age: 10, Birthday: parseTime("2010-1-1")} + user3 := User{Name: "PlainSqlUser3", Age: 20, Birthday: parseTime("2020-1-1")} + DB.Save(&user1).Save(&user2).Save(&user3) + scopedb := DB.Where("name LIKE ?", "%PlainSqlUser%") + + if DB.Where("name = ?", user1.Name).First(&User{}).RecordNotFound() 
{ + t.Errorf("Search with plain SQL") + } + + if DB.Where("name LIKE ?", "%"+user1.Name+"%").First(&User{}).RecordNotFound() { + t.Errorf("Search with plan SQL (regexp)") + } + + var users []User + DB.Find(&users, "name LIKE ? and age > ?", "%PlainSqlUser%", 1) + if len(users) != 2 { + t.Errorf("Should found 2 users that age > 1, but got %v", len(users)) + } + + DB.Where("name LIKE ?", "%PlainSqlUser%").Where("age >= ?", 1).Find(&users) + if len(users) != 3 { + t.Errorf("Should found 3 users that age >= 1, but got %v", len(users)) + } + + scopedb.Where("age <> ?", 20).Find(&users) + if len(users) != 2 { + t.Errorf("Should found 2 users age != 20, but got %v", len(users)) + } + + scopedb.Where("birthday > ?", parseTime("2000-1-1")).Find(&users) + if len(users) != 2 { + t.Errorf("Should found 2 users's birthday > 2000-1-1, but got %v", len(users)) + } + + scopedb.Where("birthday > ?", "2002-10-10").Find(&users) + if len(users) != 2 { + t.Errorf("Should found 2 users's birthday >= 2002-10-10, but got %v", len(users)) + } + + scopedb.Where("birthday >= ?", "2010-1-1").Where("birthday < ?", "2020-1-1").Find(&users) + if len(users) != 1 { + t.Errorf("Should found 1 users's birthday < 2020-1-1 and >= 2010-1-1, but got %v", len(users)) + } + + DB.Where("name in (?)", []string{user1.Name, user2.Name}).Find(&users) + if len(users) != 2 { + t.Errorf("Should found 2 users, but got %v", len(users)) + } + + DB.Where("id in (?)", []int64{user1.Id, user2.Id, user3.Id}).Find(&users) + if len(users) != 3 { + t.Errorf("Should found 3 users, but got %v", len(users)) + } + + DB.Where("id in (?)", user1.Id).Find(&users) + if len(users) != 1 { + t.Errorf("Should found 1 users, but got %v", len(users)) + } + + if err := DB.Where("id IN (?)", []string{}).Find(&users).Error; err != nil { + t.Error("no error should happen when query with empty slice, but got: ", err) + } + + if err := DB.Not("id IN (?)", []string{}).Find(&users).Error; err != nil { + t.Error("no error should happen when query with empty slice, but got: ", err) + } + + if DB.Where("name = ?", "none existing").Find(&[]User{}).RecordNotFound() { + t.Errorf("Should not get RecordNotFound error when looking for none existing records") + } +} + +func TestSearchWithStruct(t *testing.T) { + user1 := User{Name: "StructSearchUser1", Age: 1, Birthday: parseTime("2000-1-1")} + user2 := User{Name: "StructSearchUser2", Age: 10, Birthday: parseTime("2010-1-1")} + user3 := User{Name: "StructSearchUser3", Age: 20, Birthday: parseTime("2020-1-1")} + DB.Save(&user1).Save(&user2).Save(&user3) + + if DB.Where(user1.Id).First(&User{}).RecordNotFound() { + t.Errorf("Search with primary key") + } + + if DB.First(&User{}, user1.Id).RecordNotFound() { + t.Errorf("Search with primary key as inline condition") + } + + if DB.First(&User{}, fmt.Sprintf("%v", user1.Id)).RecordNotFound() { + t.Errorf("Search with primary key as inline condition") + } + + var users []User + DB.Where([]int64{user1.Id, user2.Id, user3.Id}).Find(&users) + if len(users) != 3 { + t.Errorf("Should found 3 users when search with primary keys, but got %v", len(users)) + } + + var user User + DB.First(&user, &User{Name: user1.Name}) + if user.Id == 0 || user.Name != user1.Name { + t.Errorf("Search first record with inline pointer of struct") + } + + DB.First(&user, User{Name: user1.Name}) + if user.Id == 0 || user.Name != user.Name { + t.Errorf("Search first record with inline struct") + } + + DB.Where(&User{Name: user1.Name}).First(&user) + if user.Id == 0 || user.Name != user1.Name { + t.Errorf("Search 
first record with where struct") + } + + DB.Find(&users, &User{Name: user2.Name}) + if len(users) != 1 { + t.Errorf("Search all records with inline struct") + } +} + +func TestSearchWithMap(t *testing.T) { + companyID := 1 + user1 := User{Name: "MapSearchUser1", Age: 1, Birthday: parseTime("2000-1-1")} + user2 := User{Name: "MapSearchUser2", Age: 10, Birthday: parseTime("2010-1-1")} + user3 := User{Name: "MapSearchUser3", Age: 20, Birthday: parseTime("2020-1-1")} + user4 := User{Name: "MapSearchUser4", Age: 30, Birthday: parseTime("2020-1-1"), CompanyID: &companyID} + DB.Save(&user1).Save(&user2).Save(&user3).Save(&user4) + + var user User + DB.First(&user, map[string]interface{}{"name": user1.Name}) + if user.Id == 0 || user.Name != user1.Name { + t.Errorf("Search first record with inline map") + } + + user = User{} + DB.Where(map[string]interface{}{"name": user2.Name}).First(&user) + if user.Id == 0 || user.Name != user2.Name { + t.Errorf("Search first record with where map") + } + + var users []User + DB.Where(map[string]interface{}{"name": user3.Name}).Find(&users) + if len(users) != 1 { + t.Errorf("Search all records with inline map") + } + + DB.Find(&users, map[string]interface{}{"name": user3.Name}) + if len(users) != 1 { + t.Errorf("Search all records with inline map") + } + + DB.Find(&users, map[string]interface{}{"name": user4.Name, "company_id": nil}) + if len(users) != 0 { + t.Errorf("Search all records with inline map containing null value finding 0 records") + } + + DB.Find(&users, map[string]interface{}{"name": user1.Name, "company_id": nil}) + if len(users) != 1 { + t.Errorf("Search all records with inline map containing null value finding 1 record") + } + + DB.Find(&users, map[string]interface{}{"name": user4.Name, "company_id": companyID}) + if len(users) != 1 { + t.Errorf("Search all records with inline multiple value map") + } +} + +func TestSearchWithEmptyChain(t *testing.T) { + user1 := User{Name: "ChainSearchUser1", Age: 1, Birthday: parseTime("2000-1-1")} + user2 := User{Name: "ChainearchUser2", Age: 10, Birthday: parseTime("2010-1-1")} + user3 := User{Name: "ChainearchUser3", Age: 20, Birthday: parseTime("2020-1-1")} + DB.Save(&user1).Save(&user2).Save(&user3) + + if DB.Where("").Where("").First(&User{}).Error != nil { + t.Errorf("Should not raise any error if searching with empty strings") + } + + if DB.Where(&User{}).Where("name = ?", user1.Name).First(&User{}).Error != nil { + t.Errorf("Should not raise any error if searching with empty struct") + } + + if DB.Where(map[string]interface{}{}).Where("name = ?", user1.Name).First(&User{}).Error != nil { + t.Errorf("Should not raise any error if searching with empty map") + } +} + +func TestSelect(t *testing.T) { + user1 := User{Name: "SelectUser1"} + DB.Save(&user1) + + var user User + DB.Where("name = ?", user1.Name).Select("name").Find(&user) + if user.Id != 0 { + t.Errorf("Should not have ID because only selected name, %+v", user.Id) + } + + if user.Name != user1.Name { + t.Errorf("Should have user Name when selected it") + } +} + +func TestOrderAndPluck(t *testing.T) { + user1 := User{Name: "OrderPluckUser1", Age: 1} + user2 := User{Name: "OrderPluckUser2", Age: 10} + user3 := User{Name: "OrderPluckUser3", Age: 20} + DB.Save(&user1).Save(&user2).Save(&user3) + scopedb := DB.Model(&User{}).Where("name like ?", "%OrderPluckUser%") + + var user User + scopedb.Order(gorm.Expr("name = ? 
DESC", "OrderPluckUser2")).First(&user) + if user.Name != "OrderPluckUser2" { + t.Errorf("Order with sql expression") + } + + var ages []int64 + scopedb.Order("age desc").Pluck("age", &ages) + if ages[0] != 20 { + t.Errorf("The first age should be 20 when order with age desc") + } + + var ages1, ages2 []int64 + scopedb.Order("age desc").Pluck("age", &ages1).Pluck("age", &ages2) + if !reflect.DeepEqual(ages1, ages2) { + t.Errorf("The first order is the primary order") + } + + var ages3, ages4 []int64 + scopedb.Model(&User{}).Order("age desc").Pluck("age", &ages3).Order("age", true).Pluck("age", &ages4) + if reflect.DeepEqual(ages3, ages4) { + t.Errorf("Reorder should work") + } + + var names []string + var ages5 []int64 + scopedb.Model(User{}).Order("name").Order("age desc").Pluck("age", &ages5).Pluck("name", &names) + if names != nil && ages5 != nil { + if !(names[0] == user1.Name && names[1] == user2.Name && names[2] == user3.Name && ages5[2] == 20) { + t.Errorf("Order with multiple orders") + } + } else { + t.Errorf("Order with multiple orders") + } + + DB.Model(User{}).Select("name, age").Find(&[]User{}) +} + +func TestLimit(t *testing.T) { + user1 := User{Name: "LimitUser1", Age: 1} + user2 := User{Name: "LimitUser2", Age: 10} + user3 := User{Name: "LimitUser3", Age: 20} + user4 := User{Name: "LimitUser4", Age: 10} + user5 := User{Name: "LimitUser5", Age: 20} + DB.Save(&user1).Save(&user2).Save(&user3).Save(&user4).Save(&user5) + + var users1, users2, users3 []User + DB.Order("age desc").Limit(3).Find(&users1).Limit(5).Find(&users2).Limit(-1).Find(&users3) + + if len(users1) != 3 || len(users2) != 5 || len(users3) <= 5 { + t.Errorf("Limit should works") + } +} + +func TestOffset(t *testing.T) { + for i := 0; i < 20; i++ { + DB.Save(&User{Name: fmt.Sprintf("OffsetUser%v", i)}) + } + var users1, users2, users3, users4 []User + DB.Limit(100).Order("age desc").Find(&users1).Offset(3).Find(&users2).Offset(5).Find(&users3).Offset(-1).Find(&users4) + + if (len(users1) != len(users4)) || (len(users1)-len(users2) != 3) || (len(users1)-len(users3) != 5) { + t.Errorf("Offset should work") + } +} + +func TestOr(t *testing.T) { + user1 := User{Name: "OrUser1", Age: 1} + user2 := User{Name: "OrUser2", Age: 10} + user3 := User{Name: "OrUser3", Age: 20} + DB.Save(&user1).Save(&user2).Save(&user3) + + var users []User + DB.Where("name = ?", user1.Name).Or("name = ?", user2.Name).Find(&users) + if len(users) != 2 { + t.Errorf("Find users with or") + } +} + +func TestCount(t *testing.T) { + user1 := User{Name: "CountUser1", Age: 1} + user2 := User{Name: "CountUser2", Age: 10} + user3 := User{Name: "CountUser3", Age: 20} + + DB.Save(&user1).Save(&user2).Save(&user3) + var count, count1, count2 int64 + var users []User + + if err := DB.Where("name = ?", user1.Name).Or("name = ?", user3.Name).Find(&users).Count(&count).Error; err != nil { + t.Errorf(fmt.Sprintf("Count should work, but got err %v", err)) + } + + if count != int64(len(users)) { + t.Errorf("Count() method should get correct value") + } + + DB.Model(&User{}).Where("name = ?", user1.Name).Count(&count1).Or("name in (?)", []string{user2.Name, user3.Name}).Count(&count2) + if count1 != 1 || count2 != 3 { + t.Errorf("Multiple count in chain") + } +} + +func TestNot(t *testing.T) { + DB.Create(getPreparedUser("user1", "not")) + DB.Create(getPreparedUser("user2", "not")) + DB.Create(getPreparedUser("user3", "not")) + + user4 := getPreparedUser("user4", "not") + user4.Company = Company{} + DB.Create(user4) + + DB := DB.Where("role = ?", "not") + + var 
+	var users1, users2, users3, users4, users5, users6, users7, users8, users9 []User
+	if DB.Find(&users1).RowsAffected != 4 {
+		t.Errorf("should find 4 users with role 'not'")
+	}
+	DB.Not(users1[0].Id).Find(&users2)
+
+	if len(users1)-len(users2) != 1 {
+		t.Errorf("Should exclude the first user with Not")
+	}
+
+	DB.Not([]int{}).Find(&users3)
+	if len(users1)-len(users3) != 0 {
+		t.Errorf("Should find all users with a blank condition")
+	}
+
+	var name3Count int64
+	DB.Table("users").Where("name = ?", "user3").Count(&name3Count)
+	DB.Not("name", "user3").Find(&users4)
+	if len(users1)-len(users4) != int(name3Count) {
+		t.Errorf("Should find all users whose name is not 'user3'")
+	}
+
+	DB.Not("name = ?", "user3").Find(&users4)
+	if len(users1)-len(users4) != int(name3Count) {
+		t.Errorf("Should find all users whose name is not 'user3'")
+	}
+
+	DB.Not("name <> ?", "user3").Find(&users4)
+	if len(users4) != int(name3Count) {
+		t.Errorf("Should find only users named 'user3' with a double negation")
+	}
+
+	DB.Not(User{Name: "user3"}).Find(&users5)
+
+	if len(users1)-len(users5) != int(name3Count) {
+		t.Errorf("Should find all users whose name is not 'user3'")
+	}
+
+	DB.Not(map[string]interface{}{"name": "user3"}).Find(&users6)
+	if len(users1)-len(users6) != int(name3Count) {
+		t.Errorf("Should find all users whose name is not 'user3'")
+	}
+
+	DB.Not(map[string]interface{}{"name": "user3", "company_id": nil}).Find(&users7)
+	if len(users1)-len(users7) != 2 { // excludes user3 and user4
+		t.Errorf("Should find all users except 'user3' and those without a company id")
+	}
+
+	DB.Not("name", []string{"user3"}).Find(&users8)
+	if len(users1)-len(users8) != int(name3Count) {
+		t.Errorf("Should find all users whose name is not 'user3'")
+	}
+
+	var name2Count int64
+	DB.Table("users").Where("name = ?", "user2").Count(&name2Count)
+	DB.Not("name", []string{"user3", "user2"}).Find(&users9)
+	if len(users1)-len(users9) != (int(name3Count) + int(name2Count)) {
+		t.Errorf("Should find all users whose name is neither 'user3' nor 'user2'")
+	}
+}
+
+func TestFillSmallerStruct(t *testing.T) {
+	user1 := User{Name: "SmallerUser", Age: 100}
+	DB.Save(&user1)
+	type SimpleUser struct {
+		Name      string
+		Id        int64
+		UpdatedAt time.Time
+		CreatedAt time.Time
+	}
+
+	var simpleUser SimpleUser
+	DB.Table("users").Where("name = ?", user1.Name).First(&simpleUser)
+
+	if simpleUser.Id == 0 || simpleUser.Name == "" {
+		t.Errorf("Should fill data correctly into the smaller struct")
+	}
+}
+
+func TestFindOrInitialize(t *testing.T) {
+	var user1, user2, user3, user4, user5, user6 User
+	DB.Where(&User{Name: "find or init", Age: 33}).FirstOrInit(&user1)
+	if user1.Name != "find or init" || user1.Id != 0 || user1.Age != 33 {
+		t.Errorf("user should be initialized with search value")
+	}
+
+	DB.Where(User{Name: "find or init", Age: 33}).FirstOrInit(&user2)
+	if user2.Name != "find or init" || user2.Id != 0 || user2.Age != 33 {
+		t.Errorf("user should be initialized with search value")
+	}
+
+	DB.FirstOrInit(&user3, map[string]interface{}{"name": "find or init 2"})
+	if user3.Name != "find or init 2" || user3.Id != 0 {
+		t.Errorf("user should be initialized with inline search value")
+	}
+
+	DB.Where(&User{Name: "find or init"}).Attrs(User{Age: 44}).FirstOrInit(&user4)
+	if user4.Name != "find or init" || user4.Id != 0 || user4.Age != 44 {
+		t.Errorf("user should be initialized with search value and attrs")
+	}
+
+	DB.Where(&User{Name: "find or init"}).Assign("age", 44).FirstOrInit(&user4)
+	if user4.Name != "find or init" || user4.Id != 0 || user4.Age != 44 {
+		t.Errorf("user should be initialized with search value and assigned attrs")
+	}
+
+	DB.Save(&User{Name: "find or init", Age: 33})
+	DB.Where(&User{Name: "find or init"}).Attrs("age", 44).FirstOrInit(&user5)
+	if user5.Name != "find or init" || user5.Id == 0 || user5.Age != 33 {
+		t.Errorf("user should be found and not initialized by Attrs")
+	}
+
+	DB.Where(&User{Name: "find or init", Age: 33}).FirstOrInit(&user6)
+	if user6.Name != "find or init" || user6.Id == 0 || user6.Age != 33 {
+		t.Errorf("user should be found with FirstOrInit")
+	}
+
+	DB.Where(&User{Name: "find or init"}).Assign(User{Age: 44}).FirstOrInit(&user6)
+	if user6.Name != "find or init" || user6.Id == 0 || user6.Age != 44 {
+		t.Errorf("user should be found and updated with assigned attrs")
+	}
+}
+
+func TestFindOrCreate(t *testing.T) {
+	var user1, user2, user3, user4, user5, user6, user7, user8 User
+	DB.Where(&User{Name: "find or create", Age: 33}).FirstOrCreate(&user1)
+	if user1.Name != "find or create" || user1.Id == 0 || user1.Age != 33 {
+		t.Errorf("user should be created with search value")
+	}
+
+	DB.Where(&User{Name: "find or create", Age: 33}).FirstOrCreate(&user2)
+	if user1.Id != user2.Id || user2.Name != "find or create" || user2.Id == 0 || user2.Age != 33 {
+		t.Errorf("user should be found with search value")
+	}
+
+	DB.FirstOrCreate(&user3, map[string]interface{}{"name": "find or create 2"})
+	if user3.Name != "find or create 2" || user3.Id == 0 {
+		t.Errorf("user should be created with inline search value")
+	}
+
+	DB.Where(&User{Name: "find or create 3"}).Attrs("age", 44).FirstOrCreate(&user4)
+	if user4.Name != "find or create 3" || user4.Id == 0 || user4.Age != 44 {
+		t.Errorf("user should be created with search value and attrs")
+	}
+
+	updatedAt1 := user4.UpdatedAt
+	DB.Where(&User{Name: "find or create 3"}).Assign("age", 55).FirstOrCreate(&user4)
+	if updatedAt1.Format(time.RFC3339Nano) == user4.UpdatedAt.Format(time.RFC3339Nano) {
+		t.Errorf("UpdatedAt should be changed when updating values with Assign")
+	}
+
+	DB.Where(&User{Name: "find or create 4"}).Assign(User{Age: 44}).FirstOrCreate(&user4)
+	if user4.Name != "find or create 4" || user4.Id == 0 || user4.Age != 44 {
+		t.Errorf("user should be created with search value and assigned attrs")
+	}
+
+	DB.Where(&User{Name: "find or create"}).Attrs("age", 44).FirstOrInit(&user5)
+	if user5.Name != "find or create" || user5.Id == 0 || user5.Age != 33 {
+		t.Errorf("user should be found and not initialized by Attrs")
+	}
+
+	DB.Where(&User{Name: "find or create"}).Assign(User{Age: 44}).FirstOrCreate(&user6)
+	if user6.Name != "find or create" || user6.Id == 0 || user6.Age != 44 {
+		t.Errorf("user should be found and updated with assigned attrs")
+	}
+
+	DB.Where(&User{Name: "find or create"}).Find(&user7)
+	if user7.Name != "find or create" || user7.Id == 0 || user7.Age != 44 {
+		t.Errorf("user should be found and updated with assigned attrs")
+	}
+
+	DB.Where(&User{Name: "find or create embedded struct"}).Assign(User{Age: 44, CreditCard: CreditCard{Number: "1231231231"}, Emails: []Email{{Email: "jinzhu@assign_embedded_struct.com"}, {Email: "jinzhu-2@assign_embedded_struct.com"}}}).FirstOrCreate(&user8)
+	if DB.Where("email = ?", "jinzhu-2@assign_embedded_struct.com").First(&Email{}).RecordNotFound() {
+		t.Errorf("email assigned with the embedded struct should be saved")
+	}
+
+	if DB.Where("email = ?", "1231231231").First(&CreditCard{}).RecordNotFound() {
+		t.Errorf("credit card assigned with the embedded struct should be saved")
+	}
+}
+
+func TestSelectWithEscapedFieldName(t *testing.T) {
+	user1 := User{Name: "EscapedFieldNameUser", Age: 1}
user2 := User{Name: "EscapedFieldNameUser", Age: 10} + user3 := User{Name: "EscapedFieldNameUser", Age: 20} + DB.Save(&user1).Save(&user2).Save(&user3) + + var names []string + DB.Model(User{}).Where(&User{Name: "EscapedFieldNameUser"}).Pluck("\"name\"", &names) + + if len(names) != 3 { + t.Errorf("Expected 3 name, but got: %d", len(names)) + } +} + +func TestSelectWithVariables(t *testing.T) { + DB.Save(&User{Name: "jinzhu"}) + + rows, _ := DB.Table("users").Select("? as fake", gorm.Expr("name")).Rows() + + if !rows.Next() { + t.Errorf("Should have returned at least one row") + } else { + columns, _ := rows.Columns() + if !reflect.DeepEqual(columns, []string{"fake"}) { + t.Errorf("Should only contains one column") + } + } + + rows.Close() +} + +func TestSelectWithArrayInput(t *testing.T) { + DB.Save(&User{Name: "jinzhu", Age: 42}) + + var user User + DB.Select([]string{"name", "age"}).Where("age = 42 AND name = 'jinzhu'").First(&user) + + if user.Name != "jinzhu" || user.Age != 42 { + t.Errorf("Should have selected both age and name") + } +} diff --git a/vendor/github.com/jinzhu/gorm/scaner_test.go b/vendor/github.com/jinzhu/gorm/scaner_test.go new file mode 100644 index 000000000000..fae9d3e168c4 --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/scaner_test.go @@ -0,0 +1,137 @@ +package gorm_test + +import ( + "database/sql/driver" + "encoding/json" + "errors" + "testing" + + "github.com/jinzhu/gorm" +) + +func TestScannableSlices(t *testing.T) { + if err := DB.AutoMigrate(&RecordWithSlice{}).Error; err != nil { + t.Errorf("Should create table with slice values correctly: %s", err) + } + + r1 := RecordWithSlice{ + Strings: ExampleStringSlice{"a", "b", "c"}, + Structs: ExampleStructSlice{ + {"name1", "value1"}, + {"name2", "value2"}, + }, + } + + if err := DB.Save(&r1).Error; err != nil { + t.Errorf("Should save record with slice values") + } + + var r2 RecordWithSlice + + if err := DB.Find(&r2).Error; err != nil { + t.Errorf("Should fetch record with slice values") + } + + if len(r2.Strings) != 3 || r2.Strings[0] != "a" || r2.Strings[1] != "b" || r2.Strings[2] != "c" { + t.Errorf("Should have serialised and deserialised a string array") + } + + if len(r2.Structs) != 2 || r2.Structs[0].Name != "name1" || r2.Structs[0].Value != "value1" || r2.Structs[1].Name != "name2" || r2.Structs[1].Value != "value2" { + t.Errorf("Should have serialised and deserialised a struct array") + } +} + +type RecordWithSlice struct { + ID uint64 + Strings ExampleStringSlice `sql:"type:text"` + Structs ExampleStructSlice `sql:"type:text"` +} + +type ExampleStringSlice []string + +func (l ExampleStringSlice) Value() (driver.Value, error) { + return json.Marshal(l) +} + +func (l *ExampleStringSlice) Scan(input interface{}) error { + switch value := input.(type) { + case string: + return json.Unmarshal([]byte(value), l) + case []byte: + return json.Unmarshal(value, l) + default: + return errors.New("not supported") + } +} + +type ExampleStruct struct { + Name string + Value string +} + +type ExampleStructSlice []ExampleStruct + +func (l ExampleStructSlice) Value() (driver.Value, error) { + return json.Marshal(l) +} + +func (l *ExampleStructSlice) Scan(input interface{}) error { + switch value := input.(type) { + case string: + return json.Unmarshal([]byte(value), l) + case []byte: + return json.Unmarshal(value, l) + default: + return errors.New("not supported") + } +} + +type ScannerDataType struct { + Street string `sql:"TYPE:varchar(24)"` +} + +func (ScannerDataType) Value() (driver.Value, error) { + return nil, 
+}
+
+func (*ScannerDataType) Scan(input interface{}) error {
+	return nil
+}
+
+type ScannerDataTypeTestStruct struct {
+	Field1          int
+	ScannerDataType *ScannerDataType `sql:"TYPE:json"`
+}
+
+type ScannerDataType2 struct {
+	Street string `sql:"TYPE:varchar(24)"`
+}
+
+func (ScannerDataType2) Value() (driver.Value, error) {
+	return nil, nil
+}
+
+func (*ScannerDataType2) Scan(input interface{}) error {
+	return nil
+}
+
+type ScannerDataTypeTestStruct2 struct {
+	Field1          int
+	ScannerDataType *ScannerDataType2
+}
+
+func TestScannerDataType(t *testing.T) {
+	scope := gorm.Scope{Value: &ScannerDataTypeTestStruct{}}
+	if field, ok := scope.FieldByName("ScannerDataType"); ok {
+		if DB.Dialect().DataTypeOf(field.StructField) != "json" {
+			t.Errorf("data type for scanner is wrong")
+		}
+	}
+
+	scope = gorm.Scope{Value: &ScannerDataTypeTestStruct2{}}
+	if field, ok := scope.FieldByName("ScannerDataType"); ok {
+		if DB.Dialect().DataTypeOf(field.StructField) != "varchar(24)" {
+			t.Errorf("data type for scanner is wrong")
+		}
+	}
+}
diff --git a/vendor/github.com/jinzhu/gorm/scope_test.go b/vendor/github.com/jinzhu/gorm/scope_test.go
new file mode 100644
index 000000000000..42458995d31a
--- /dev/null
+++ b/vendor/github.com/jinzhu/gorm/scope_test.go
@@ -0,0 +1,43 @@
+package gorm_test
+
+import (
+	"github.com/jinzhu/gorm"
+	"testing"
+)
+
+func NameIn1And2(d *gorm.DB) *gorm.DB {
+	return d.Where("name in (?)", []string{"ScopeUser1", "ScopeUser2"})
+}
+
+func NameIn2And3(d *gorm.DB) *gorm.DB {
+	return d.Where("name in (?)", []string{"ScopeUser2", "ScopeUser3"})
+}
+
+func NameIn(names []string) func(d *gorm.DB) *gorm.DB {
+	return func(d *gorm.DB) *gorm.DB {
+		return d.Where("name in (?)", names)
+	}
+}
+
+func TestScopes(t *testing.T) {
+	user1 := User{Name: "ScopeUser1", Age: 1}
+	user2 := User{Name: "ScopeUser2", Age: 1}
+	user3 := User{Name: "ScopeUser3", Age: 2}
+	DB.Save(&user1).Save(&user2).Save(&user3)
+
+	var users1, users2, users3 []User
+	DB.Scopes(NameIn1And2).Find(&users1)
+	if len(users1) != 2 {
+		t.Errorf("Should find two users with names in ScopeUser1, ScopeUser2")
+	}
+
+	DB.Scopes(NameIn1And2, NameIn2And3).Find(&users2)
+	if len(users2) != 1 {
+		t.Errorf("Should find one user with name ScopeUser2")
+	}
+
+	DB.Scopes(NameIn([]string{user1.Name, user3.Name})).Find(&users3)
+	if len(users3) != 2 {
+		t.Errorf("Should find two users with names in ScopeUser1, ScopeUser3")
+	}
+}
diff --git a/vendor/github.com/jinzhu/gorm/search_test.go b/vendor/github.com/jinzhu/gorm/search_test.go
new file mode 100644
index 000000000000..4db7ab6a5204
--- /dev/null
+++ b/vendor/github.com/jinzhu/gorm/search_test.go
@@ -0,0 +1,30 @@
+package gorm
+
+import (
+	"reflect"
+	"testing"
+)
+
+func TestCloneSearch(t *testing.T) {
+	s := new(search)
+	s.Where("name = ?", "jinzhu").Order("name").Attrs("name", "jinzhu").Select("name, age")
+
+	s1 := s.clone()
+	s1.Where("age = ?", 20).Order("age").Attrs("email", "a@e.org").Select("email")
+
+	if reflect.DeepEqual(s.whereConditions, s1.whereConditions) {
+		t.Errorf("Where should be copied")
+	}
+
+	if reflect.DeepEqual(s.orders, s1.orders) {
+		t.Errorf("Order should be copied")
+	}
+
+	if reflect.DeepEqual(s.initAttrs, s1.initAttrs) {
+		t.Errorf("InitAttrs should be copied")
+	}
+
+	if reflect.DeepEqual(s.Select, s1.Select) {
+		t.Errorf("selectStr should be copied")
+	}
+}
diff --git a/vendor/github.com/jinzhu/gorm/update_test.go b/vendor/github.com/jinzhu/gorm/update_test.go
new file mode 100644
index 000000000000..85d53e5f06dc
--- /dev/null
+++ b/vendor/github.com/jinzhu/gorm/update_test.go
@@ -0,0 +1,465 @@
+package gorm_test
+
+import (
+	"testing"
+	"time"
+
+	"github.com/jinzhu/gorm"
+)
+
+func TestUpdate(t *testing.T) {
+	product1 := Product{Code: "product1code"}
+	product2 := Product{Code: "product2code"}
+
+	DB.Save(&product1).Save(&product2).Update("code", "product2newcode")
+
+	if product2.Code != "product2newcode" {
+		t.Errorf("Record should be updated")
+	}
+
+	DB.First(&product1, product1.Id)
+	DB.First(&product2, product2.Id)
+	updatedAt1 := product1.UpdatedAt
+
+	if DB.First(&Product{}, "code = ?", product1.Code).RecordNotFound() {
+		t.Errorf("Product1 should not be updated")
+	}
+
+	if !DB.First(&Product{}, "code = ?", "product2code").RecordNotFound() {
+		t.Errorf("Product2's code should be updated")
+	}
+
+	if DB.First(&Product{}, "code = ?", "product2newcode").RecordNotFound() {
+		t.Errorf("Product2's code should be updated")
+	}
+
+	DB.Table("products").Where("code in (?)", []string{"product1code"}).Update("code", "product1newcode")
+
+	var product4 Product
+	DB.First(&product4, product1.Id)
+	if updatedAt1.Format(time.RFC3339Nano) != product4.UpdatedAt.Format(time.RFC3339Nano) {
+		t.Errorf("updatedAt should not be changed when updating with Table")
+	}
+
+	if !DB.First(&Product{}, "code = 'product1code'").RecordNotFound() {
+		t.Errorf("Product1's code should be updated")
+	}
+
+	if DB.First(&Product{}, "code = 'product1newcode'").RecordNotFound() {
+		t.Errorf("Product1's code should be changed to product1newcode")
+	}
+
+	if DB.Model(product2).Update("CreatedAt", time.Now().Add(time.Hour)).Error != nil {
+		t.Error("No error should be raised when updating with a CamelCase field name")
+	}
+
+	if DB.Model(&product2).UpdateColumn("CreatedAt", time.Now().Add(time.Hour)).Error != nil {
+		t.Error("No error should be raised when using UpdateColumn with a CamelCase field name")
+	}
+
+	var products []Product
+	DB.Find(&products)
+	if count := DB.Model(Product{}).Update("CreatedAt", time.Now().Add(2*time.Hour)).RowsAffected; count != int64(len(products)) {
+		t.Error("RowsAffected should be correct when doing a batch update")
+	}
+
+	DB.First(&product4, product4.Id)
+	updatedAt4 := product4.UpdatedAt
+	DB.Model(&product4).Update("price", gorm.Expr("price + ? - ?", 100, 50))
+	var product5 Product
+	DB.First(&product5, product4.Id)
+	if product5.Price != product4.Price+100-50 {
+		t.Errorf("Update with expression")
+	}
+	if product4.UpdatedAt.Format(time.RFC3339Nano) == updatedAt4.Format(time.RFC3339Nano) {
+		t.Errorf("Update with expression should update UpdatedAt")
+	}
+}
+
+func TestUpdateWithNoStdPrimaryKeyAndDefaultValues(t *testing.T) {
+	animal := Animal{Name: "Ferdinand"}
+	DB.Save(&animal)
+	updatedAt1 := animal.UpdatedAt
+
+	DB.Save(&animal).Update("name", "Francis")
+
+	if updatedAt1.Format(time.RFC3339Nano) == animal.UpdatedAt.Format(time.RFC3339Nano) {
+		t.Errorf("updatedAt should be updated when the name changed")
+	}
+
+	var animals []Animal
+	DB.Find(&animals)
+	if count := DB.Model(Animal{}).Update("CreatedAt", time.Now().Add(2*time.Hour)).RowsAffected; count != int64(len(animals)) {
+		t.Error("RowsAffected should be correct when doing a batch update")
+	}
+
+	animal = Animal{From: "somewhere"}              // No name field, so it should be filled with the default value (galeone)
+	DB.Save(&animal).Update("From", "a nice place") // The name field should be untouched
+	DB.First(&animal, animal.Counter)
+	if animal.Name != "galeone" {
+		t.Errorf("Name field shouldn't be changed if untouched, but got %v", animal.Name)
+	}
+
+	// When changing a field with a default value, the change must occur
+	animal.Name = "amazing horse"
+	DB.Save(&animal)
+	DB.First(&animal, animal.Counter)
+	if animal.Name != "amazing horse" {
+		t.Errorf("Updating a field with a default value should take effect, but got %v\n", animal.Name)
+	}
+
+	// When changing a field with a default value to a blank value
+	animal.Name = ""
+	DB.Save(&animal)
+	DB.First(&animal, animal.Counter)
+	if animal.Name != "" {
+		t.Errorf("Updating a field with a default value to blank should take effect, but got %v\n", animal.Name)
+	}
+}
+
+func TestUpdates(t *testing.T) {
+	product1 := Product{Code: "product1code", Price: 10}
+	product2 := Product{Code: "product2code", Price: 10}
+	DB.Save(&product1).Save(&product2)
+	DB.Model(&product1).Updates(map[string]interface{}{"code": "product1newcode", "price": 100})
+	if product1.Code != "product1newcode" || product1.Price != 100 {
+		t.Errorf("Record should also be updated with a map")
+	}
+
+	DB.First(&product1, product1.Id)
+	DB.First(&product2, product2.Id)
+	updatedAt2 := product2.UpdatedAt
+
+	if DB.First(&Product{}, "code = ? and price = ?", product2.Code, product2.Price).RecordNotFound() {
+		t.Errorf("Product2 should not be updated")
+	}
+
+	if DB.First(&Product{}, "code = ?", "product1newcode").RecordNotFound() {
+		t.Errorf("Product1 should be updated")
+	}
+
+	DB.Table("products").Where("code in (?)", []string{"product2code"}).Updates(Product{Code: "product2newcode"})
+	if !DB.First(&Product{}, "code = 'product2code'").RecordNotFound() {
+		t.Errorf("Product2's code should be updated")
+	}
+
+	var product4 Product
+	DB.First(&product4, product2.Id)
+	if updatedAt2.Format(time.RFC3339Nano) != product4.UpdatedAt.Format(time.RFC3339Nano) {
+		t.Errorf("updatedAt should not be changed when updating with Table")
+	}
+
+	if DB.First(&Product{}, "code = ?", "product2newcode").RecordNotFound() {
+		t.Errorf("Product2's code should be updated")
+	}
+
+	updatedAt4 := product4.UpdatedAt
+	DB.Model(&product4).Updates(map[string]interface{}{"price": gorm.Expr("price + ?", 100)})
+	var product5 Product
+	DB.First(&product5, product4.Id)
+	if product5.Price != product4.Price+100 {
+		t.Errorf("Updates with expression")
+	}
+	// product4's UpdatedAt will be reset when updating
+	if product4.UpdatedAt.Format(time.RFC3339Nano) == updatedAt4.Format(time.RFC3339Nano) {
+		t.Errorf("Updates with expression should update UpdatedAt")
+	}
+}
+
+func TestUpdateColumn(t *testing.T) {
+	product1 := Product{Code: "product1code", Price: 10}
+	product2 := Product{Code: "product2code", Price: 20}
+	DB.Save(&product1).Save(&product2).UpdateColumn(map[string]interface{}{"code": "product2newcode", "price": 100})
+	if product2.Code != "product2newcode" || product2.Price != 100 {
+		t.Errorf("product 2 should be updated with UpdateColumn")
+	}
+
+	var product3 Product
+	DB.First(&product3, product1.Id)
+	if product3.Code != "product1code" || product3.Price != 10 {
+		t.Errorf("product 1 should not be updated")
+	}
+
+	DB.First(&product2, product2.Id)
+	updatedAt2 := product2.UpdatedAt
+	DB.Model(product2).UpdateColumn("code", "update_column_new")
+	var product4 Product
+	DB.First(&product4, product2.Id)
+	if updatedAt2.Format(time.RFC3339Nano) != product4.UpdatedAt.Format(time.RFC3339Nano) {
+		t.Errorf("updatedAt should not be updated with UpdateColumn")
+	}
+
+	DB.Model(&product4).UpdateColumn("price", gorm.Expr("price + 100 - 50"))
+	var product5 Product
+	DB.First(&product5, product4.Id)
+	if product5.Price != product4.Price+100-50 {
+		t.Errorf("UpdateColumn with expression")
+	}
+	if product5.UpdatedAt.Format(time.RFC3339Nano) != product4.UpdatedAt.Format(time.RFC3339Nano) {
+		t.Errorf("UpdateColumn with expression should not update UpdatedAt")
+	}
+}
+
+func TestSelectWithUpdate(t *testing.T) {
+	user := getPreparedUser("select_user", "select_with_update")
+	DB.Create(user)
+
+	var reloadUser User
+	DB.First(&reloadUser, user.Id)
+	reloadUser.Name = "new_name"
+	reloadUser.Age = 50
+	reloadUser.BillingAddress = Address{Address1: "New Billing Address"}
+	reloadUser.ShippingAddress = Address{Address1: "New ShippingAddress Address"}
+	reloadUser.CreditCard = CreditCard{Number: "987654321"}
+	reloadUser.Emails = []Email{
+		{Email: "new_user_1@example1.com"}, {Email: "new_user_2@example2.com"}, {Email: "new_user_3@example2.com"},
+	}
+	reloadUser.Company = Company{Name: "new company"}
+
+	DB.Select("Name", "BillingAddress", "CreditCard", "Company", "Emails").Save(&reloadUser)
+
+	var queryUser User
+	DB.Preload("BillingAddress").Preload("ShippingAddress").
+ Preload("CreditCard").Preload("Emails").Preload("Company").First(&queryUser, user.Id) + + if queryUser.Name == user.Name || queryUser.Age != user.Age { + t.Errorf("Should only update users with name column") + } + + if queryUser.BillingAddressID.Int64 == user.BillingAddressID.Int64 || + queryUser.ShippingAddressId != user.ShippingAddressId || + queryUser.CreditCard.ID == user.CreditCard.ID || + len(queryUser.Emails) == len(user.Emails) || queryUser.Company.Id == user.Company.Id { + t.Errorf("Should only update selected relationships") + } +} + +func TestSelectWithUpdateWithMap(t *testing.T) { + user := getPreparedUser("select_user", "select_with_update_map") + DB.Create(user) + + updateValues := map[string]interface{}{ + "Name": "new_name", + "Age": 50, + "BillingAddress": Address{Address1: "New Billing Address"}, + "ShippingAddress": Address{Address1: "New ShippingAddress Address"}, + "CreditCard": CreditCard{Number: "987654321"}, + "Emails": []Email{ + {Email: "new_user_1@example1.com"}, {Email: "new_user_2@example2.com"}, {Email: "new_user_3@example2.com"}, + }, + "Company": Company{Name: "new company"}, + } + + var reloadUser User + DB.First(&reloadUser, user.Id) + DB.Model(&reloadUser).Select("Name", "BillingAddress", "CreditCard", "Company", "Emails").Update(updateValues) + + var queryUser User + DB.Preload("BillingAddress").Preload("ShippingAddress"). + Preload("CreditCard").Preload("Emails").Preload("Company").First(&queryUser, user.Id) + + if queryUser.Name == user.Name || queryUser.Age != user.Age { + t.Errorf("Should only update users with name column") + } + + if queryUser.BillingAddressID.Int64 == user.BillingAddressID.Int64 || + queryUser.ShippingAddressId != user.ShippingAddressId || + queryUser.CreditCard.ID == user.CreditCard.ID || + len(queryUser.Emails) == len(user.Emails) || queryUser.Company.Id == user.Company.Id { + t.Errorf("Should only update selected relationships") + } +} + +func TestOmitWithUpdate(t *testing.T) { + user := getPreparedUser("omit_user", "omit_with_update") + DB.Create(user) + + var reloadUser User + DB.First(&reloadUser, user.Id) + reloadUser.Name = "new_name" + reloadUser.Age = 50 + reloadUser.BillingAddress = Address{Address1: "New Billing Address"} + reloadUser.ShippingAddress = Address{Address1: "New ShippingAddress Address"} + reloadUser.CreditCard = CreditCard{Number: "987654321"} + reloadUser.Emails = []Email{ + {Email: "new_user_1@example1.com"}, {Email: "new_user_2@example2.com"}, {Email: "new_user_3@example2.com"}, + } + reloadUser.Company = Company{Name: "new company"} + + DB.Omit("Name", "BillingAddress", "CreditCard", "Company", "Emails").Save(&reloadUser) + + var queryUser User + DB.Preload("BillingAddress").Preload("ShippingAddress"). 
+ Preload("CreditCard").Preload("Emails").Preload("Company").First(&queryUser, user.Id) + + if queryUser.Name != user.Name || queryUser.Age == user.Age { + t.Errorf("Should only update users with name column") + } + + if queryUser.BillingAddressID.Int64 != user.BillingAddressID.Int64 || + queryUser.ShippingAddressId == user.ShippingAddressId || + queryUser.CreditCard.ID != user.CreditCard.ID || + len(queryUser.Emails) != len(user.Emails) || queryUser.Company.Id != user.Company.Id { + t.Errorf("Should only update relationships that not omitted") + } +} + +func TestOmitWithUpdateWithMap(t *testing.T) { + user := getPreparedUser("select_user", "select_with_update_map") + DB.Create(user) + + updateValues := map[string]interface{}{ + "Name": "new_name", + "Age": 50, + "BillingAddress": Address{Address1: "New Billing Address"}, + "ShippingAddress": Address{Address1: "New ShippingAddress Address"}, + "CreditCard": CreditCard{Number: "987654321"}, + "Emails": []Email{ + {Email: "new_user_1@example1.com"}, {Email: "new_user_2@example2.com"}, {Email: "new_user_3@example2.com"}, + }, + "Company": Company{Name: "new company"}, + } + + var reloadUser User + DB.First(&reloadUser, user.Id) + DB.Model(&reloadUser).Omit("Name", "BillingAddress", "CreditCard", "Company", "Emails").Update(updateValues) + + var queryUser User + DB.Preload("BillingAddress").Preload("ShippingAddress"). + Preload("CreditCard").Preload("Emails").Preload("Company").First(&queryUser, user.Id) + + if queryUser.Name != user.Name || queryUser.Age == user.Age { + t.Errorf("Should only update users with name column") + } + + if queryUser.BillingAddressID.Int64 != user.BillingAddressID.Int64 || + queryUser.ShippingAddressId == user.ShippingAddressId || + queryUser.CreditCard.ID != user.CreditCard.ID || + len(queryUser.Emails) != len(user.Emails) || queryUser.Company.Id != user.Company.Id { + t.Errorf("Should only update relationships not omitted") + } +} + +func TestSelectWithUpdateColumn(t *testing.T) { + user := getPreparedUser("select_user", "select_with_update_map") + DB.Create(user) + + updateValues := map[string]interface{}{"Name": "new_name", "Age": 50} + + var reloadUser User + DB.First(&reloadUser, user.Id) + DB.Model(&reloadUser).Select("Name").UpdateColumn(updateValues) + + var queryUser User + DB.First(&queryUser, user.Id) + + if queryUser.Name == user.Name || queryUser.Age != user.Age { + t.Errorf("Should only update users with name column") + } +} + +func TestOmitWithUpdateColumn(t *testing.T) { + user := getPreparedUser("select_user", "select_with_update_map") + DB.Create(user) + + updateValues := map[string]interface{}{"Name": "new_name", "Age": 50} + + var reloadUser User + DB.First(&reloadUser, user.Id) + DB.Model(&reloadUser).Omit("Name").UpdateColumn(updateValues) + + var queryUser User + DB.First(&queryUser, user.Id) + + if queryUser.Name != user.Name || queryUser.Age == user.Age { + t.Errorf("Should omit name column when update user") + } +} + +func TestUpdateColumnsSkipsAssociations(t *testing.T) { + user := getPreparedUser("update_columns_user", "special_role") + user.Age = 99 + address1 := "first street" + user.BillingAddress = Address{Address1: address1} + DB.Save(user) + + // Update a single field of the user and verify that the changed address is not stored. 
+ newAge := int64(100) + user.BillingAddress.Address1 = "second street" + db := DB.Model(user).UpdateColumns(User{Age: newAge}) + if db.RowsAffected != 1 { + t.Errorf("Expected RowsAffected=1 but instead RowsAffected=%v", db.RowsAffected) + } + + // Verify that Age now equals newAge. + freshUser := &User{Id: user.Id} + DB.First(freshUser) + if freshUser.Age != newAge { + t.Errorf("Expected freshly queried user to have Age=%v but instead found Age=%v", newAge, freshUser.Age) + } + + // Verify that user's BillingAddress.Address1 is not changed and is still "first street". + DB.First(&freshUser.BillingAddress, freshUser.BillingAddressID) + if freshUser.BillingAddress.Address1 != address1 { + t.Errorf("Expected user's BillingAddress.Address1=%s to remain unchanged after UpdateColumns invocation, but BillingAddress.Address1=%s", address1, freshUser.BillingAddress.Address1) + } +} + +func TestUpdatesWithBlankValues(t *testing.T) { + product := Product{Code: "product1", Price: 10} + DB.Save(&product) + + DB.Model(&Product{Id: product.Id}).Updates(&Product{Price: 100}) + + var product1 Product + DB.First(&product1, product.Id) + + if product1.Code != "product1" || product1.Price != 100 { + t.Errorf("product's code should not be updated, but its price should be") + } +} + +type ElementWithIgnoredField struct { + Id int64 + Value string + IgnoredField int64 `sql:"-"` +} + +func (e ElementWithIgnoredField) TableName() string { + return "element_with_ignored_field" +} + +func TestUpdatesTableWithIgnoredValues(t *testing.T) { + elem := ElementWithIgnoredField{Value: "foo", IgnoredField: 10} + DB.Save(&elem) + + DB.Table(elem.TableName()). + Where("id = ?", elem.Id). + // DB.Model(&ElementWithIgnoredField{Id: elem.Id}). + Updates(&ElementWithIgnoredField{Value: "bar", IgnoredField: 100}) + + var elem1 ElementWithIgnoredField + err := DB.First(&elem1, elem.Id).Error + if err != nil { + t.Errorf("error getting an element from database: %s", err.Error()) + } + + if elem1.IgnoredField != 0 { + t.Errorf("element's ignored field should not be updated") + } +} + +func TestUpdateDecodeVirtualAttributes(t *testing.T) { + var user = User{ + Name: "jinzhu", + IgnoreMe: 88, + } + + DB.Save(&user) + + DB.Model(&user).Updates(User{Name: "jinzhu2", IgnoreMe: 100}) + + if user.IgnoreMe != 100 { + t.Errorf("should decode virtual attributes to struct, so it could be used in callbacks") + } +} diff --git a/vendor/github.com/jinzhu/gorm/utils_test.go b/vendor/github.com/jinzhu/gorm/utils_test.go new file mode 100644 index 000000000000..07f5b17f403c --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/utils_test.go @@ -0,0 +1,30 @@ +package gorm_test + +import ( + "testing" + + "github.com/jinzhu/gorm" +) + +func TestToDBNameGenerateFriendlyName(t *testing.T) { + var maps = map[string]string{ + "": "", + "ThisIsATest": "this_is_a_test", + "PFAndESI": "pf_and_esi", + "AbcAndJkl": "abc_and_jkl", + "EmployeeID": "employee_id", + "SKU_ID": "sku_id", + "HTTPAndSMTP": "http_and_smtp", + "HTTPServerHandlerForURLID": "http_server_handler_for_url_id", + "UUID": "uuid", + "HTTPURL": "http_url", + "HTTP_URL": "http_url", + "ThisIsActuallyATestSoWeMayBeAbleToUseThisCodeInGormPackageAlsoIdCanBeUsedAtTheEndAsID": "this_is_actually_a_test_so_we_may_be_able_to_use_this_code_in_gorm_package_also_id_can_be_used_at_the_end_as_id", + } + + for key, value := range maps { + if gorm.ToDBName(key) != value { + t.Errorf("%v ToDBName should equal %v, but got %v", key, value, gorm.ToDBName(key)) + } + } +} diff --git a/vendor/github.com/jinzhu/inflection/BUILD
b/vendor/github.com/jinzhu/inflection/BUILD index 6bb22125ab4b..2aa610ab25f7 100644 --- a/vendor/github.com/jinzhu/inflection/BUILD +++ b/vendor/github.com/jinzhu/inflection/BUILD @@ -1,14 +1,17 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "go_default_library", srcs = ["inflections.go"], importpath = "github.com/jinzhu/inflection", + visibility = ["//visibility:public"], +) + +go_test( + name = "go_default_xtest", + srcs = ["inflections_test.go"], + importpath = "github.com/jinzhu/inflection_test", + deps = ["//vendor/github.com/qor/inflection:go_default_library"], ) filegroup( @@ -22,4 +25,5 @@ filegroup( name = "all-srcs", srcs = [":package-srcs"], tags = ["automanaged"], + visibility = ["//visibility:public"], ) diff --git a/vendor/github.com/jinzhu/inflection/inflections_test.go b/vendor/github.com/jinzhu/inflection/inflections_test.go new file mode 100644 index 000000000000..448573d22e23 --- /dev/null +++ b/vendor/github.com/jinzhu/inflection/inflections_test.go @@ -0,0 +1,97 @@ +package inflection_test + +import ( + "strings" + "testing" + + "github.com/qor/inflection" +) + +var inflections = map[string]string{ + "star": "stars", + "STAR": "STARS", + "Star": "Stars", + "bus": "buses", + "fish": "fish", + "mouse": "mice", + "query": "queries", + "ability": "abilities", + "agency": "agencies", + "movie": "movies", + "archive": "archives", + "index": "indices", + "wife": "wives", + "safe": "saves", + "half": "halves", + "move": "moves", + "salesperson": "salespeople", + "person": "people", + "spokesman": "spokesmen", + "man": "men", + "woman": "women", + "basis": "bases", + "diagnosis": "diagnoses", + "diagnosis_a": "diagnosis_as", + "datum": "data", + "medium": "media", + "stadium": "stadia", + "analysis": "analyses", + "node_child": "node_children", + "child": "children", + "experience": "experiences", + "day": "days", + "comment": "comments", + "foobar": "foobars", + "newsletter": "newsletters", + "old_news": "old_news", + "news": "news", + "series": "series", + "species": "species", + "quiz": "quizzes", + "perspective": "perspectives", + "ox": "oxen", + "photo": "photos", + "buffalo": "buffaloes", + "tomato": "tomatoes", + "dwarf": "dwarves", + "elf": "elves", + "information": "information", + "equipment": "equipment", + "criterion": "criteria", +} + +func init() { + inflection.AddIrregular("criterion", "criteria") +} + +func TestPlural(t *testing.T) { + for key, value := range inflections { + if v := inflection.Plural(strings.ToUpper(key)); v != strings.ToUpper(value) { + t.Errorf("%v's plural should be %v, but got %v", strings.ToUpper(key), strings.ToUpper(value), v) + } + + if v := inflection.Plural(strings.Title(key)); v != strings.Title(value) { + t.Errorf("%v's plural should be %v, but got %v", strings.Title(key), strings.Title(value), v) + } + + if v := inflection.Plural(key); v != value { + t.Errorf("%v's plural should be %v, but got %v", key, value, v) + } + } +} + +func TestSingular(t *testing.T) { + for key, value := range inflections { + if v := inflection.Singular(strings.ToUpper(value)); v != strings.ToUpper(key) { + t.Errorf("%v's singular should be %v, but got %v", strings.ToUpper(value), strings.ToUpper(key), v) + } + + if v := inflection.Singular(strings.Title(value)); v != strings.Title(key) { + t.Errorf("%v's singular should be %v, but got %v", strings.Title(value), strings.Title(key), v) + } + + if v := 
inflection.Singular(value); v != key { + t.Errorf("%v's singular should be %v, but got %v", value, key, v) + } + } +} diff --git a/vendor/github.com/mattn/go-sqlite3/BUILD b/vendor/github.com/mattn/go-sqlite3/BUILD index 1a4bff5a2220..1f9dd9c110b5 100644 --- a/vendor/github.com/mattn/go-sqlite3/BUILD +++ b/vendor/github.com/mattn/go-sqlite3/BUILD @@ -1,9 +1,4 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "go_default_library", @@ -46,6 +41,20 @@ go_library( "//conditions:default": [], }), importpath = "github.com/mattn/go-sqlite3", + visibility = ["//visibility:public"], +) + +go_test( + name = "go_default_test", + srcs = [ + "callback_test.go", + "error_test.go", + "sqlite3_fts3_test.go", + "sqlite3_test.go", + ], + importpath = "github.com/mattn/go-sqlite3", + library = ":go_default_library", + deps = ["//vendor/github.com/mattn/go-sqlite3/sqlite3_test:go_default_library"], ) filegroup( @@ -59,4 +68,5 @@ filegroup( name = "all-srcs", srcs = [":package-srcs"], tags = ["automanaged"], + visibility = ["//visibility:public"], ) diff --git a/vendor/github.com/mattn/go-sqlite3/callback_test.go b/vendor/github.com/mattn/go-sqlite3/callback_test.go new file mode 100644 index 000000000000..5c61f4439f95 --- /dev/null +++ b/vendor/github.com/mattn/go-sqlite3/callback_test.go @@ -0,0 +1,97 @@ +package sqlite3 + +import ( + "errors" + "math" + "reflect" + "testing" +) + +func TestCallbackArgCast(t *testing.T) { + intConv := callbackSyntheticForTests(reflect.ValueOf(int64(math.MaxInt64)), nil) + floatConv := callbackSyntheticForTests(reflect.ValueOf(float64(math.MaxFloat64)), nil) + errConv := callbackSyntheticForTests(reflect.Value{}, errors.New("test")) + + tests := []struct { + f callbackArgConverter + o reflect.Value + }{ + {intConv, reflect.ValueOf(int8(-1))}, + {intConv, reflect.ValueOf(int16(-1))}, + {intConv, reflect.ValueOf(int32(-1))}, + {intConv, reflect.ValueOf(uint8(math.MaxUint8))}, + {intConv, reflect.ValueOf(uint16(math.MaxUint16))}, + {intConv, reflect.ValueOf(uint32(math.MaxUint32))}, + // Special case, int64->uint64 is only 1<<63 - 1, not 1<<64 - 1 + {intConv, reflect.ValueOf(uint64(math.MaxInt64))}, + {floatConv, reflect.ValueOf(float32(math.Inf(1)))}, + } + + for _, test := range tests { + conv := callbackArgCast{test.f, test.o.Type()} + val, err := conv.Run(nil) + if err != nil { + t.Errorf("Couldn't convert to %s: %s", test.o.Type(), err) + } else if !reflect.DeepEqual(val.Interface(), test.o.Interface()) { + t.Errorf("Unexpected result from converting to %s: got %v, want %v", test.o.Type(), val.Interface(), test.o.Interface()) + } + } + + conv := callbackArgCast{errConv, reflect.TypeOf(int8(0))} + _, err := conv.Run(nil) + if err == nil { + t.Errorf("Expected error during callbackArgCast, but got none") + } +} + +func TestCallbackConverters(t *testing.T) { + tests := []struct { + v interface{} + err bool + }{ + // Unfortunately, we can't tell which converter was returned, + // but we can at least check which types can be converted. 
+ {[]byte{0}, false}, + {"text", false}, + {true, false}, + {int8(0), false}, + {int16(0), false}, + {int32(0), false}, + {int64(0), false}, + {uint8(0), false}, + {uint16(0), false}, + {uint32(0), false}, + {uint64(0), false}, + {int(0), false}, + {uint(0), false}, + {float64(0), false}, + {float32(0), false}, + + {func() {}, true}, + {complex64(complex(0, 0)), true}, + {complex128(complex(0, 0)), true}, + {struct{}{}, true}, + {map[string]string{}, true}, + {[]string{}, true}, + {(*int8)(nil), true}, + {make(chan int), true}, + } + + for _, test := range tests { + _, err := callbackArg(reflect.TypeOf(test.v)) + if test.err && err == nil { + t.Errorf("Expected an error when converting %s, got no error", reflect.TypeOf(test.v)) + } else if !test.err && err != nil { + t.Errorf("Expected converter when converting %s, got error: %s", reflect.TypeOf(test.v), err) + } + } + + for _, test := range tests { + _, err := callbackRet(reflect.TypeOf(test.v)) + if test.err && err == nil { + t.Errorf("Expected an error when converting %s, got no error", reflect.TypeOf(test.v)) + } else if !test.err && err != nil { + t.Errorf("Expected converter when converting %s, got error: %s", reflect.TypeOf(test.v), err) + } + } +} diff --git a/vendor/github.com/mattn/go-sqlite3/error_test.go b/vendor/github.com/mattn/go-sqlite3/error_test.go new file mode 100644 index 000000000000..1ccbe5bf8583 --- /dev/null +++ b/vendor/github.com/mattn/go-sqlite3/error_test.go @@ -0,0 +1,242 @@ +// Copyright (C) 2014 Yasuhiro Matsumoto . +// +// Use of this source code is governed by an MIT-style +// license that can be found in the LICENSE file. + +package sqlite3 + +import ( + "database/sql" + "io/ioutil" + "os" + "path" + "testing" +) + +func TestSimpleError(t *testing.T) { + e := ErrError.Error() + if e != "SQL logic error or missing database" { + t.Error("wrong error code:" + e) + } +} + +func TestCorruptDbErrors(t *testing.T) { + dirName, err := ioutil.TempDir("", "sqlite3") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dirName) + + dbFileName := path.Join(dirName, "test.db") + f, err := os.Create(dbFileName) + if err != nil { + t.Error(err) + } + f.Write([]byte{1, 2, 3, 4, 5}) + f.Close() + + db, err := sql.Open("sqlite3", dbFileName) + if err == nil { + _, err = db.Exec("drop table foo") + } + + sqliteErr := err.(Error) + if sqliteErr.Code != ErrNotADB { + t.Error("wrong error code for corrupted DB") + } + if err.Error() == "" { + t.Error("wrong error string for corrupted DB") + } + db.Close() +} + +func TestSqlLogicErrors(t *testing.T) { + dirName, err := ioutil.TempDir("", "sqlite3") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dirName) + + dbFileName := path.Join(dirName, "test.db") + db, err := sql.Open("sqlite3", dbFileName) + if err != nil { + t.Error(err) + } + defer db.Close() + + _, err = db.Exec("CREATE TABLE Foo (id INTEGER PRIMARY KEY)") + if err != nil { + t.Error(err) + } + + const expectedErr = "table Foo already exists" + _, err = db.Exec("CREATE TABLE Foo (id INTEGER PRIMARY KEY)") + if err.Error() != expectedErr { + t.Errorf("Unexpected error: %s, expected %s", err.Error(), expectedErr) + } + +} + +func TestExtendedErrorCodes_ForeignKey(t *testing.T) { + dirName, err := ioutil.TempDir("", "sqlite3-err") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dirName) + + dbFileName := path.Join(dirName, "test.db") + db, err := sql.Open("sqlite3", dbFileName) + if err != nil { + t.Error(err) + } + defer db.Close() + + _, err = db.Exec("PRAGMA foreign_keys=ON;") + if err != nil { 
+ t.Errorf("PRAGMA foreign_keys=ON: %v", err) + } + + _, err = db.Exec(`CREATE TABLE Foo ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + value INTEGER NOT NULL, + ref INTEGER NULL REFERENCES Foo (id), + UNIQUE(value) + );`) + if err != nil { + t.Error(err) + } + + _, err = db.Exec("INSERT INTO Foo (ref, value) VALUES (100, 100);") + if err == nil { + t.Error("No error!") + } else { + sqliteErr := err.(Error) + if sqliteErr.Code != ErrConstraint { + t.Errorf("Wrong basic error code: %d != %d", + sqliteErr.Code, ErrConstraint) + } + if sqliteErr.ExtendedCode != ErrConstraintForeignKey { + t.Errorf("Wrong extended error code: %d != %d", + sqliteErr.ExtendedCode, ErrConstraintForeignKey) + } + } + +} + +func TestExtendedErrorCodes_NotNull(t *testing.T) { + dirName, err := ioutil.TempDir("", "sqlite3-err") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dirName) + + dbFileName := path.Join(dirName, "test.db") + db, err := sql.Open("sqlite3", dbFileName) + if err != nil { + t.Error(err) + } + defer db.Close() + + _, err = db.Exec("PRAGMA foreign_keys=ON;") + if err != nil { + t.Errorf("PRAGMA foreign_keys=ON: %v", err) + } + + _, err = db.Exec(`CREATE TABLE Foo ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + value INTEGER NOT NULL, + ref INTEGER NULL REFERENCES Foo (id), + UNIQUE(value) + );`) + if err != nil { + t.Error(err) + } + + res, err := db.Exec("INSERT INTO Foo (value) VALUES (100);") + if err != nil { + t.Fatalf("Creating first row: %v", err) + } + + id, err := res.LastInsertId() + if err != nil { + t.Fatalf("Retrieving last insert id: %v", err) + } + + _, err = db.Exec("INSERT INTO Foo (ref) VALUES (?);", id) + if err == nil { + t.Error("No error!") + } else { + sqliteErr := err.(Error) + if sqliteErr.Code != ErrConstraint { + t.Errorf("Wrong basic error code: %d != %d", + sqliteErr.Code, ErrConstraint) + } + if sqliteErr.ExtendedCode != ErrConstraintNotNull { + t.Errorf("Wrong extended error code: %d != %d", + sqliteErr.ExtendedCode, ErrConstraintNotNull) + } + } + +} + +func TestExtendedErrorCodes_Unique(t *testing.T) { + dirName, err := ioutil.TempDir("", "sqlite3-err") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dirName) + + dbFileName := path.Join(dirName, "test.db") + db, err := sql.Open("sqlite3", dbFileName) + if err != nil { + t.Error(err) + } + defer db.Close() + + _, err = db.Exec("PRAGMA foreign_keys=ON;") + if err != nil { + t.Errorf("PRAGMA foreign_keys=ON: %v", err) + } + + _, err = db.Exec(`CREATE TABLE Foo ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + value INTEGER NOT NULL, + ref INTEGER NULL REFERENCES Foo (id), + UNIQUE(value) + );`) + if err != nil { + t.Error(err) + } + + res, err := db.Exec("INSERT INTO Foo (value) VALUES (100);") + if err != nil { + t.Fatalf("Creating first row: %v", err) + } + + id, err := res.LastInsertId() + if err != nil { + t.Fatalf("Retrieving last insert id: %v", err) + } + + _, err = db.Exec("INSERT INTO Foo (ref, value) VALUES (?, 100);", id) + if err == nil { + t.Error("No error!") + } else { + sqliteErr := err.(Error) + if sqliteErr.Code != ErrConstraint { + t.Errorf("Wrong basic error code: %d != %d", + sqliteErr.Code, ErrConstraint) + } + if sqliteErr.ExtendedCode != ErrConstraintUnique { + t.Errorf("Wrong extended error code: %d != %d", + sqliteErr.ExtendedCode, ErrConstraintUnique) + } + extended := sqliteErr.Code.Extend(3).Error() + expected := "constraint failed" + if extended != expected { + t.Errorf("Wrong basic error code: %q != %q", + extended, expected) + } + } + +} diff --git 
a/vendor/github.com/mattn/go-sqlite3/sqlite3_fts3_test.go b/vendor/github.com/mattn/go-sqlite3/sqlite3_fts3_test.go new file mode 100644 index 000000000000..e06fc5d1d09d --- /dev/null +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3_fts3_test.go @@ -0,0 +1,130 @@ +// Copyright (C) 2015 Yasuhiro Matsumoto . +// +// Use of this source code is governed by an MIT-style +// license that can be found in the LICENSE file. + +package sqlite3 + +import ( + "database/sql" + "os" + "testing" +) + +func TestFTS3(t *testing.T) { + tempFilename := TempFilename(t) + defer os.Remove(tempFilename) + db, err := sql.Open("sqlite3", tempFilename) + if err != nil { + t.Fatal("Failed to open database:", err) + } + defer db.Close() + + _, err = db.Exec("DROP TABLE foo") + _, err = db.Exec("CREATE VIRTUAL TABLE foo USING fts3(id INTEGER PRIMARY KEY, value TEXT)") + if err != nil { + t.Fatal("Failed to create table:", err) + } + + _, err = db.Exec("INSERT INTO foo(id, value) VALUES(?, ?)", 1, `今日の 晩御飯は 天麩羅よ`) + if err != nil { + t.Fatal("Failed to insert value:", err) + } + + _, err = db.Exec("INSERT INTO foo(id, value) VALUES(?, ?)", 2, `今日は いい 天気だ`) + if err != nil { + t.Fatal("Failed to insert value:", err) + } + + rows, err := db.Query("SELECT id, value FROM foo WHERE value MATCH '今日* 天*'") + if err != nil { + t.Fatal("Unable to query foo table:", err) + } + defer rows.Close() + + for rows.Next() { + var id int + var value string + + if err := rows.Scan(&id, &value); err != nil { + t.Error("Unable to scan results:", err) + continue + } + + if id == 1 && value != `今日の 晩御飯は 天麩羅よ` { + t.Error("Value for id 1 should be `今日の 晩御飯は 天麩羅よ`, but:", value) + } else if id == 2 && value != `今日は いい 天気だ` { + t.Error("Value for id 2 should be `今日は いい 天気だ`, but:", value) + } + } + + rows, err = db.Query("SELECT value FROM foo WHERE value MATCH '今日* 天麩羅*'") + if err != nil { + t.Fatal("Unable to query foo table:", err) + } + defer rows.Close() + + var value string + if !rows.Next() { + t.Fatal("Result should be only one") + } + + if err := rows.Scan(&value); err != nil { + t.Fatal("Unable to scan results:", err) + } + + if value != `今日の 晩御飯は 天麩羅よ` { + t.Fatal("Value should be `今日の 晩御飯は 天麩羅よ`, but:", value) + } + + if rows.Next() { + t.Fatal("Result should be only one") + } +} + +func TestFTS4(t *testing.T) { + tempFilename := TempFilename(t) + defer os.Remove(tempFilename) + db, err := sql.Open("sqlite3", tempFilename) + if err != nil { + t.Fatal("Failed to open database:", err) + } + defer db.Close() + + _, err = db.Exec("DROP TABLE foo") + _, err = db.Exec("CREATE VIRTUAL TABLE foo USING fts4(tokenize=unicode61, id INTEGER PRIMARY KEY, value TEXT)") + switch { + case err != nil && err.Error() == "unknown tokenizer: unicode61": + t.Skip("FTS4 not supported") + case err != nil: + t.Fatal("Failed to create table:", err) + } + + _, err = db.Exec("INSERT INTO foo(id, value) VALUES(?, ?)", 1, `février`) + if err != nil { + t.Fatal("Failed to insert value:", err) + } + + rows, err := db.Query("SELECT value FROM foo WHERE value MATCH 'fevrier'") + if err != nil { + t.Fatal("Unable to query foo table:", err) + } + defer rows.Close() + + var value string + if !rows.Next() { + t.Fatal("Result should be only one") + } + + if err := rows.Scan(&value); err != nil { + t.Fatal("Unable to scan results:", err) + } + + if value != `février` { + t.Fatal("Value should be `février`, but:", value) + } + + if rows.Next() { + t.Fatal("Result should be only one") + } +} diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3_test.go 
b/vendor/github.com/mattn/go-sqlite3/sqlite3_test.go new file mode 100644 index 000000000000..44ec64084f3c --- /dev/null +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3_test.go @@ -0,0 +1,1350 @@ +// Copyright (C) 2014 Yasuhiro Matsumoto . +// +// Use of this source code is governed by an MIT-style +// license that can be found in the LICENSE file. + +package sqlite3 + +import ( + "database/sql" + "database/sql/driver" + "errors" + "fmt" + "io/ioutil" + "net/url" + "os" + "reflect" + "regexp" + "strings" + "sync" + "testing" + "time" + + "github.com/mattn/go-sqlite3/sqlite3_test" +) + +func TempFilename(t *testing.T) string { + f, err := ioutil.TempFile("", "go-sqlite3-test-") + if err != nil { + t.Fatal(err) + } + f.Close() + return f.Name() +} + +func doTestOpen(t *testing.T, option string) (string, error) { + var url string + tempFilename := TempFilename(t) + defer os.Remove(tempFilename) + if option != "" { + url = tempFilename + option + } else { + url = tempFilename + } + db, err := sql.Open("sqlite3", url) + if err != nil { + return "Failed to open database:", err + } + defer os.Remove(tempFilename) + defer db.Close() + + _, err = db.Exec("drop table foo") + _, err = db.Exec("create table foo (id integer)") + if err != nil { + return "Failed to create table:", err + } + + if stat, err := os.Stat(tempFilename); err != nil || stat.IsDir() { + return "Failed to create ./foo.db", nil + } + + return "", nil +} + +func TestOpen(t *testing.T) { + cases := map[string]bool{ + "": true, + "?_txlock=immediate": true, + "?_txlock=deferred": true, + "?_txlock=exclusive": true, + "?_txlock=bogus": false, + } + for option, expectedPass := range cases { + result, err := doTestOpen(t, option) + if result == "" { + if !expectedPass { + errmsg := fmt.Sprintf("_txlock error not caught at dbOpen with option: %s", option) + t.Fatal(errmsg) + } + } else if expectedPass { + if err == nil { + t.Fatal(result) + } else { + t.Fatal(result, err) + } + } + } +} + +func TestReadonly(t *testing.T) { + tempFilename := TempFilename(t) + defer os.Remove(tempFilename) + + db1, err := sql.Open("sqlite3", "file:"+tempFilename) + if err != nil { + t.Fatal(err) + } + db1.Exec("CREATE TABLE test (x int, y float)") + + db2, err := sql.Open("sqlite3", "file:"+tempFilename+"?mode=ro") + if err != nil { + t.Fatal(err) + } + _ = db2 + _, err = db2.Exec("INSERT INTO test VALUES (1, 3.14)") + if err == nil { + t.Fatal("didn't expect INSERT into read-only database to work") + } +} + +func TestClose(t *testing.T) { + tempFilename := TempFilename(t) + defer os.Remove(tempFilename) + db, err := sql.Open("sqlite3", tempFilename) + if err != nil { + t.Fatal("Failed to open database:", err) + } + + _, err = db.Exec("drop table foo") + _, err = db.Exec("create table foo (id integer)") + if err != nil { + t.Fatal("Failed to create table:", err) + } + + stmt, err := db.Prepare("select id from foo where id = ?") + if err != nil { + t.Fatal("Failed to select records:", err) + } + + db.Close() + _, err = stmt.Exec(1) + if err == nil { + t.Fatal("Failed to operate closed statement") + } +} + +func TestInsert(t *testing.T) { + tempFilename := TempFilename(t) + defer os.Remove(tempFilename) + db, err := sql.Open("sqlite3", tempFilename) + if err != nil { + t.Fatal("Failed to open database:", err) + } + defer db.Close() + + _, err = db.Exec("drop table foo") + _, err = db.Exec("create table foo (id integer)") + if err != nil { + t.Fatal("Failed to create table:", err) + } + + res, err := db.Exec("insert into foo(id) values(123)") + if err != nil 
{ + t.Fatal("Failed to insert record:", err) + } + affected, _ := res.RowsAffected() + if affected != 1 { + t.Fatalf("Expected %d for affected rows, but %d:", 1, affected) + } + + rows, err := db.Query("select id from foo") + if err != nil { + t.Fatal("Failed to select records:", err) + } + defer rows.Close() + + rows.Next() + + var result int + rows.Scan(&result) + if result != 123 { + t.Errorf("Expected %d for fetched result, but %d:", 123, result) + } +} + +func TestUpdate(t *testing.T) { + tempFilename := TempFilename(t) + defer os.Remove(tempFilename) + db, err := sql.Open("sqlite3", tempFilename) + if err != nil { + t.Fatal("Failed to open database:", err) + } + defer db.Close() + + _, err = db.Exec("drop table foo") + _, err = db.Exec("create table foo (id integer)") + if err != nil { + t.Fatal("Failed to create table:", err) + } + + res, err := db.Exec("insert into foo(id) values(123)") + if err != nil { + t.Fatal("Failed to insert record:", err) + } + expected, err := res.LastInsertId() + if err != nil { + t.Fatal("Failed to get LastInsertId:", err) + } + affected, _ := res.RowsAffected() + if err != nil { + t.Fatal("Failed to get RowsAffected:", err) + } + if affected != 1 { + t.Fatalf("Expected %d for affected rows, but %d:", 1, affected) + } + + res, err = db.Exec("update foo set id = 234") + if err != nil { + t.Fatal("Failed to update record:", err) + } + lastId, err := res.LastInsertId() + if err != nil { + t.Fatal("Failed to get LastInsertId:", err) + } + if expected != lastId { + t.Errorf("Expected %q for last Id, but %q:", expected, lastId) + } + affected, _ = res.RowsAffected() + if err != nil { + t.Fatal("Failed to get RowsAffected:", err) + } + if affected != 1 { + t.Fatalf("Expected %d for affected rows, but %d:", 1, affected) + } + + rows, err := db.Query("select id from foo") + if err != nil { + t.Fatal("Failed to select records:", err) + } + defer rows.Close() + + rows.Next() + + var result int + rows.Scan(&result) + if result != 234 { + t.Errorf("Expected %d for fetched result, but %d:", 234, result) + } +} + +func TestDelete(t *testing.T) { + tempFilename := TempFilename(t) + defer os.Remove(tempFilename) + db, err := sql.Open("sqlite3", tempFilename) + if err != nil { + t.Fatal("Failed to open database:", err) + } + defer db.Close() + + _, err = db.Exec("drop table foo") + _, err = db.Exec("create table foo (id integer)") + if err != nil { + t.Fatal("Failed to create table:", err) + } + + res, err := db.Exec("insert into foo(id) values(123)") + if err != nil { + t.Fatal("Failed to insert record:", err) + } + expected, err := res.LastInsertId() + if err != nil { + t.Fatal("Failed to get LastInsertId:", err) + } + affected, err := res.RowsAffected() + if err != nil { + t.Fatal("Failed to get RowsAffected:", err) + } + if affected != 1 { + t.Errorf("Expected %d for cout of affected rows, but %q:", 1, affected) + } + + res, err = db.Exec("delete from foo where id = 123") + if err != nil { + t.Fatal("Failed to delete record:", err) + } + lastId, err := res.LastInsertId() + if err != nil { + t.Fatal("Failed to get LastInsertId:", err) + } + if expected != lastId { + t.Errorf("Expected %q for last Id, but %q:", expected, lastId) + } + affected, err = res.RowsAffected() + if err != nil { + t.Fatal("Failed to get RowsAffected:", err) + } + if affected != 1 { + t.Errorf("Expected %d for cout of affected rows, but %q:", 1, affected) + } + + rows, err := db.Query("select id from foo") + if err != nil { + t.Fatal("Failed to select records:", err) + } + defer rows.Close() + + 
if rows.Next() { + t.Error("Fetched a row but expected no rows") + } +} + +func TestBooleanRoundtrip(t *testing.T) { + tempFilename := TempFilename(t) + defer os.Remove(tempFilename) + db, err := sql.Open("sqlite3", tempFilename) + if err != nil { + t.Fatal("Failed to open database:", err) + } + defer db.Close() + + _, err = db.Exec("DROP TABLE foo") + _, err = db.Exec("CREATE TABLE foo(id INTEGER, value BOOL)") + if err != nil { + t.Fatal("Failed to create table:", err) + } + + _, err = db.Exec("INSERT INTO foo(id, value) VALUES(1, ?)", true) + if err != nil { + t.Fatal("Failed to insert true value:", err) + } + + _, err = db.Exec("INSERT INTO foo(id, value) VALUES(2, ?)", false) + if err != nil { + t.Fatal("Failed to insert false value:", err) + } + + rows, err := db.Query("SELECT id, value FROM foo") + if err != nil { + t.Fatal("Unable to query foo table:", err) + } + defer rows.Close() + + for rows.Next() { + var id int + var value bool + + if err := rows.Scan(&id, &value); err != nil { + t.Error("Unable to scan results:", err) + continue + } + + if id == 1 && !value { + t.Error("Value for id 1 should be true, not false") + + } else if id == 2 && value { + t.Error("Value for id 2 should be false, not true") + } + } +} + +func timezone(t time.Time) string { return t.Format("-07:00") } + +func TestTimestamp(t *testing.T) { + tempFilename := TempFilename(t) + defer os.Remove(tempFilename) + db, err := sql.Open("sqlite3", tempFilename) + if err != nil { + t.Fatal("Failed to open database:", err) + } + defer db.Close() + + _, err = db.Exec("DROP TABLE foo") + _, err = db.Exec("CREATE TABLE foo(id INTEGER, ts timeSTAMP, dt DATETIME)") + if err != nil { + t.Fatal("Failed to create table:", err) + } + + timestamp1 := time.Date(2012, time.April, 6, 22, 50, 0, 0, time.UTC) + timestamp2 := time.Date(2006, time.January, 2, 15, 4, 5, 123456789, time.UTC) + timestamp3 := time.Date(2012, time.November, 4, 0, 0, 0, 0, time.UTC) + tzTest := time.FixedZone("TEST", -9*3600-13*60) + tests := []struct { + value interface{} + expected time.Time + }{ + {"nonsense", time.Time{}}, + {"0000-00-00 00:00:00", time.Time{}}, + {timestamp1, timestamp1}, + {timestamp2.Unix(), timestamp2.Truncate(time.Second)}, + {timestamp2.UnixNano() / int64(time.Millisecond), timestamp2.Truncate(time.Millisecond)}, + {timestamp1.In(tzTest), timestamp1.In(tzTest)}, + {timestamp1.Format("2006-01-02 15:04:05.000"), timestamp1}, + {timestamp1.Format("2006-01-02T15:04:05.000"), timestamp1}, + {timestamp1.Format("2006-01-02 15:04:05"), timestamp1}, + {timestamp1.Format("2006-01-02T15:04:05"), timestamp1}, + {timestamp2, timestamp2}, + {"2006-01-02 15:04:05.123456789", timestamp2}, + {"2006-01-02T15:04:05.123456789", timestamp2}, + {"2006-01-02T05:51:05.123456789-09:13", timestamp2.In(tzTest)}, + {"2012-11-04", timestamp3}, + {"2012-11-04 00:00", timestamp3}, + {"2012-11-04 00:00:00", timestamp3}, + {"2012-11-04 00:00:00.000", timestamp3}, + {"2012-11-04T00:00", timestamp3}, + {"2012-11-04T00:00:00", timestamp3}, + {"2012-11-04T00:00:00.000", timestamp3}, + {"2006-01-02T15:04:05.123456789Z", timestamp2}, + {"2012-11-04Z", timestamp3}, + {"2012-11-04 00:00Z", timestamp3}, + {"2012-11-04 00:00:00Z", timestamp3}, + {"2012-11-04 00:00:00.000Z", timestamp3}, + {"2012-11-04T00:00Z", timestamp3}, + {"2012-11-04T00:00:00Z", timestamp3}, + {"2012-11-04T00:00:00.000Z", timestamp3}, + } + for i := range tests { + _, err = db.Exec("INSERT INTO foo(id, ts, dt) VALUES(?, ?, ?)", i, tests[i].value, tests[i].value) + if err != nil { + t.Fatal("Failed to
insert timestamp:", err) + } + } + + rows, err := db.Query("SELECT id, ts, dt FROM foo ORDER BY id ASC") + if err != nil { + t.Fatal("Unable to query foo table:", err) + } + defer rows.Close() + + seen := 0 + for rows.Next() { + var id int + var ts, dt time.Time + + if err := rows.Scan(&id, &ts, &dt); err != nil { + t.Error("Unable to scan results:", err) + continue + } + if id < 0 || id >= len(tests) { + t.Error("Bad row id: ", id) + continue + } + seen++ + if !tests[id].expected.Equal(ts) { + t.Errorf("Timestamp value for id %v (%v) should be %v, not %v", id, tests[id].value, tests[id].expected, dt) + } + if !tests[id].expected.Equal(dt) { + t.Errorf("Datetime value for id %v (%v) should be %v, not %v", id, tests[id].value, tests[id].expected, dt) + } + if timezone(tests[id].expected) != timezone(ts) { + t.Errorf("Timezone for id %v (%v) should be %v, not %v", id, tests[id].value, + timezone(tests[id].expected), timezone(ts)) + } + if timezone(tests[id].expected) != timezone(dt) { + t.Errorf("Timezone for id %v (%v) should be %v, not %v", id, tests[id].value, + timezone(tests[id].expected), timezone(dt)) + } + } + + if seen != len(tests) { + t.Errorf("Expected to see %d rows", len(tests)) + } +} + +func TestBoolean(t *testing.T) { + tempFilename := TempFilename(t) + defer os.Remove(tempFilename) + db, err := sql.Open("sqlite3", tempFilename) + if err != nil { + t.Fatal("Failed to open database:", err) + } + + defer db.Close() + + _, err = db.Exec("CREATE TABLE foo(id INTEGER, fbool BOOLEAN)") + if err != nil { + t.Fatal("Failed to create table:", err) + } + + bool1 := true + _, err = db.Exec("INSERT INTO foo(id, fbool) VALUES(1, ?)", bool1) + if err != nil { + t.Fatal("Failed to insert boolean:", err) + } + + bool2 := false + _, err = db.Exec("INSERT INTO foo(id, fbool) VALUES(2, ?)", bool2) + if err != nil { + t.Fatal("Failed to insert boolean:", err) + } + + bool3 := "nonsense" + _, err = db.Exec("INSERT INTO foo(id, fbool) VALUES(3, ?)", bool3) + if err != nil { + t.Fatal("Failed to insert nonsense:", err) + } + + rows, err := db.Query("SELECT id, fbool FROM foo where fbool = ?", bool1) + if err != nil { + t.Fatal("Unable to query foo table:", err) + } + counter := 0 + + var id int + var fbool bool + + for rows.Next() { + if err := rows.Scan(&id, &fbool); err != nil { + t.Fatal("Unable to scan results:", err) + } + counter++ + } + + if counter != 1 { + t.Fatalf("Expected 1 row but %v", counter) + } + + if id != 1 && fbool != true { + t.Fatalf("Value for id 1 should be %v, not %v", bool1, fbool) + } + + rows, err = db.Query("SELECT id, fbool FROM foo where fbool = ?", bool2) + if err != nil { + t.Fatal("Unable to query foo table:", err) + } + + counter = 0 + + for rows.Next() { + if err := rows.Scan(&id, &fbool); err != nil { + t.Fatal("Unable to scan results:", err) + } + counter++ + } + + if counter != 1 { + t.Fatalf("Expected 1 row but %v", counter) + } + + if id != 2 && fbool != false { + t.Fatalf("Value for id 2 should be %v, not %v", bool2, fbool) + } + + // make sure "nonsense" triggered an error + rows, err = db.Query("SELECT id, fbool FROM foo where id=?;", 3) + if err != nil { + t.Fatal("Unable to query foo table:", err) + } + + rows.Next() + err = rows.Scan(&id, &fbool) + if err == nil { + t.Error("Expected error from \"nonsense\" bool") + } +} + +func TestFloat32(t *testing.T) { + tempFilename := TempFilename(t) + defer os.Remove(tempFilename) + db, err := sql.Open("sqlite3", tempFilename) + if err != nil { + t.Fatal("Failed to open database:", err) + } + defer db.Close() + 
+ _, err = db.Exec("CREATE TABLE foo(id INTEGER)") + if err != nil { + t.Fatal("Failed to create table:", err) + } + + _, err = db.Exec("INSERT INTO foo(id) VALUES(null)") + if err != nil { + t.Fatal("Failed to insert null:", err) + } + + rows, err := db.Query("SELECT id FROM foo") + if err != nil { + t.Fatal("Unable to query foo table:", err) + } + + if !rows.Next() { + t.Fatal("Unable to query results:", err) + } + + var id interface{} + if err := rows.Scan(&id); err != nil { + t.Fatal("Unable to scan results:", err) + } + if id != nil { + t.Error("Expected nil but not") + } +} + +func TestNull(t *testing.T) { + tempFilename := TempFilename(t) + defer os.Remove(tempFilename) + db, err := sql.Open("sqlite3", tempFilename) + if err != nil { + t.Fatal("Failed to open database:", err) + } + defer db.Close() + + rows, err := db.Query("SELECT 3.141592") + if err != nil { + t.Fatal("Unable to query foo table:", err) + } + + if !rows.Next() { + t.Fatal("Unable to query results:", err) + } + + var v interface{} + if err := rows.Scan(&v); err != nil { + t.Fatal("Unable to scan results:", err) + } + f, ok := v.(float64) + if !ok { + t.Error("Expected float but not") + } + if f != 3.141592 { + t.Error("Expected 3.141592 but not") + } +} + +func TestTransaction(t *testing.T) { + tempFilename := TempFilename(t) + defer os.Remove(tempFilename) + db, err := sql.Open("sqlite3", tempFilename) + if err != nil { + t.Fatal("Failed to open database:", err) + } + defer db.Close() + + _, err = db.Exec("CREATE TABLE foo(id INTEGER)") + if err != nil { + t.Fatal("Failed to create table:", err) + } + + tx, err := db.Begin() + if err != nil { + t.Fatal("Failed to begin transaction:", err) + } + + _, err = tx.Exec("INSERT INTO foo(id) VALUES(1)") + if err != nil { + t.Fatal("Failed to insert null:", err) + } + + rows, err := tx.Query("SELECT id from foo") + if err != nil { + t.Fatal("Unable to query foo table:", err) + } + + err = tx.Rollback() + if err != nil { + t.Fatal("Failed to rollback transaction:", err) + } + + if rows.Next() { + t.Fatal("Unable to query results:", err) + } + + tx, err = db.Begin() + if err != nil { + t.Fatal("Failed to begin transaction:", err) + } + + _, err = tx.Exec("INSERT INTO foo(id) VALUES(1)") + if err != nil { + t.Fatal("Failed to insert null:", err) + } + + err = tx.Commit() + if err != nil { + t.Fatal("Failed to commit transaction:", err) + } + + rows, err = tx.Query("SELECT id from foo") + if err == nil { + t.Fatal("Expected failure to query") + } +} + +func TestWAL(t *testing.T) { + tempFilename := TempFilename(t) + defer os.Remove(tempFilename) + db, err := sql.Open("sqlite3", tempFilename) + if err != nil { + t.Fatal("Failed to open database:", err) + } + defer db.Close() + + if _, err = db.Exec("PRAGMA journal_mode=WAL;"); err != nil { + t.Fatal("Failed to Exec PRAGMA journal_mode:", err) + } + if _, err = db.Exec("PRAGMA locking_mode=EXCLUSIVE;"); err != nil { + t.Fatal("Failed to Exec PRAGMA locking_mode:", err) + } + if _, err = db.Exec("CREATE TABLE test (id SERIAL, user TEXT NOT NULL, name TEXT NOT NULL);"); err != nil { + t.Fatal("Failed to Exec CREATE TABLE:", err) + } + if _, err = db.Exec("INSERT INTO test (user, name) VALUES ('user','name');"); err != nil { + t.Fatal("Failed to Exec INSERT:", err) + } + + trans, err := db.Begin() + if err != nil { + t.Fatal("Failed to Begin:", err) + } + s, err := trans.Prepare("INSERT INTO test (user, name) VALUES (?, ?);") + if err != nil { + t.Fatal("Failed to Prepare:", err) + } + + var count int + if err = 
trans.QueryRow("SELECT count(user) FROM test;").Scan(&count); err != nil { + t.Fatal("Failed to QueryRow:", err) + } + if _, err = s.Exec("bbbb", "aaaa"); err != nil { + t.Fatal("Failed to Exec prepared statement:", err) + } + if err = s.Close(); err != nil { + t.Fatal("Failed to Close prepared statement:", err) + } + if err = trans.Commit(); err != nil { + t.Fatal("Failed to Commit:", err) + } +} + +func TestTimezoneConversion(t *testing.T) { + zones := []string{"UTC", "US/Central", "US/Pacific", "Local"} + for _, tz := range zones { + tempFilename := TempFilename(t) + defer os.Remove(tempFilename) + db, err := sql.Open("sqlite3", tempFilename+"?_loc="+url.QueryEscape(tz)) + if err != nil { + t.Fatal("Failed to open database:", err) + } + defer db.Close() + + _, err = db.Exec("DROP TABLE foo") + _, err = db.Exec("CREATE TABLE foo(id INTEGER, ts TIMESTAMP, dt DATETIME)") + if err != nil { + t.Fatal("Failed to create table:", err) + } + + loc, err := time.LoadLocation(tz) + if err != nil { + t.Fatal("Failed to load location:", err) + } + + timestamp1 := time.Date(2012, time.April, 6, 22, 50, 0, 0, time.UTC) + timestamp2 := time.Date(2006, time.January, 2, 15, 4, 5, 123456789, time.UTC) + timestamp3 := time.Date(2012, time.November, 4, 0, 0, 0, 0, time.UTC) + tests := []struct { + value interface{} + expected time.Time + }{ + {"nonsense", time.Time{}.In(loc)}, + {"0000-00-00 00:00:00", time.Time{}.In(loc)}, + {timestamp1, timestamp1.In(loc)}, + {timestamp1.Unix(), timestamp1.In(loc)}, + {timestamp1.In(time.FixedZone("TEST", -7*3600)), timestamp1.In(loc)}, + {timestamp1.Format("2006-01-02 15:04:05.000"), timestamp1.In(loc)}, + {timestamp1.Format("2006-01-02T15:04:05.000"), timestamp1.In(loc)}, + {timestamp1.Format("2006-01-02 15:04:05"), timestamp1.In(loc)}, + {timestamp1.Format("2006-01-02T15:04:05"), timestamp1.In(loc)}, + {timestamp2, timestamp2.In(loc)}, + {"2006-01-02 15:04:05.123456789", timestamp2.In(loc)}, + {"2006-01-02T15:04:05.123456789", timestamp2.In(loc)}, + {"2012-11-04", timestamp3.In(loc)}, + {"2012-11-04 00:00", timestamp3.In(loc)}, + {"2012-11-04 00:00:00", timestamp3.In(loc)}, + {"2012-11-04 00:00:00.000", timestamp3.In(loc)}, + {"2012-11-04T00:00", timestamp3.In(loc)}, + {"2012-11-04T00:00:00", timestamp3.In(loc)}, + {"2012-11-04T00:00:00.000", timestamp3.In(loc)}, + } + for i := range tests { + _, err = db.Exec("INSERT INTO foo(id, ts, dt) VALUES(?, ?, ?)", i, tests[i].value, tests[i].value) + if err != nil { + t.Fatal("Failed to insert timestamp:", err) + } + } + + rows, err := db.Query("SELECT id, ts, dt FROM foo ORDER BY id ASC") + if err != nil { + t.Fatal("Unable to query foo table:", err) + } + defer rows.Close() + + seen := 0 + for rows.Next() { + var id int + var ts, dt time.Time + + if err := rows.Scan(&id, &ts, &dt); err != nil { + t.Error("Unable to scan results:", err) + continue + } + if id < 0 || id >= len(tests) { + t.Error("Bad row id: ", id) + continue + } + seen++ + if !tests[id].expected.Equal(ts) { + t.Errorf("Timestamp value for id %v (%v) should be %v, not %v", id, tests[id].value, tests[id].expected, ts) + } + if !tests[id].expected.Equal(dt) { + t.Errorf("Datetime value for id %v (%v) should be %v, not %v", id, tests[id].value, tests[id].expected, dt) + } + if tests[id].expected.Location().String() != ts.Location().String() { + t.Errorf("Location for id %v (%v) should be %v, not %v", id, tests[id].value, tests[id].expected.Location().String(), ts.Location().String()) + } + if tests[id].expected.Location().String() != dt.Location().String() { + 
t.Errorf("Location for id %v (%v) should be %v, not %v", id, tests[id].value, tests[id].expected.Location().String(), dt.Location().String()) + } + } + + if seen != len(tests) { + t.Errorf("Expected to see %d rows", len(tests)) + } + } +} + +func TestSuite(t *testing.T) { + tempFilename := TempFilename(t) + defer os.Remove(tempFilename) + db, err := sql.Open("sqlite3", tempFilename+"?_busy_timeout=99999") + if err != nil { + t.Fatal(err) + } + defer db.Close() + + sqlite3_test.RunTests(t, db, sqlite3_test.SQLITE) +} + +// TODO: Execer & Queryer currently disabled +// https://github.com/mattn/go-sqlite3/issues/82 +func TestExecer(t *testing.T) { + tempFilename := TempFilename(t) + defer os.Remove(tempFilename) + db, err := sql.Open("sqlite3", tempFilename) + if err != nil { + t.Fatal("Failed to open database:", err) + } + defer db.Close() + + _, err = db.Exec(` + create table foo (id integer); -- one comment + insert into foo(id) values(?); + insert into foo(id) values(?); + insert into foo(id) values(?); -- another comment + `, 1, 2, 3) + if err != nil { + t.Error("Failed to call db.Exec:", err) + } +} + +func TestQueryer(t *testing.T) { + tempFilename := TempFilename(t) + defer os.Remove(tempFilename) + db, err := sql.Open("sqlite3", tempFilename) + if err != nil { + t.Fatal("Failed to open database:", err) + } + defer db.Close() + + _, err = db.Exec(` + create table foo (id integer); + `) + if err != nil { + t.Error("Failed to call db.Query:", err) + } + + rows, err := db.Query(` + insert into foo(id) values(?); + insert into foo(id) values(?); + insert into foo(id) values(?); + select id from foo order by id; + `, 3, 2, 1) + if err != nil { + t.Error("Failed to call db.Query:", err) + } + defer rows.Close() + n := 1 + if rows != nil { + for rows.Next() { + var id int + err = rows.Scan(&id) + if err != nil { + t.Error("Failed to db.Query:", err) + } + if id != n { + t.Error("Failed to db.Query: not matched results") + } + } + } +} + +func TestStress(t *testing.T) { + tempFilename := TempFilename(t) + defer os.Remove(tempFilename) + db, err := sql.Open("sqlite3", tempFilename) + if err != nil { + t.Fatal("Failed to open database:", err) + } + db.Exec("CREATE TABLE foo (id int);") + db.Exec("INSERT INTO foo VALUES(1);") + db.Exec("INSERT INTO foo VALUES(2);") + db.Close() + + for i := 0; i < 10000; i++ { + db, err := sql.Open("sqlite3", tempFilename) + if err != nil { + t.Fatal("Failed to open database:", err) + } + + for j := 0; j < 3; j++ { + rows, err := db.Query("select * from foo where id=1;") + if err != nil { + t.Error("Failed to call db.Query:", err) + } + for rows.Next() { + var i int + if err := rows.Scan(&i); err != nil { + t.Errorf("Scan failed: %v\n", err) + } + } + if err := rows.Err(); err != nil { + t.Errorf("Post-scan failed: %v\n", err) + } + rows.Close() + } + db.Close() + } +} + +func TestDateTimeLocal(t *testing.T) { + zone := "Asia/Tokyo" + tempFilename := TempFilename(t) + defer os.Remove(tempFilename) + db, err := sql.Open("sqlite3", tempFilename+"?_loc="+zone) + if err != nil { + t.Fatal("Failed to open database:", err) + } + db.Exec("CREATE TABLE foo (dt datetime);") + db.Exec("INSERT INTO foo VALUES('2015-03-05 15:16:17');") + + row := db.QueryRow("select * from foo") + var d time.Time + err = row.Scan(&d) + if err != nil { + t.Fatal("Failed to scan datetime:", err) + } + if d.Hour() == 15 || !strings.Contains(d.String(), "JST") { + t.Fatal("Result should have timezone", d) + } + db.Close() + + db, err = sql.Open("sqlite3", tempFilename) + if err != nil { + 
t.Fatal("Failed to open database:", err) + } + + row = db.QueryRow("select * from foo") + err = row.Scan(&d) + if err != nil { + t.Fatal("Failed to scan datetime:", err) + } + if d.UTC().Hour() != 15 || !strings.Contains(d.String(), "UTC") { + t.Fatalf("Result should not have timezone %v %v", zone, d.String()) + } + + _, err = db.Exec("DELETE FROM foo") + if err != nil { + t.Fatal("Failed to delete table:", err) + } + dt, err := time.Parse("2006/1/2 15/4/5 -0700 MST", "2015/3/5 15/16/17 +0900 JST") + if err != nil { + t.Fatal("Failed to parse datetime:", err) + } + db.Exec("INSERT INTO foo VALUES(?);", dt) + + db.Close() + db, err = sql.Open("sqlite3", tempFilename+"?_loc="+zone) + if err != nil { + t.Fatal("Failed to open database:", err) + } + + row = db.QueryRow("select * from foo") + err = row.Scan(&d) + if err != nil { + t.Fatal("Failed to scan datetime:", err) + } + if d.Hour() != 15 || !strings.Contains(d.String(), "JST") { + t.Fatalf("Result should have timezone %v %v", zone, d.String()) + } +} + +func TestVersion(t *testing.T) { + s, n, id := Version() + if s == "" || n == 0 || id == "" { + t.Errorf("Version failed %q, %d, %q\n", s, n, id) + } +} + +func TestNumberNamedParams(t *testing.T) { + tempFilename := TempFilename(t) + defer os.Remove(tempFilename) + db, err := sql.Open("sqlite3", tempFilename) + if err != nil { + t.Fatal("Failed to open database:", err) + } + defer db.Close() + + _, err = db.Exec(` + create table foo (id integer, name text, extra text); + `) + if err != nil { + t.Error("Failed to call db.Query:", err) + } + + _, err = db.Exec(`insert into foo(id, name, extra) values($1, $2, $2)`, 1, "foo") + if err != nil { + t.Error("Failed to call db.Exec:", err) + } + + row := db.QueryRow(`select id, extra from foo where id = $1 and extra = $2`, 1, "foo") + if row == nil { + t.Error("Failed to call db.QueryRow") + } + var id int + var extra string + err = row.Scan(&id, &extra) + if err != nil { + t.Error("Failed to db.Scan:", err) + } + if id != 1 || extra != "foo" { + t.Error("Failed to db.QueryRow: not matched results") + } +} + +func TestStringContainingZero(t *testing.T) { + tempFilename := TempFilename(t) + defer os.Remove(tempFilename) + db, err := sql.Open("sqlite3", tempFilename) + if err != nil { + t.Fatal("Failed to open database:", err) + } + defer db.Close() + + _, err = db.Exec(` + create table foo (id integer, name, extra text); + `) + if err != nil { + t.Error("Failed to call db.Query:", err) + } + + const text = "foo\x00bar" + + _, err = db.Exec(`insert into foo(id, name, extra) values($1, $2, $2)`, 1, text) + if err != nil { + t.Error("Failed to call db.Exec:", err) + } + + row := db.QueryRow(`select id, extra from foo where id = $1 and extra = $2`, 1, text) + if row == nil { + t.Error("Failed to call db.QueryRow") + } + + var id int + var extra string + err = row.Scan(&id, &extra) + if err != nil { + t.Error("Failed to db.Scan:", err) + } + if id != 1 || extra != text { + t.Error("Failed to db.QueryRow: not matched results") + } +} + +const CurrentTimeStamp = "2006-01-02 15:04:05" + +type TimeStamp struct{ *time.Time } + +func (t TimeStamp) Scan(value interface{}) error { + var err error + switch v := value.(type) { + case string: + *t.Time, err = time.Parse(CurrentTimeStamp, v) + case []byte: + *t.Time, err = time.Parse(CurrentTimeStamp, string(v)) + default: + err = errors.New("invalid type for current_timestamp") + } + return err +} + +func (t TimeStamp) Value() (driver.Value, error) { + return t.Time.Format(CurrentTimeStamp), nil +} + +func 
TestDateTimeNow(t *testing.T) { + tempFilename := TempFilename(t) + defer os.Remove(tempFilename) + db, err := sql.Open("sqlite3", tempFilename) + if err != nil { + t.Fatal("Failed to open database:", err) + } + defer db.Close() + + var d time.Time + err = db.QueryRow("SELECT datetime('now')").Scan(TimeStamp{&d}) + if err != nil { + t.Fatal("Failed to scan datetime:", err) + } +} + +func TestFunctionRegistration(t *testing.T) { + addi_8_16_32 := func(a int8, b int16) int32 { return int32(a) + int32(b) } + addi_64 := func(a, b int64) int64 { return a + b } + addu_8_16_32 := func(a uint8, b uint16) uint32 { return uint32(a) + uint32(b) } + addu_64 := func(a, b uint64) uint64 { return a + b } + addiu := func(a int, b uint) int64 { return int64(a) + int64(b) } + addf_32_64 := func(a float32, b float64) float64 { return float64(a) + b } + not := func(a bool) bool { return !a } + regex := func(re, s string) (bool, error) { + return regexp.MatchString(re, s) + } + generic := func(a interface{}) int64 { + switch a.(type) { + case int64: + return 1 + case float64: + return 2 + case []byte: + return 3 + case string: + return 4 + default: + panic("unreachable") + } + } + variadic := func(a, b int64, c ...int64) int64 { + ret := a + b + for _, d := range c { + ret += d + } + return ret + } + variadicGeneric := func(a ...interface{}) int64 { + return int64(len(a)) + } + + sql.Register("sqlite3_FunctionRegistration", &SQLiteDriver{ + ConnectHook: func(conn *SQLiteConn) error { + if err := conn.RegisterFunc("addi_8_16_32", addi_8_16_32, true); err != nil { + return err + } + if err := conn.RegisterFunc("addi_64", addi_64, true); err != nil { + return err + } + if err := conn.RegisterFunc("addu_8_16_32", addu_8_16_32, true); err != nil { + return err + } + if err := conn.RegisterFunc("addu_64", addu_64, true); err != nil { + return err + } + if err := conn.RegisterFunc("addiu", addiu, true); err != nil { + return err + } + if err := conn.RegisterFunc("addf_32_64", addf_32_64, true); err != nil { + return err + } + if err := conn.RegisterFunc("not", not, true); err != nil { + return err + } + if err := conn.RegisterFunc("regex", regex, true); err != nil { + return err + } + if err := conn.RegisterFunc("generic", generic, true); err != nil { + return err + } + if err := conn.RegisterFunc("variadic", variadic, true); err != nil { + return err + } + if err := conn.RegisterFunc("variadicGeneric", variadicGeneric, true); err != nil { + return err + } + return nil + }, + }) + db, err := sql.Open("sqlite3_FunctionRegistration", ":memory:") + if err != nil { + t.Fatal("Failed to open database:", err) + } + defer db.Close() + + ops := []struct { + query string + expected interface{} + }{ + {"SELECT addi_8_16_32(1,2)", int32(3)}, + {"SELECT addi_64(1,2)", int64(3)}, + {"SELECT addu_8_16_32(1,2)", uint32(3)}, + {"SELECT addu_64(1,2)", uint64(3)}, + {"SELECT addiu(1,2)", int64(3)}, + {"SELECT addf_32_64(1.5,1.5)", float64(3)}, + {"SELECT not(1)", false}, + {"SELECT not(0)", true}, + {`SELECT regex("^foo.*", "foobar")`, true}, + {`SELECT regex("^foo.*", "barfoobar")`, false}, + {"SELECT generic(1)", int64(1)}, + {"SELECT generic(1.1)", int64(2)}, + {`SELECT generic(NULL)`, int64(3)}, + {`SELECT generic("foo")`, int64(4)}, + {"SELECT variadic(1,2)", int64(3)}, + {"SELECT variadic(1,2,3,4)", int64(10)}, + {"SELECT variadic(1,1,1,1,1,1,1,1,1,1)", int64(10)}, + {`SELECT variadicGeneric(1,"foo",2.3, NULL)`, int64(4)}, + } + + for _, op := range ops { + ret := reflect.New(reflect.TypeOf(op.expected)) + err = 
db.QueryRow(op.query).Scan(ret.Interface()) + if err != nil { + t.Errorf("Query %q failed: %s", op.query, err) + } else if !reflect.DeepEqual(ret.Elem().Interface(), op.expected) { + t.Errorf("Query %q returned wrong value: got %v (%T), want %v (%T)", op.query, ret.Elem().Interface(), ret.Elem().Interface(), op.expected, op.expected) + } + } +} + +type sumAggregator int64 + +func (s *sumAggregator) Step(x int64) { + *s += sumAggregator(x) +} + +func (s *sumAggregator) Done() int64 { + return int64(*s) +} + +func TestAggregatorRegistration(t *testing.T) { + customSum := func() *sumAggregator { + var ret sumAggregator + return &ret + } + + sql.Register("sqlite3_AggregatorRegistration", &SQLiteDriver{ + ConnectHook: func(conn *SQLiteConn) error { + if err := conn.RegisterAggregator("customSum", customSum, true); err != nil { + return err + } + return nil + }, + }) + db, err := sql.Open("sqlite3_AggregatorRegistration", ":memory:") + if err != nil { + t.Fatal("Failed to open database:", err) + } + defer db.Close() + + _, err = db.Exec("create table foo (department integer, profits integer)") + if err != nil { + t.Fatal("Failed to create table:", err) + } + + _, err = db.Exec("insert into foo values (1, 10), (1, 20), (2, 42)") + if err != nil { + t.Fatal("Failed to insert records:", err) + } + + tests := []struct { + dept, sum int64 + }{ + {1, 30}, + {2, 42}, + } + + for _, test := range tests { + var ret int64 + err = db.QueryRow("select customSum(profits) from foo where department = $1 group by department", test.dept).Scan(&ret) + if err != nil { + t.Fatal("Query failed:", err) + } + if ret != test.sum { + t.Fatalf("Custom sum returned wrong value, got %d, want %d", ret, test.sum) + } + } +} + +func TestDeclTypes(t *testing.T) { + + d := SQLiteDriver{} + + conn, err := d.Open(":memory:") + if err != nil { + t.Fatal("Failed to begin transaction:", err) + } + defer conn.Close() + + sqlite3conn := conn.(*SQLiteConn) + + _, err = sqlite3conn.Exec("create table foo (id integer not null primary key, name text)", nil) + if err != nil { + t.Fatal("Failed to create table:", err) + } + + _, err = sqlite3conn.Exec("insert into foo(name) values(\"bar\")", nil) + if err != nil { + t.Fatal("Failed to insert:", err) + } + + rs, err := sqlite3conn.Query("select * from foo", nil) + if err != nil { + t.Fatal("Failed to select:", err) + } + defer rs.Close() + + declTypes := rs.(*SQLiteRows).DeclTypes() + + if !reflect.DeepEqual(declTypes, []string{"integer", "text"}) { + t.Fatal("Unexpected declTypes:", declTypes) + } +} + +var customFunctionOnce sync.Once + +func BenchmarkCustomFunctions(b *testing.B) { + customFunctionOnce.Do(func() { + custom_add := func(a, b int64) int64 { + return a + b + } + + sql.Register("sqlite3_BenchmarkCustomFunctions", &SQLiteDriver{ + ConnectHook: func(conn *SQLiteConn) error { + // Impure function to force sqlite to reexecute it each time. 
+ if err := conn.RegisterFunc("custom_add", custom_add, false); err != nil { + return err + } + return nil + }, + }) + }) + + db, err := sql.Open("sqlite3_BenchmarkCustomFunctions", ":memory:") + if err != nil { + b.Fatal("Failed to open database:", err) + } + defer db.Close() + + b.ResetTimer() + for i := 0; i < b.N; i++ { + var i int64 + err = db.QueryRow("SELECT custom_add(1,2)").Scan(&i) + if err != nil { + b.Fatal("Failed to run custom add:", err) + } + } +} diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/.travis.yml b/vendor/github.com/matttproud/golang_protobuf_extensions/.travis.yml new file mode 100644 index 000000000000..f1309c9f8c6a --- /dev/null +++ b/vendor/github.com/matttproud/golang_protobuf_extensions/.travis.yml @@ -0,0 +1,2 @@ +language: go + diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/README.md b/vendor/github.com/matttproud/golang_protobuf_extensions/README.md new file mode 100644 index 000000000000..751ee69673f0 --- /dev/null +++ b/vendor/github.com/matttproud/golang_protobuf_extensions/README.md @@ -0,0 +1,20 @@ +# Overview +This repository provides various Protocol Buffer extensions for the Go +language (golang), namely support for record length-delimited message +streaming. + +| Java | Go | +| ------------------------------ | --------------------- | +| MessageLite#parseDelimitedFrom | pbutil.ReadDelimited | +| MessageLite#writeDelimitedTo | pbutil.WriteDelimited | + +Because [Code Review 9102043](https://codereview.appspot.com/9102043/) is +destined to never be merged into mainline (i.e., never be promoted to formal +[goprotobuf features](https://github.com/golang/protobuf)), this repository +will live here in the wild. + +# Documentation +We have [generated Go Doc documentation](http://godoc.org/github.com/matttproud/golang_protobuf_extensions/pbutil) here. 
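+# Usage
+
+As a quick orientation, here is a minimal sketch of the round trip described above. The `Empty` message from goprotobuf's `proto/testdata` package stands in for any `proto.Message`, and error handling is collapsed into `panic` purely for brevity:
+
+```go
+package main
+
+import (
+	"bytes"
+	"fmt"
+
+	"github.com/golang/protobuf/proto/testdata"
+	"github.com/matttproud/golang_protobuf_extensions/pbutil"
+)
+
+func main() {
+	var buf bytes.Buffer
+
+	// WriteDelimited prefixes the marshaled record with its varint-encoded length.
+	if _, err := pbutil.WriteDelimited(&buf, &testdata.Empty{}); err != nil {
+		panic(err)
+	}
+
+	// ReadDelimited consumes exactly one length-delimited record from the reader.
+	msg := &testdata.Empty{}
+	n, err := pbutil.ReadDelimited(&buf, msg)
+	if err != nil {
+		panic(err)
+	}
+	fmt.Printf("round-tripped one delimited message (%d bytes read)\n", n)
+}
+```
+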
+ +# Testing +[![Build Status](https://travis-ci.org/matttproud/golang_protobuf_extensions.png?branch=master)](https://travis-ci.org/matttproud/golang_protobuf_extensions) diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/BUILD b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/BUILD index 3a224c790af2..4ba71b2f859d 100644 --- a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/BUILD +++ b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/BUILD @@ -1,9 +1,4 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "go_default_library", @@ -13,9 +8,25 @@ go_library( "encode.go", ], importpath = "github.com/matttproud/golang_protobuf_extensions/pbutil", + visibility = ["//visibility:public"], deps = ["//vendor/github.com/golang/protobuf/proto:go_default_library"], ) +go_test( + name = "go_default_test", + srcs = [ + "all_test.go", + "fixtures_test.go", + ], + importpath = "github.com/matttproud/golang_protobuf_extensions/pbutil", + library = ":go_default_library", + deps = [ + "//vendor/github.com/golang/protobuf/proto:go_default_library", + "//vendor/github.com/golang/protobuf/proto/testdata:go_default_library", + "//vendor/github.com/matttproud/golang_protobuf_extensions/pbtest:go_default_library", + ], +) + filegroup( name = "package-srcs", srcs = glob(["**"]), @@ -27,4 +38,5 @@ filegroup( name = "all-srcs", srcs = [":package-srcs"], tags = ["automanaged"], + visibility = ["//visibility:public"], ) diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/all_test.go b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/all_test.go new file mode 100644 index 000000000000..094156e66c58 --- /dev/null +++ b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/all_test.go @@ -0,0 +1,320 @@ +// Copyright 2013 Matt T. Proud +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pbutil + +import ( + "bytes" + "math/rand" + "reflect" + "testing" + "testing/quick" + + "github.com/matttproud/golang_protobuf_extensions/pbtest" + + . "github.com/golang/protobuf/proto" + . "github.com/golang/protobuf/proto/testdata" +) + +func TestWriteDelimited(t *testing.T) { + for _, test := range []struct { + msg Message + buf []byte + n int + err error + }{ + { + msg: &Empty{}, + n: 1, + buf: []byte{0}, + }, + { + msg: &GoEnum{Foo: FOO_FOO1.Enum()}, + n: 3, + buf: []byte{2, 8, 1}, + }, + { + msg: &Strings{ + StringField: String(`This is my gigantic, unhappy string. It exceeds +the encoding size of a single byte varint. We are using it to fuzz test the +correctness of the header decoding mechanisms, which may prove problematic. +I expect it may. 
Let's hope you enjoy testing as much as we do.`), + }, + n: 271, + buf: []byte{141, 2, 10, 138, 2, 84, 104, 105, 115, 32, 105, 115, 32, 109, + 121, 32, 103, 105, 103, 97, 110, 116, 105, 99, 44, 32, 117, 110, 104, + 97, 112, 112, 121, 32, 115, 116, 114, 105, 110, 103, 46, 32, 32, 73, + 116, 32, 101, 120, 99, 101, 101, 100, 115, 10, 116, 104, 101, 32, 101, + 110, 99, 111, 100, 105, 110, 103, 32, 115, 105, 122, 101, 32, 111, 102, + 32, 97, 32, 115, 105, 110, 103, 108, 101, 32, 98, 121, 116, 101, 32, + 118, 97, 114, 105, 110, 116, 46, 32, 32, 87, 101, 32, 97, 114, 101, 32, + 117, 115, 105, 110, 103, 32, 105, 116, 32, 116, 111, 32, 102, 117, 122, + 122, 32, 116, 101, 115, 116, 32, 116, 104, 101, 10, 99, 111, 114, 114, + 101, 99, 116, 110, 101, 115, 115, 32, 111, 102, 32, 116, 104, 101, 32, + 104, 101, 97, 100, 101, 114, 32, 100, 101, 99, 111, 100, 105, 110, 103, + 32, 109, 101, 99, 104, 97, 110, 105, 115, 109, 115, 44, 32, 119, 104, + 105, 99, 104, 32, 109, 97, 121, 32, 112, 114, 111, 118, 101, 32, 112, + 114, 111, 98, 108, 101, 109, 97, 116, 105, 99, 46, 10, 73, 32, 101, 120, + 112, 101, 99, 116, 32, 105, 116, 32, 109, 97, 121, 46, 32, 32, 76, 101, + 116, 39, 115, 32, 104, 111, 112, 101, 32, 121, 111, 117, 32, 101, 110, + 106, 111, 121, 32, 116, 101, 115, 116, 105, 110, 103, 32, 97, 115, 32, + 109, 117, 99, 104, 32, 97, 115, 32, 119, 101, 32, 100, 111, 46}, + }, + } { + var buf bytes.Buffer + if n, err := WriteDelimited(&buf, test.msg); n != test.n || err != test.err { + t.Fatalf("WriteDelimited(buf, %#v) = %v, %v; want %v, %v", test.msg, n, err, test.n, test.err) + } + if out := buf.Bytes(); !bytes.Equal(out, test.buf) { + t.Fatalf("WriteDelimited(buf, %#v); buf = %v; want %v", test.msg, out, test.buf) + } + } +} + +func TestReadDelimited(t *testing.T) { + for _, test := range []struct { + buf []byte + msg Message + n int + err error + }{ + { + buf: []byte{0}, + msg: &Empty{}, + n: 1, + }, + { + n: 3, + buf: []byte{2, 8, 1}, + msg: &GoEnum{Foo: FOO_FOO1.Enum()}, + }, + { + buf: []byte{141, 2, 10, 138, 2, 84, 104, 105, 115, 32, 105, 115, 32, 109, + 121, 32, 103, 105, 103, 97, 110, 116, 105, 99, 44, 32, 117, 110, 104, + 97, 112, 112, 121, 32, 115, 116, 114, 105, 110, 103, 46, 32, 32, 73, + 116, 32, 101, 120, 99, 101, 101, 100, 115, 10, 116, 104, 101, 32, 101, + 110, 99, 111, 100, 105, 110, 103, 32, 115, 105, 122, 101, 32, 111, 102, + 32, 97, 32, 115, 105, 110, 103, 108, 101, 32, 98, 121, 116, 101, 32, + 118, 97, 114, 105, 110, 116, 46, 32, 32, 87, 101, 32, 97, 114, 101, 32, + 117, 115, 105, 110, 103, 32, 105, 116, 32, 116, 111, 32, 102, 117, 122, + 122, 32, 116, 101, 115, 116, 32, 116, 104, 101, 10, 99, 111, 114, 114, + 101, 99, 116, 110, 101, 115, 115, 32, 111, 102, 32, 116, 104, 101, 32, + 104, 101, 97, 100, 101, 114, 32, 100, 101, 99, 111, 100, 105, 110, 103, + 32, 109, 101, 99, 104, 97, 110, 105, 115, 109, 115, 44, 32, 119, 104, + 105, 99, 104, 32, 109, 97, 121, 32, 112, 114, 111, 118, 101, 32, 112, + 114, 111, 98, 108, 101, 109, 97, 116, 105, 99, 46, 10, 73, 32, 101, 120, + 112, 101, 99, 116, 32, 105, 116, 32, 109, 97, 121, 46, 32, 32, 76, 101, + 116, 39, 115, 32, 104, 111, 112, 101, 32, 121, 111, 117, 32, 101, 110, + 106, 111, 121, 32, 116, 101, 115, 116, 105, 110, 103, 32, 97, 115, 32, + 109, 117, 99, 104, 32, 97, 115, 32, 119, 101, 32, 100, 111, 46}, + msg: &Strings{ + StringField: String(`This is my gigantic, unhappy string. It exceeds +the encoding size of a single byte varint. 
We are using it to fuzz test the +correctness of the header decoding mechanisms, which may prove problematic. +I expect it may. Let's hope you enjoy testing as much as we do.`), + }, + n: 271, + }, + } { + msg := Clone(test.msg) + msg.Reset() + if n, err := ReadDelimited(bytes.NewBuffer(test.buf), msg); n != test.n || err != test.err { + t.Fatalf("ReadDelimited(%v, msg) = %v, %v; want %v, %v", test.buf, n, err, test.n, test.err) + } + if !Equal(msg, test.msg) { + t.Fatalf("ReadDelimited(%v, msg); msg = %v; want %v", test.buf, msg, test.msg) + } + } +} + +func TestEndToEndValid(t *testing.T) { + for _, test := range [][]Message{ + {&Empty{}}, + {&GoEnum{Foo: FOO_FOO1.Enum()}, &Empty{}, &GoEnum{Foo: FOO_FOO1.Enum()}}, + {&GoEnum{Foo: FOO_FOO1.Enum()}}, + {&Strings{ + StringField: String(`This is my gigantic, unhappy string. It exceeds +the encoding size of a single byte varint. We are using it to fuzz test the +correctness of the header decoding mechanisms, which may prove problematic. +I expect it may. Let's hope you enjoy testing as much as we do.`), + }}, + } { + var buf bytes.Buffer + var written int + for i, msg := range test { + n, err := WriteDelimited(&buf, msg) + if err != nil { + // Assumption: TestReadDelimited and TestWriteDelimited are sufficient + // and inputs for this test are explicitly exercised there. + t.Fatalf("WriteDelimited(buf, %v[%d]) = ?, %v; wanted ?, nil", test, i, err) + } + written += n + } + var read int + for i, msg := range test { + out := Clone(msg) + out.Reset() + n, _ := ReadDelimited(&buf, out) + // Decide to do EOF checking? + read += n + if !Equal(out, msg) { + t.Fatalf("out = %v; want %v[%d] = %#v", out, test, i, msg) + } + } + if read != written { + t.Fatalf("%v read = %d; want %d", test, read, written) + } + } +} + +// rndMessage generates a random valid Protocol Buffer message. +func rndMessage(r *rand.Rand) Message { + var t reflect.Type + switch v := rand.Intn(23); v { + // TODO(br): Uncomment the elements below once fix is incorporated, except + // for the elements marked as patently incompatible. 
+ // case 0: + // t = reflect.TypeOf(&GoEnum{}) + // break + // case 1: + // t = reflect.TypeOf(&GoTestField{}) + // break + case 2: + t = reflect.TypeOf(&GoTest{}) + break + // case 3: + // t = reflect.TypeOf(&GoSkipTest{}) + // break + // case 4: + // t = reflect.TypeOf(&NonPackedTest{}) + // break + // case 5: + // t = reflect.TypeOf(&PackedTest{}) + // break + case 6: + t = reflect.TypeOf(&MaxTag{}) + break + case 7: + t = reflect.TypeOf(&OldMessage{}) + break + case 8: + t = reflect.TypeOf(&NewMessage{}) + break + case 9: + t = reflect.TypeOf(&InnerMessage{}) + break + case 10: + t = reflect.TypeOf(&OtherMessage{}) + break + case 11: + // PATENTLY INVALID FOR FUZZ GENERATION + // t = reflect.TypeOf(&MyMessage{}) + break + // case 12: + // t = reflect.TypeOf(&Ext{}) + // break + case 13: + // PATENTLY INVALID FOR FUZZ GENERATION + // t = reflect.TypeOf(&MyMessageSet{}) + break + // case 14: + // t = reflect.TypeOf(&Empty{}) + // break + // case 15: + // t = reflect.TypeOf(&MessageList{}) + // break + // case 16: + // t = reflect.TypeOf(&Strings{}) + // break + // case 17: + // t = reflect.TypeOf(&Defaults{}) + // break + // case 17: + // t = reflect.TypeOf(&SubDefaults{}) + // break + // case 18: + // t = reflect.TypeOf(&RepeatedEnum{}) + // break + case 19: + t = reflect.TypeOf(&MoreRepeated{}) + break + // case 20: + // t = reflect.TypeOf(&GroupOld{}) + // break + // case 21: + // t = reflect.TypeOf(&GroupNew{}) + // break + case 22: + t = reflect.TypeOf(&FloatingPoint{}) + break + default: + // TODO(br): Replace with an unreachable once fixed. + t = reflect.TypeOf(&GoTest{}) + break + } + if t == nil { + t = reflect.TypeOf(&GoTest{}) + } + v, ok := quick.Value(t, r) + if !ok { + panic("attempt to generate illegal item; consult item 11") + } + if err := pbtest.SanitizeGenerated(v.Interface().(Message)); err != nil { + panic(err) + } + return v.Interface().(Message) +} + +// rndMessages generates several random Protocol Buffer messages. +func rndMessages(r *rand.Rand) []Message { + n := r.Intn(128) + out := make([]Message, 0, n) + for i := 0; i < n; i++ { + out = append(out, rndMessage(r)) + } + return out +} + +func TestFuzz(t *testing.T) { + rnd := rand.New(rand.NewSource(42)) + check := func() bool { + messages := rndMessages(rnd) + var buf bytes.Buffer + var written int + for i, msg := range messages { + n, err := WriteDelimited(&buf, msg) + if err != nil { + t.Fatalf("WriteDelimited(buf, %v[%d]) = ?, %v; wanted ?, nil", messages, i, err) + } + written += n + } + var read int + for i, msg := range messages { + out := Clone(msg) + out.Reset() + n, _ := ReadDelimited(&buf, out) + read += n + if !Equal(out, msg) { + t.Fatalf("out = %v; want %v[%d] = %#v", out, messages, i, msg) + } + } + if read != written { + t.Fatalf("%v read = %d; want %d", messages, read, written) + } + return true + } + if err := quick.Check(check, nil); err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/fixtures_test.go b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/fixtures_test.go new file mode 100644 index 000000000000..d6d9b25594f2 --- /dev/null +++ b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/fixtures_test.go @@ -0,0 +1,103 @@ +// Copyright 2010 The Go Authors. All rights reserved. 
+// http://github.com/golang/protobuf/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package pbutil + +import ( + . "github.com/golang/protobuf/proto" + . "github.com/golang/protobuf/proto/testdata" +) + +// FROM https://github.com/golang/protobuf/blob/master/proto/all_test.go. + +func initGoTestField() *GoTestField { + f := new(GoTestField) + f.Label = String("label") + f.Type = String("type") + return f +} + +// These are all structurally equivalent but the tag numbers differ. +// (It's remarkable that required, optional, and repeated all have +// 8 letters.) 
+func initGoTest_RequiredGroup() *GoTest_RequiredGroup { + return &GoTest_RequiredGroup{ + RequiredField: String("required"), + } +} + +func initGoTest_OptionalGroup() *GoTest_OptionalGroup { + return &GoTest_OptionalGroup{ + RequiredField: String("optional"), + } +} + +func initGoTest_RepeatedGroup() *GoTest_RepeatedGroup { + return &GoTest_RepeatedGroup{ + RequiredField: String("repeated"), + } +} + +func initGoTest(setdefaults bool) *GoTest { + pb := new(GoTest) + if setdefaults { + pb.F_BoolDefaulted = Bool(Default_GoTest_F_BoolDefaulted) + pb.F_Int32Defaulted = Int32(Default_GoTest_F_Int32Defaulted) + pb.F_Int64Defaulted = Int64(Default_GoTest_F_Int64Defaulted) + pb.F_Fixed32Defaulted = Uint32(Default_GoTest_F_Fixed32Defaulted) + pb.F_Fixed64Defaulted = Uint64(Default_GoTest_F_Fixed64Defaulted) + pb.F_Uint32Defaulted = Uint32(Default_GoTest_F_Uint32Defaulted) + pb.F_Uint64Defaulted = Uint64(Default_GoTest_F_Uint64Defaulted) + pb.F_FloatDefaulted = Float32(Default_GoTest_F_FloatDefaulted) + pb.F_DoubleDefaulted = Float64(Default_GoTest_F_DoubleDefaulted) + pb.F_StringDefaulted = String(Default_GoTest_F_StringDefaulted) + pb.F_BytesDefaulted = Default_GoTest_F_BytesDefaulted + pb.F_Sint32Defaulted = Int32(Default_GoTest_F_Sint32Defaulted) + pb.F_Sint64Defaulted = Int64(Default_GoTest_F_Sint64Defaulted) + } + + pb.Kind = GoTest_TIME.Enum() + pb.RequiredField = initGoTestField() + pb.F_BoolRequired = Bool(true) + pb.F_Int32Required = Int32(3) + pb.F_Int64Required = Int64(6) + pb.F_Fixed32Required = Uint32(32) + pb.F_Fixed64Required = Uint64(64) + pb.F_Uint32Required = Uint32(3232) + pb.F_Uint64Required = Uint64(6464) + pb.F_FloatRequired = Float32(3232) + pb.F_DoubleRequired = Float64(6464) + pb.F_StringRequired = String("string") + pb.F_BytesRequired = []byte("bytes") + pb.F_Sint32Required = Int32(-32) + pb.F_Sint64Required = Int64(-64) + pb.Requiredgroup = initGoTest_RequiredGroup() + + return pb +} diff --git a/vendor/github.com/petar/GoLLRB/.gitignore b/vendor/github.com/petar/GoLLRB/.gitignore new file mode 100644 index 000000000000..e333b2dbf73f --- /dev/null +++ b/vendor/github.com/petar/GoLLRB/.gitignore @@ -0,0 +1,23 @@ +syntax:glob +*.[568ao] +*.ao +*.so +*.pyc +*.swp +*.swo +._* +.nfs.* +[568a].out +*~ +*.orig +*.pb.go +core +_obj +_test +src/pkg/Make.deps +_testmain.go + +syntax:regexp +^pkg/ +^src/cmd/(.*)/6?\1$ +^.*/core.[0-9]*$ diff --git a/vendor/github.com/petar/GoLLRB/AUTHORS b/vendor/github.com/petar/GoLLRB/AUTHORS new file mode 100644 index 000000000000..78d1de495661 --- /dev/null +++ b/vendor/github.com/petar/GoLLRB/AUTHORS @@ -0,0 +1,4 @@ +Petar Maymounkov +Vadim Vygonets +Ian Smith +Martin Bruse diff --git a/vendor/github.com/petar/GoLLRB/LICENSE b/vendor/github.com/petar/GoLLRB/LICENSE new file mode 100644 index 000000000000..b75312c787d6 --- /dev/null +++ b/vendor/github.com/petar/GoLLRB/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2010, Petar Maymounkov +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + +(*) Redistributions of source code must retain the above copyright notice, this list +of conditions and the following disclaimer. + +(*) Redistributions in binary form must reproduce the above copyright notice, this +list of conditions and the following disclaimer in the documentation and/or +other materials provided with the distribution. 
+
+(*) Neither the name of Petar Maymounkov nor the names of its contributors may be
+used to endorse or promote products derived from this software without specific
+prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/petar/GoLLRB/README.md b/vendor/github.com/petar/GoLLRB/README.md
new file mode 100644
index 000000000000..742ca0bd5604
--- /dev/null
+++ b/vendor/github.com/petar/GoLLRB/README.md
@@ -0,0 +1,66 @@
+# GoLLRB
+
+GoLLRB is a Left-Leaning Red-Black (LLRB) implementation of 2-3 balanced binary
+search trees in the Go language.
+
+## Overview
+
+As of this writing and to the best of the author's knowledge,
+Go still does not have a balanced binary search tree (BBST) data structure.
+These data structures are quite useful in a variety of cases. A BBST maintains
+elements in sorted order under dynamic updates (inserts and deletes) and can
+support various order-specific queries. Furthermore, in practice one often
+implements other common data structures, like priority queues, using BBSTs.
+
+2-3 trees (a type of BBST), as well as the runtime-similar 2-3-4 trees, are the
+de facto standard BBST algorithms found in implementations of Python, Java,
+and other libraries. The LLRB method of implementing 2-3 trees is a recent
+improvement over the traditional implementation, discovered in 2008 by
+Robert Sedgewick of Princeton University.
+
+GoLLRB is a Go implementation of LLRB 2-3 trees.
+
+## Maturity
+
+GoLLRB has been used in some pretty heavy-weight machine learning tasks over many gigabytes of data.
+I consider it to be in stable, perhaps even production, shape. There are no known bugs.
+
+## Installation
+
+With a working Go installation, simply run `go get github.com/petar/GoLLRB/llrb`
+
+## Example
+
+    package main
+
+    import (
+        "fmt"
+
+        "github.com/petar/GoLLRB/llrb"
+    )
+
+    func main() {
+        tree := llrb.New()
+        tree.ReplaceOrInsert(llrb.Int(1))
+        tree.ReplaceOrInsert(llrb.Int(2))
+        tree.ReplaceOrInsert(llrb.Int(3))
+        tree.ReplaceOrInsert(llrb.Int(4))
+        tree.DeleteMin()
+        tree.Delete(llrb.Int(4))
+        tree.AscendGreaterOrEqual(llrb.Int(0), func(i llrb.Item) bool {
+            fmt.Println(int(i.(llrb.Int)))
+            return true
+        })
+    }
+
+## About
+
+GoLLRB was written by [Petar Maymounkov](http://pdos.csail.mit.edu/~petar/).
+
+Follow me on [Twitter @maymounkov](http://www.twitter.com/maymounkov)!
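+
+## Range queries
+
+The range iterators use the same callback pattern. A small sketch, using the
+`llrb.String` helper type from `llrb/util.go`:
+
+    tree := llrb.New()
+    tree.ReplaceOrInsert(llrb.String("ab"))
+    tree.ReplaceOrInsert(llrb.String("abc"))
+    tree.ReplaceOrInsert(llrb.String("b"))
+    // Visit every key k with "ab" <= k < "ac", in ascending order.
+    tree.AscendRange(llrb.String("ab"), llrb.String("ac"), func(i llrb.Item) bool {
+        fmt.Println(string(i.(llrb.String)))
+        return true
+    })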
diff --git a/vendor/github.com/petar/GoLLRB/llrb/BUILD b/vendor/github.com/petar/GoLLRB/llrb/BUILD new file mode 100644 index 000000000000..187bc94e2a93 --- /dev/null +++ b/vendor/github.com/petar/GoLLRB/llrb/BUILD @@ -0,0 +1,38 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "avgvar.go", + "iterator.go", + "llrb.go", + "llrb-stats.go", + "util.go", + ], + importpath = "github.com/petar/GoLLRB/llrb", + visibility = ["//visibility:public"], +) + +go_test( + name = "go_default_test", + srcs = [ + "iterator_test.go", + "llrb_test.go", + ], + importpath = "github.com/petar/GoLLRB/llrb", + library = ":go_default_library", +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/petar/GoLLRB/llrb/avgvar.go b/vendor/github.com/petar/GoLLRB/llrb/avgvar.go new file mode 100644 index 000000000000..2d7e2a3262d2 --- /dev/null +++ b/vendor/github.com/petar/GoLLRB/llrb/avgvar.go @@ -0,0 +1,39 @@ +// Copyright 2010 Petar Maymounkov. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package llrb + +import "math" + +// avgVar maintains the average and variance of a stream of numbers +// in a space-efficient manner. +type avgVar struct { + count int64 + sum, sumsq float64 +} + +func (av *avgVar) Init() { + av.count = 0 + av.sum = 0.0 + av.sumsq = 0.0 +} + +func (av *avgVar) Add(sample float64) { + av.count++ + av.sum += sample + av.sumsq += sample * sample +} + +func (av *avgVar) GetCount() int64 { return av.count } + +func (av *avgVar) GetAvg() float64 { return av.sum / float64(av.count) } + +func (av *avgVar) GetTotal() float64 { return av.sum } + +func (av *avgVar) GetVar() float64 { + a := av.GetAvg() + return av.sumsq/float64(av.count) - a*a +} + +func (av *avgVar) GetStdDev() float64 { return math.Sqrt(av.GetVar()) } diff --git a/vendor/github.com/petar/GoLLRB/llrb/iterator.go b/vendor/github.com/petar/GoLLRB/llrb/iterator.go new file mode 100644 index 000000000000..ee7b27f442b0 --- /dev/null +++ b/vendor/github.com/petar/GoLLRB/llrb/iterator.go @@ -0,0 +1,93 @@ +package llrb + +type ItemIterator func(i Item) bool + +//func (t *Tree) Ascend(iterator ItemIterator) { +// t.AscendGreaterOrEqual(Inf(-1), iterator) +//} + +func (t *LLRB) AscendRange(greaterOrEqual, lessThan Item, iterator ItemIterator) { + t.ascendRange(t.root, greaterOrEqual, lessThan, iterator) +} + +func (t *LLRB) ascendRange(h *Node, inf, sup Item, iterator ItemIterator) bool { + if h == nil { + return true + } + if !less(h.Item, sup) { + return t.ascendRange(h.Left, inf, sup, iterator) + } + if less(h.Item, inf) { + return t.ascendRange(h.Right, inf, sup, iterator) + } + + if !t.ascendRange(h.Left, inf, sup, iterator) { + return false + } + if !iterator(h.Item) { + return false + } + return t.ascendRange(h.Right, inf, sup, iterator) +} + +// AscendGreaterOrEqual will call iterator once for each element greater or equal to +// pivot in ascending order. It will stop whenever the iterator returns false. 
+func (t *LLRB) AscendGreaterOrEqual(pivot Item, iterator ItemIterator) {
+	t.ascendGreaterOrEqual(t.root, pivot, iterator)
+}
+
+func (t *LLRB) ascendGreaterOrEqual(h *Node, pivot Item, iterator ItemIterator) bool {
+	if h == nil {
+		return true
+	}
+	if !less(h.Item, pivot) {
+		if !t.ascendGreaterOrEqual(h.Left, pivot, iterator) {
+			return false
+		}
+		if !iterator(h.Item) {
+			return false
+		}
+	}
+	return t.ascendGreaterOrEqual(h.Right, pivot, iterator)
+}
+
+// AscendLessThan will call iterator once for each element strictly less than
+// the pivot in ascending order. It will stop whenever the iterator returns false.
+func (t *LLRB) AscendLessThan(pivot Item, iterator ItemIterator) {
+	t.ascendLessThan(t.root, pivot, iterator)
+}
+
+func (t *LLRB) ascendLessThan(h *Node, pivot Item, iterator ItemIterator) bool {
+	if h == nil {
+		return true
+	}
+	if !t.ascendLessThan(h.Left, pivot, iterator) {
+		return false
+	}
+	if less(h.Item, pivot) {
+		// Visit the item only while it is still below the pivot, then
+		// continue into the right subtree, where further candidates may
+		// live; items at or above the pivot end the traversal on this path.
+		if !iterator(h.Item) {
+			return false
+		}
+		return t.ascendLessThan(h.Right, pivot, iterator)
+	}
+	return true
+}
+
+// DescendLessOrEqual will call iterator once for each element less than or
+// equal to the pivot in descending order. It will stop whenever the iterator
+// returns false.
+func (t *LLRB) DescendLessOrEqual(pivot Item, iterator ItemIterator) {
+	t.descendLessOrEqual(t.root, pivot, iterator)
+}
+
+func (t *LLRB) descendLessOrEqual(h *Node, pivot Item, iterator ItemIterator) bool {
+	if h == nil {
+		return true
+	}
+	if less(h.Item, pivot) || !less(pivot, h.Item) {
+		if !t.descendLessOrEqual(h.Right, pivot, iterator) {
+			return false
+		}
+		if !iterator(h.Item) {
+			return false
+		}
+	}
+	return t.descendLessOrEqual(h.Left, pivot, iterator)
+}
diff --git a/vendor/github.com/petar/GoLLRB/llrb/iterator_test.go b/vendor/github.com/petar/GoLLRB/llrb/iterator_test.go
new file mode 100644
index 000000000000..db5e12c92eb7
--- /dev/null
+++ b/vendor/github.com/petar/GoLLRB/llrb/iterator_test.go
@@ -0,0 +1,76 @@
+package llrb
+
+import (
+	"reflect"
+	"testing"
+)
+
+func TestAscendGreaterOrEqual(t *testing.T) {
+	tree := New()
+	tree.InsertNoReplace(Int(4))
+	tree.InsertNoReplace(Int(6))
+	tree.InsertNoReplace(Int(1))
+	tree.InsertNoReplace(Int(3))
+	var ary []Item
+	tree.AscendGreaterOrEqual(Int(-1), func(i Item) bool {
+		ary = append(ary, i)
+		return true
+	})
+	expected := []Item{Int(1), Int(3), Int(4), Int(6)}
+	if !reflect.DeepEqual(ary, expected) {
+		t.Errorf("expected %v but got %v", expected, ary)
+	}
+	ary = nil
+	tree.AscendGreaterOrEqual(Int(3), func(i Item) bool {
+		ary = append(ary, i)
+		return true
+	})
+	expected = []Item{Int(3), Int(4), Int(6)}
+	if !reflect.DeepEqual(ary, expected) {
+		t.Errorf("expected %v but got %v", expected, ary)
+	}
+	ary = nil
+	tree.AscendGreaterOrEqual(Int(2), func(i Item) bool {
+		ary = append(ary, i)
+		return true
+	})
+	expected = []Item{Int(3), Int(4), Int(6)}
+	if !reflect.DeepEqual(ary, expected) {
+		t.Errorf("expected %v but got %v", expected, ary)
+	}
+}
+
+func TestDescendLessOrEqual(t *testing.T) {
+	tree := New()
+	tree.InsertNoReplace(Int(4))
+	tree.InsertNoReplace(Int(6))
+	tree.InsertNoReplace(Int(1))
+	tree.InsertNoReplace(Int(3))
+	var ary []Item
+	tree.DescendLessOrEqual(Int(10), func(i Item) bool {
+		ary = append(ary, i)
+		return true
+	})
+	expected := []Item{Int(6), Int(4), Int(3), Int(1)}
+	if !reflect.DeepEqual(ary, expected) {
+		t.Errorf("expected %v but got %v", expected, ary)
+	}
+	ary = nil
+	tree.DescendLessOrEqual(Int(4), func(i Item) bool {
+		ary = append(ary, i)
+		return true
+	})
+	expected = []Item{Int(4), Int(3), Int(1)}
+	if !reflect.DeepEqual(ary, expected) {
+		t.Errorf("expected %v but got %v", expected, ary)
+	}
+	ary = nil
+	tree.DescendLessOrEqual(Int(5), func(i Item) bool {
+		ary = append(ary, i)
+		return true
+	})
+	expected = []Item{Int(4), Int(3), Int(1)}
+	if !reflect.DeepEqual(ary, expected) {
+		t.Errorf("expected %v but got %v", expected, ary)
+	}
+}
diff --git a/vendor/github.com/petar/GoLLRB/llrb/llrb-stats.go b/vendor/github.com/petar/GoLLRB/llrb/llrb-stats.go
new file mode 100644
index 000000000000..47126a3be967
--- /dev/null
+++ b/vendor/github.com/petar/GoLLRB/llrb/llrb-stats.go
@@ -0,0 +1,46 @@
+// Copyright 2010 Petar Maymounkov. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package llrb
+
+// GetHeight() returns the item in the tree with key @key, and its depth in the tree
+func (t *LLRB) GetHeight(key Item) (result Item, depth int) {
+	return t.getHeight(t.root, key)
+}
+
+func (t *LLRB) getHeight(h *Node, item Item) (Item, int) {
+	if h == nil {
+		return nil, 0
+	}
+	if less(item, h.Item) {
+		result, depth := t.getHeight(h.Left, item)
+		return result, depth + 1
+	}
+	if less(h.Item, item) {
+		result, depth := t.getHeight(h.Right, item)
+		return result, depth + 1
+	}
+	return h.Item, 0
+}
+
+// HeightStats() returns the average and standard deviation of the height
+// of elements in the tree
+func (t *LLRB) HeightStats() (avg, stddev float64) {
+	av := &avgVar{}
+	heightStats(t.root, 0, av)
+	return av.GetAvg(), av.GetStdDev()
+}
+
+func heightStats(h *Node, d int, av *avgVar) {
+	if h == nil {
+		return
+	}
+	av.Add(float64(d))
+	if h.Left != nil {
+		heightStats(h.Left, d+1, av)
+	}
+	if h.Right != nil {
+		heightStats(h.Right, d+1, av)
+	}
+}
diff --git a/vendor/github.com/petar/GoLLRB/llrb/llrb.go b/vendor/github.com/petar/GoLLRB/llrb/llrb.go
new file mode 100644
index 000000000000..81373fbfdf09
--- /dev/null
+++ b/vendor/github.com/petar/GoLLRB/llrb/llrb.go
@@ -0,0 +1,456 @@
+// Copyright 2010 Petar Maymounkov. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// A Left-Leaning Red-Black (LLRB) implementation of 2-3 balanced binary search trees,
+// based on the following work:
+//
+//   http://www.cs.princeton.edu/~rs/talks/LLRB/08Penn.pdf
+//   http://www.cs.princeton.edu/~rs/talks/LLRB/LLRB.pdf
+//   http://www.cs.princeton.edu/~rs/talks/LLRB/Java/RedBlackBST.java
+//
+//  2-3 trees (and the run-time equivalent 2-3-4 trees) are the de facto standard BST
+//  algorithms found in implementations of Python, Java, and other libraries. The LLRB
+//  implementation of 2-3 trees is a recent improvement on the traditional implementation,
+//  observed and documented by Robert Sedgewick.
+//
+package llrb

+// LLRB is a Left-Leaning Red-Black (LLRB) implementation of 2-3 trees
+type LLRB struct {
+	count int
+	root  *Node
+}
+
+type Node struct {
+	Item
+	Left, Right *Node // Pointers to left and right child nodes
+	Black       bool  // If set, the color of the link (incoming from the parent) is black
+	// In the LLRB, new nodes are always red, hence the zero-value for node
+}
+
+type Item interface {
+	Less(than Item) bool
+}
+
+//
+func less(x, y Item) bool {
+	if x == pinf {
+		return false
+	}
+	if x == ninf {
+		return true
+	}
+	return x.Less(y)
+}
+
+// Inf returns an Item that is "bigger than" any other item, if sign is positive.
+// Otherwise it returns an Item that is "smaller than" any other item.
+func Inf(sign int) Item { + if sign == 0 { + panic("sign") + } + if sign > 0 { + return pinf + } + return ninf +} + +var ( + ninf = nInf{} + pinf = pInf{} +) + +type nInf struct{} + +func (nInf) Less(Item) bool { + return true +} + +type pInf struct{} + +func (pInf) Less(Item) bool { + return false +} + +// New() allocates a new tree +func New() *LLRB { + return &LLRB{} +} + +// SetRoot sets the root node of the tree. +// It is intended to be used by functions that deserialize the tree. +func (t *LLRB) SetRoot(r *Node) { + t.root = r +} + +// Root returns the root node of the tree. +// It is intended to be used by functions that serialize the tree. +func (t *LLRB) Root() *Node { + return t.root +} + +// Len returns the number of nodes in the tree. +func (t *LLRB) Len() int { return t.count } + +// Has returns true if the tree contains an element whose order is the same as that of key. +func (t *LLRB) Has(key Item) bool { + return t.Get(key) != nil +} + +// Get retrieves an element from the tree whose order is the same as that of key. +func (t *LLRB) Get(key Item) Item { + h := t.root + for h != nil { + switch { + case less(key, h.Item): + h = h.Left + case less(h.Item, key): + h = h.Right + default: + return h.Item + } + } + return nil +} + +// Min returns the minimum element in the tree. +func (t *LLRB) Min() Item { + h := t.root + if h == nil { + return nil + } + for h.Left != nil { + h = h.Left + } + return h.Item +} + +// Max returns the maximum element in the tree. +func (t *LLRB) Max() Item { + h := t.root + if h == nil { + return nil + } + for h.Right != nil { + h = h.Right + } + return h.Item +} + +func (t *LLRB) ReplaceOrInsertBulk(items ...Item) { + for _, i := range items { + t.ReplaceOrInsert(i) + } +} + +func (t *LLRB) InsertNoReplaceBulk(items ...Item) { + for _, i := range items { + t.InsertNoReplace(i) + } +} + +// ReplaceOrInsert inserts item into the tree. If an existing +// element has the same order, it is removed from the tree and returned. +func (t *LLRB) ReplaceOrInsert(item Item) Item { + if item == nil { + panic("inserting nil item") + } + var replaced Item + t.root, replaced = t.replaceOrInsert(t.root, item) + t.root.Black = true + if replaced == nil { + t.count++ + } + return replaced +} + +func (t *LLRB) replaceOrInsert(h *Node, item Item) (*Node, Item) { + if h == nil { + return newNode(item), nil + } + + h = walkDownRot23(h) + + var replaced Item + if less(item, h.Item) { // BUG + h.Left, replaced = t.replaceOrInsert(h.Left, item) + } else if less(h.Item, item) { + h.Right, replaced = t.replaceOrInsert(h.Right, item) + } else { + replaced, h.Item = h.Item, item + } + + h = walkUpRot23(h) + + return h, replaced +} + +// InsertNoReplace inserts item into the tree. If an existing +// element has the same order, both elements remain in the tree. 
+func (t *LLRB) InsertNoReplace(item Item) {
+	if item == nil {
+		panic("inserting nil item")
+	}
+	t.root = t.insertNoReplace(t.root, item)
+	t.root.Black = true
+	t.count++
+}
+
+func (t *LLRB) insertNoReplace(h *Node, item Item) *Node {
+	if h == nil {
+		return newNode(item)
+	}
+
+	h = walkDownRot23(h)
+
+	if less(item, h.Item) {
+		h.Left = t.insertNoReplace(h.Left, item)
+	} else {
+		h.Right = t.insertNoReplace(h.Right, item)
+	}
+
+	return walkUpRot23(h)
+}
+
+// Rotation driver routines for 2-3 algorithm
+
+func walkDownRot23(h *Node) *Node { return h }
+
+func walkUpRot23(h *Node) *Node {
+	if isRed(h.Right) && !isRed(h.Left) {
+		h = rotateLeft(h)
+	}
+
+	if isRed(h.Left) && isRed(h.Left.Left) {
+		h = rotateRight(h)
+	}
+
+	if isRed(h.Left) && isRed(h.Right) {
+		flip(h)
+	}
+
+	return h
+}
+
+// Rotation driver routines for 2-3-4 algorithm
+
+func walkDownRot234(h *Node) *Node {
+	if isRed(h.Left) && isRed(h.Right) {
+		flip(h)
+	}
+
+	return h
+}
+
+func walkUpRot234(h *Node) *Node {
+	if isRed(h.Right) && !isRed(h.Left) {
+		h = rotateLeft(h)
+	}
+
+	if isRed(h.Left) && isRed(h.Left.Left) {
+		h = rotateRight(h)
+	}
+
+	return h
+}
+
+// DeleteMin deletes the minimum element in the tree and returns the
+// deleted item or nil otherwise.
+func (t *LLRB) DeleteMin() Item {
+	var deleted Item
+	t.root, deleted = deleteMin(t.root)
+	if t.root != nil {
+		t.root.Black = true
+	}
+	if deleted != nil {
+		t.count--
+	}
+	return deleted
+}
+
+// deleteMin code for LLRB 2-3 trees
+func deleteMin(h *Node) (*Node, Item) {
+	if h == nil {
+		return nil, nil
+	}
+	if h.Left == nil {
+		return nil, h.Item
+	}
+
+	if !isRed(h.Left) && !isRed(h.Left.Left) {
+		h = moveRedLeft(h)
+	}
+
+	var deleted Item
+	h.Left, deleted = deleteMin(h.Left)
+
+	return fixUp(h), deleted
+}
+
+// DeleteMax deletes the maximum element in the tree and returns
+// the deleted item or nil otherwise.
+func (t *LLRB) DeleteMax() Item {
+	var deleted Item
+	t.root, deleted = deleteMax(t.root)
+	if t.root != nil {
+		t.root.Black = true
+	}
+	if deleted != nil {
+		t.count--
+	}
+	return deleted
+}
+
+func deleteMax(h *Node) (*Node, Item) {
+	if h == nil {
+		return nil, nil
+	}
+	if isRed(h.Left) {
+		h = rotateRight(h)
+	}
+	if h.Right == nil {
+		return nil, h.Item
+	}
+	if !isRed(h.Right) && !isRed(h.Right.Left) {
+		h = moveRedRight(h)
+	}
+	var deleted Item
+	h.Right, deleted = deleteMax(h.Right)
+
+	return fixUp(h), deleted
+}
+
+// Delete deletes an item from the tree whose key equals key.
+// The deleted item is returned; otherwise nil is returned.
+func (t *LLRB) Delete(key Item) Item {
+	var deleted Item
+	t.root, deleted = t.delete(t.root, key)
+	if t.root != nil {
+		t.root.Black = true
+	}
+	if deleted != nil {
+		t.count--
+	}
+	return deleted
+}
+
+func (t *LLRB) delete(h *Node, item Item) (*Node, Item) {
+	var deleted Item
+	if h == nil {
+		return nil, nil
+	}
+	if less(item, h.Item) {
+		if h.Left == nil { // item not present.
Nothing to delete + return h, nil + } + if !isRed(h.Left) && !isRed(h.Left.Left) { + h = moveRedLeft(h) + } + h.Left, deleted = t.delete(h.Left, item) + } else { + if isRed(h.Left) { + h = rotateRight(h) + } + // If @item equals @h.Item and no right children at @h + if !less(h.Item, item) && h.Right == nil { + return nil, h.Item + } + // PETAR: Added 'h.Right != nil' below + if h.Right != nil && !isRed(h.Right) && !isRed(h.Right.Left) { + h = moveRedRight(h) + } + // If @item equals @h.Item, and (from above) 'h.Right != nil' + if !less(h.Item, item) { + var subDeleted Item + h.Right, subDeleted = deleteMin(h.Right) + if subDeleted == nil { + panic("logic") + } + deleted, h.Item = h.Item, subDeleted + } else { // Else, @item is bigger than @h.Item + h.Right, deleted = t.delete(h.Right, item) + } + } + + return fixUp(h), deleted +} + +// Internal node manipulation routines + +func newNode(item Item) *Node { return &Node{Item: item} } + +func isRed(h *Node) bool { + if h == nil { + return false + } + return !h.Black +} + +func rotateLeft(h *Node) *Node { + x := h.Right + if x.Black { + panic("rotating a black link") + } + h.Right = x.Left + x.Left = h + x.Black = h.Black + h.Black = false + return x +} + +func rotateRight(h *Node) *Node { + x := h.Left + if x.Black { + panic("rotating a black link") + } + h.Left = x.Right + x.Right = h + x.Black = h.Black + h.Black = false + return x +} + +// REQUIRE: Left and Right children must be present +func flip(h *Node) { + h.Black = !h.Black + h.Left.Black = !h.Left.Black + h.Right.Black = !h.Right.Black +} + +// REQUIRE: Left and Right children must be present +func moveRedLeft(h *Node) *Node { + flip(h) + if isRed(h.Right.Left) { + h.Right = rotateRight(h.Right) + h = rotateLeft(h) + flip(h) + } + return h +} + +// REQUIRE: Left and Right children must be present +func moveRedRight(h *Node) *Node { + flip(h) + if isRed(h.Left.Left) { + h = rotateRight(h) + flip(h) + } + return h +} + +func fixUp(h *Node) *Node { + if isRed(h.Right) { + h = rotateLeft(h) + } + + if isRed(h.Left) && isRed(h.Left.Left) { + h = rotateRight(h) + } + + if isRed(h.Left) && isRed(h.Right) { + flip(h) + } + + return h +} diff --git a/vendor/github.com/petar/GoLLRB/llrb/llrb_test.go b/vendor/github.com/petar/GoLLRB/llrb/llrb_test.go new file mode 100644 index 000000000000..b7bc97800706 --- /dev/null +++ b/vendor/github.com/petar/GoLLRB/llrb/llrb_test.go @@ -0,0 +1,239 @@ +// Copyright 2010 Petar Maymounkov. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package llrb + +import ( + "math" + "math/rand" + "testing" +) + +func TestCases(t *testing.T) { + tree := New() + tree.ReplaceOrInsert(Int(1)) + tree.ReplaceOrInsert(Int(1)) + if tree.Len() != 1 { + t.Errorf("expecting len 1") + } + if !tree.Has(Int(1)) { + t.Errorf("expecting to find key=1") + } + + tree.Delete(Int(1)) + if tree.Len() != 0 { + t.Errorf("expecting len 0") + } + if tree.Has(Int(1)) { + t.Errorf("not expecting to find key=1") + } + + tree.Delete(Int(1)) + if tree.Len() != 0 { + t.Errorf("expecting len 0") + } + if tree.Has(Int(1)) { + t.Errorf("not expecting to find key=1") + } +} + +func TestReverseInsertOrder(t *testing.T) { + tree := New() + n := 100 + for i := 0; i < n; i++ { + tree.ReplaceOrInsert(Int(n - i)) + } + i := 0 + tree.AscendGreaterOrEqual(Int(0), func(item Item) bool { + i++ + if item.(Int) != Int(i) { + t.Errorf("bad order: got %d, expect %d", item.(Int), i) + } + return true + }) +} + +func TestRange(t *testing.T) { + tree := New() + order := []String{ + "ab", "aba", "abc", "a", "aa", "aaa", "b", "a-", "a!", + } + for _, i := range order { + tree.ReplaceOrInsert(i) + } + k := 0 + tree.AscendRange(String("ab"), String("ac"), func(item Item) bool { + if k > 3 { + t.Fatalf("returned more items than expected") + } + i1 := order[k] + i2 := item.(String) + if i1 != i2 { + t.Errorf("expecting %s, got %s", i1, i2) + } + k++ + return true + }) +} + +func TestRandomInsertOrder(t *testing.T) { + tree := New() + n := 1000 + perm := rand.Perm(n) + for i := 0; i < n; i++ { + tree.ReplaceOrInsert(Int(perm[i])) + } + j := 0 + tree.AscendGreaterOrEqual(Int(0), func(item Item) bool { + if item.(Int) != Int(j) { + t.Fatalf("bad order") + } + j++ + return true + }) +} + +func TestRandomReplace(t *testing.T) { + tree := New() + n := 100 + perm := rand.Perm(n) + for i := 0; i < n; i++ { + tree.ReplaceOrInsert(Int(perm[i])) + } + perm = rand.Perm(n) + for i := 0; i < n; i++ { + if replaced := tree.ReplaceOrInsert(Int(perm[i])); replaced == nil || replaced.(Int) != Int(perm[i]) { + t.Errorf("error replacing") + } + } +} + +func TestRandomInsertSequentialDelete(t *testing.T) { + tree := New() + n := 1000 + perm := rand.Perm(n) + for i := 0; i < n; i++ { + tree.ReplaceOrInsert(Int(perm[i])) + } + for i := 0; i < n; i++ { + tree.Delete(Int(i)) + } +} + +func TestRandomInsertDeleteNonExistent(t *testing.T) { + tree := New() + n := 100 + perm := rand.Perm(n) + for i := 0; i < n; i++ { + tree.ReplaceOrInsert(Int(perm[i])) + } + if tree.Delete(Int(200)) != nil { + t.Errorf("deleted non-existent item") + } + if tree.Delete(Int(-2)) != nil { + t.Errorf("deleted non-existent item") + } + for i := 0; i < n; i++ { + if u := tree.Delete(Int(i)); u == nil || u.(Int) != Int(i) { + t.Errorf("delete failed") + } + } + if tree.Delete(Int(200)) != nil { + t.Errorf("deleted non-existent item") + } + if tree.Delete(Int(-2)) != nil { + t.Errorf("deleted non-existent item") + } +} + +func TestRandomInsertPartialDeleteOrder(t *testing.T) { + tree := New() + n := 100 + perm := rand.Perm(n) + for i := 0; i < n; i++ { + tree.ReplaceOrInsert(Int(perm[i])) + } + for i := 1; i < n-1; i++ { + tree.Delete(Int(i)) + } + j := 0 + tree.AscendGreaterOrEqual(Int(0), func(item Item) bool { + switch j { + case 0: + if item.(Int) != Int(0) { + t.Errorf("expecting 0") + } + case 1: + if item.(Int) != Int(n-1) { + t.Errorf("expecting %d", n-1) + } + } + j++ + return true + }) +} + +func TestRandomInsertStats(t *testing.T) { + tree := New() + n := 100000 + perm := rand.Perm(n) + for i := 0; i < n; i++ { + 
tree.ReplaceOrInsert(Int(perm[i])) + } + avg, _ := tree.HeightStats() + expAvg := math.Log2(float64(n)) - 1.5 + if math.Abs(avg-expAvg) >= 2.0 { + t.Errorf("too much deviation from expected average height") + } +} + +func BenchmarkInsert(b *testing.B) { + tree := New() + for i := 0; i < b.N; i++ { + tree.ReplaceOrInsert(Int(b.N - i)) + } +} + +func BenchmarkDelete(b *testing.B) { + b.StopTimer() + tree := New() + for i := 0; i < b.N; i++ { + tree.ReplaceOrInsert(Int(b.N - i)) + } + b.StartTimer() + for i := 0; i < b.N; i++ { + tree.Delete(Int(i)) + } +} + +func BenchmarkDeleteMin(b *testing.B) { + b.StopTimer() + tree := New() + for i := 0; i < b.N; i++ { + tree.ReplaceOrInsert(Int(b.N - i)) + } + b.StartTimer() + for i := 0; i < b.N; i++ { + tree.DeleteMin() + } +} + +func TestInsertNoReplace(t *testing.T) { + tree := New() + n := 1000 + for q := 0; q < 2; q++ { + perm := rand.Perm(n) + for i := 0; i < n; i++ { + tree.InsertNoReplace(Int(perm[i])) + } + } + j := 0 + tree.AscendGreaterOrEqual(Int(0), func(item Item) bool { + if item.(Int) != Int(j/2) { + t.Fatalf("bad order") + } + j++ + return true + }) +} diff --git a/vendor/github.com/petar/GoLLRB/llrb/util.go b/vendor/github.com/petar/GoLLRB/llrb/util.go new file mode 100644 index 000000000000..63dbdb2df0a7 --- /dev/null +++ b/vendor/github.com/petar/GoLLRB/llrb/util.go @@ -0,0 +1,17 @@ +// Copyright 2010 Petar Maymounkov. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package llrb + +type Int int + +func (x Int) Less(than Item) bool { + return x < than.(Int) +} + +type String string + +func (x String) Less(than Item) bool { + return x < than.(String) +} diff --git a/vendor/github.com/peterbourgon/diskv/BUILD b/vendor/github.com/peterbourgon/diskv/BUILD index d3e29bc75094..2bb0c244dd4d 100644 --- a/vendor/github.com/peterbourgon/diskv/BUILD +++ b/vendor/github.com/peterbourgon/diskv/BUILD @@ -1,9 +1,4 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "go_default_library", @@ -13,9 +8,34 @@ go_library( "index.go", ], importpath = "github.com/peterbourgon/diskv", + visibility = ["//visibility:public"], deps = ["//vendor/github.com/google/btree:go_default_library"], ) +go_test( + name = "go_default_test", + srcs = [ + "basic_test.go", + "compression_test.go", + "index_test.go", + "issues_test.go", + "speed_test.go", + "stream_test.go", + ], + importpath = "github.com/peterbourgon/diskv", + library = ":go_default_library", +) + +go_test( + name = "go_default_xtest", + srcs = [ + "import_test.go", + "keys_test.go", + ], + importpath = "github.com/peterbourgon/diskv_test", + deps = [":go_default_library"], +) + filegroup( name = "package-srcs", srcs = glob(["**"]), @@ -27,4 +47,5 @@ filegroup( name = "all-srcs", srcs = [":package-srcs"], tags = ["automanaged"], + visibility = ["//visibility:public"], ) diff --git a/vendor/github.com/peterbourgon/diskv/basic_test.go b/vendor/github.com/peterbourgon/diskv/basic_test.go new file mode 100644 index 000000000000..7bf7eda05f2b --- /dev/null +++ b/vendor/github.com/peterbourgon/diskv/basic_test.go @@ -0,0 +1,253 @@ +package diskv + +import ( + "bytes" + "testing" + "time" +) + +func cmpBytes(a, b []byte) bool { + if len(a) != len(b) { + return false + } + for i := 0; i < len(a); i++ { + if a[i] != b[i] { + return false + } + } + return true +} + +func (d *Diskv) 
isCached(key string) bool { + d.mu.RLock() + defer d.mu.RUnlock() + _, ok := d.cache[key] + return ok +} + +func TestWriteReadErase(t *testing.T) { + d := New(Options{ + BasePath: "test-data", + CacheSizeMax: 1024, + }) + defer d.EraseAll() + k, v := "a", []byte{'b'} + if err := d.Write(k, v); err != nil { + t.Fatalf("write: %s", err) + } + if readVal, err := d.Read(k); err != nil { + t.Fatalf("read: %s", err) + } else if bytes.Compare(v, readVal) != 0 { + t.Fatalf("read: expected %s, got %s", v, readVal) + } + if err := d.Erase(k); err != nil { + t.Fatalf("erase: %s", err) + } +} + +func TestWRECache(t *testing.T) { + d := New(Options{ + BasePath: "test-data", + CacheSizeMax: 1024, + }) + defer d.EraseAll() + k, v := "xxx", []byte{' ', ' ', ' '} + if d.isCached(k) { + t.Fatalf("key cached before Write and Read") + } + if err := d.Write(k, v); err != nil { + t.Fatalf("write: %s", err) + } + if d.isCached(k) { + t.Fatalf("key cached before Read") + } + if readVal, err := d.Read(k); err != nil { + t.Fatalf("read: %s", err) + } else if bytes.Compare(v, readVal) != 0 { + t.Fatalf("read: expected %s, got %s", v, readVal) + } + for i := 0; i < 10 && !d.isCached(k); i++ { + time.Sleep(10 * time.Millisecond) + } + if !d.isCached(k) { + t.Fatalf("key not cached after Read") + } + if err := d.Erase(k); err != nil { + t.Fatalf("erase: %s", err) + } + if d.isCached(k) { + t.Fatalf("key cached after Erase") + } +} + +func TestStrings(t *testing.T) { + d := New(Options{ + BasePath: "test-data", + CacheSizeMax: 1024, + }) + defer d.EraseAll() + + keys := map[string]bool{"a": false, "b": false, "c": false, "d": false} + v := []byte{'1'} + for k := range keys { + if err := d.Write(k, v); err != nil { + t.Fatalf("write: %s: %s", k, err) + } + } + + for k := range d.Keys(nil) { + if _, present := keys[k]; present { + t.Logf("got: %s", k) + keys[k] = true + } else { + t.Fatalf("strings() returns unknown key: %s", k) + } + } + + for k, found := range keys { + if !found { + t.Errorf("never got %s", k) + } + } +} + +func TestZeroByteCache(t *testing.T) { + d := New(Options{ + BasePath: "test-data", + CacheSizeMax: 0, + }) + defer d.EraseAll() + + k, v := "a", []byte{'1', '2', '3'} + if err := d.Write(k, v); err != nil { + t.Fatalf("Write: %s", err) + } + + if d.isCached(k) { + t.Fatalf("key cached, expected not-cached") + } + + if _, err := d.Read(k); err != nil { + t.Fatalf("Read: %s", err) + } + + if d.isCached(k) { + t.Fatalf("key cached, expected not-cached") + } +} + +func TestOneByteCache(t *testing.T) { + d := New(Options{ + BasePath: "test-data", + CacheSizeMax: 1, + }) + defer d.EraseAll() + + k1, k2, v1, v2 := "a", "b", []byte{'1'}, []byte{'1', '2'} + if err := d.Write(k1, v1); err != nil { + t.Fatal(err) + } + + if v, err := d.Read(k1); err != nil { + t.Fatal(err) + } else if !cmpBytes(v, v1) { + t.Fatalf("Read: expected %s, got %s", string(v1), string(v)) + } + + for i := 0; i < 10 && !d.isCached(k1); i++ { + time.Sleep(10 * time.Millisecond) + } + if !d.isCached(k1) { + t.Fatalf("expected 1-byte value to be cached, but it wasn't") + } + + if err := d.Write(k2, v2); err != nil { + t.Fatal(err) + } + if _, err := d.Read(k2); err != nil { + t.Fatalf("--> %s", err) + } + + for i := 0; i < 10 && (!d.isCached(k1) || d.isCached(k2)); i++ { + time.Sleep(10 * time.Millisecond) // just wait for lazy-cache + } + if !d.isCached(k1) { + t.Fatalf("1-byte value was uncached for no reason") + } + + if d.isCached(k2) { + t.Fatalf("2-byte value was cached, but cache max size is 1") + } +} + +func TestStaleCache(t 
*testing.T) { + d := New(Options{ + BasePath: "test-data", + CacheSizeMax: 1, + }) + defer d.EraseAll() + + k, first, second := "a", "first", "second" + if err := d.Write(k, []byte(first)); err != nil { + t.Fatal(err) + } + + v, err := d.Read(k) + if err != nil { + t.Fatal(err) + } + if string(v) != first { + t.Errorf("expected '%s', got '%s'", first, v) + } + + if err := d.Write(k, []byte(second)); err != nil { + t.Fatal(err) + } + + v, err = d.Read(k) + if err != nil { + t.Fatal(err) + } + + if string(v) != second { + t.Errorf("expected '%s', got '%s'", second, v) + } +} + +func TestHas(t *testing.T) { + d := New(Options{ + BasePath: "test-data", + CacheSizeMax: 1024, + }) + defer d.EraseAll() + + for k, v := range map[string]string{ + "a": "1", + "foo": "2", + "012345": "3", + } { + d.Write(k, []byte(v)) + } + + d.Read("foo") // cache one of them + if !d.isCached("foo") { + t.Errorf("'foo' didn't get cached") + } + + for _, tuple := range []struct { + key string + expected bool + }{ + {"a", true}, + {"b", false}, + {"foo", true}, + {"bar", false}, + {"01234", false}, + {"012345", true}, + {"0123456", false}, + } { + if expected, got := tuple.expected, d.Has(tuple.key); expected != got { + t.Errorf("Has(%s): expected %v, got %v", tuple.key, expected, got) + } + } +} diff --git a/vendor/github.com/peterbourgon/diskv/compression_test.go b/vendor/github.com/peterbourgon/diskv/compression_test.go new file mode 100644 index 000000000000..2d614203725f --- /dev/null +++ b/vendor/github.com/peterbourgon/diskv/compression_test.go @@ -0,0 +1,72 @@ +package diskv + +import ( + "compress/flate" + "fmt" + "math/rand" + "os" + "testing" + "time" +) + +func init() { + rand.Seed(time.Now().UnixNano()) +} + +func testCompressionWith(t *testing.T, c Compression, name string) { + d := New(Options{ + BasePath: "compression-test", + CacheSizeMax: 0, + Compression: c, + }) + defer d.EraseAll() + + sz := 4096 + val := make([]byte, sz) + for i := 0; i < sz; i++ { + val[i] = byte('a' + rand.Intn(26)) // {a-z}; should compress some + } + + key := "a" + if err := d.Write(key, val); err != nil { + t.Fatalf("write failed: %s", err) + } + + targetFile := fmt.Sprintf("%s%c%s", d.BasePath, os.PathSeparator, key) + fi, err := os.Stat(targetFile) + if err != nil { + t.Fatalf("%s: %s", targetFile, err) + } + + if fi.Size() >= int64(sz) { + t.Fatalf("%s: size=%d, expected smaller", targetFile, fi.Size()) + } + t.Logf("%s compressed %d to %d", name, sz, fi.Size()) + + readVal, err := d.Read(key) + if len(readVal) != sz { + t.Fatalf("read: expected size=%d, got size=%d", sz, len(readVal)) + } + + for i := 0; i < sz; i++ { + if readVal[i] != val[i] { + t.Fatalf("i=%d: expected %v, got %v", i, val[i], readVal[i]) + } + } +} + +func TestGzipDefault(t *testing.T) { + testCompressionWith(t, NewGzipCompression(), "gzip") +} + +func TestGzipBestCompression(t *testing.T) { + testCompressionWith(t, NewGzipCompressionLevel(flate.BestCompression), "gzip-max") +} + +func TestGzipBestSpeed(t *testing.T) { + testCompressionWith(t, NewGzipCompressionLevel(flate.BestSpeed), "gzip-min") +} + +func TestZlib(t *testing.T) { + testCompressionWith(t, NewZlibCompression(), "zlib") +} diff --git a/vendor/github.com/peterbourgon/diskv/import_test.go b/vendor/github.com/peterbourgon/diskv/import_test.go new file mode 100644 index 000000000000..a08ac7c70cb3 --- /dev/null +++ b/vendor/github.com/peterbourgon/diskv/import_test.go @@ -0,0 +1,76 @@ +package diskv_test + +import ( + "bytes" + "io/ioutil" + "os" + + "github.com/peterbourgon/diskv" + + 
"testing" +) + +func TestImportMove(t *testing.T) { + b := []byte(`0123456789`) + f, err := ioutil.TempFile("", "temp-test") + if err != nil { + t.Fatal(err) + } + if _, err := f.Write(b); err != nil { + t.Fatal(err) + } + f.Close() + + d := diskv.New(diskv.Options{ + BasePath: "test-import-move", + }) + defer d.EraseAll() + + key := "key" + + if err := d.Write(key, []byte(`TBD`)); err != nil { + t.Fatal(err) + } + + if err := d.Import(f.Name(), key, true); err != nil { + t.Fatal(err) + } + + if _, err := os.Stat(f.Name()); err == nil || !os.IsNotExist(err) { + t.Errorf("expected temp file to be gone, but err = %v", err) + } + + if !d.Has(key) { + t.Errorf("%q not present", key) + } + + if buf, err := d.Read(key); err != nil || bytes.Compare(b, buf) != 0 { + t.Errorf("want %q, have %q (err = %v)", string(b), string(buf), err) + } +} + +func TestImportCopy(t *testing.T) { + b := []byte(`¡åéîòü!`) + + f, err := ioutil.TempFile("", "temp-test") + if err != nil { + t.Fatal(err) + } + if _, err := f.Write(b); err != nil { + t.Fatal(err) + } + f.Close() + + d := diskv.New(diskv.Options{ + BasePath: "test-import-copy", + }) + defer d.EraseAll() + + if err := d.Import(f.Name(), "key", false); err != nil { + t.Fatal(err) + } + + if _, err := os.Stat(f.Name()); err != nil { + t.Errorf("expected temp file to remain, but got err = %v", err) + } +} diff --git a/vendor/github.com/peterbourgon/diskv/index_test.go b/vendor/github.com/peterbourgon/diskv/index_test.go new file mode 100644 index 000000000000..72f52a9ff92e --- /dev/null +++ b/vendor/github.com/peterbourgon/diskv/index_test.go @@ -0,0 +1,148 @@ +package diskv + +import ( + "bytes" + "reflect" + "testing" + "time" +) + +func strLess(a, b string) bool { return a < b } + +func cmpStrings(a, b []string) bool { + if len(a) != len(b) { + return false + } + for i := 0; i < len(a); i++ { + if a[i] != b[i] { + return false + } + } + return true +} + +func (d *Diskv) isIndexed(key string) bool { + if d.Index == nil { + return false + } + + for _, got := range d.Index.Keys("", 1000) { + if got == key { + return true + } + } + return false +} + +func TestIndexOrder(t *testing.T) { + d := New(Options{ + BasePath: "index-test", + Transform: func(string) []string { return []string{} }, + CacheSizeMax: 1024, + Index: &BTreeIndex{}, + IndexLess: strLess, + }) + defer d.EraseAll() + + v := []byte{'1', '2', '3'} + d.Write("a", v) + if !d.isIndexed("a") { + t.Fatalf("'a' not indexed after write") + } + d.Write("1", v) + d.Write("m", v) + d.Write("-", v) + d.Write("A", v) + + expectedKeys := []string{"-", "1", "A", "a", "m"} + keys := []string{} + for _, key := range d.Index.Keys("", 100) { + keys = append(keys, key) + } + + if !cmpStrings(keys, expectedKeys) { + t.Fatalf("got %s, expected %s", keys, expectedKeys) + } +} + +func TestIndexLoad(t *testing.T) { + d1 := New(Options{ + BasePath: "index-test", + Transform: func(string) []string { return []string{} }, + CacheSizeMax: 1024, + }) + defer d1.EraseAll() + + val := []byte{'1', '2', '3'} + keys := []string{"a", "b", "c", "d", "e", "f", "g"} + for _, key := range keys { + d1.Write(key, val) + } + + d2 := New(Options{ + BasePath: "index-test", + Transform: func(string) []string { return []string{} }, + CacheSizeMax: 1024, + Index: &BTreeIndex{}, + IndexLess: strLess, + }) + defer d2.EraseAll() + + // check d2 has properly loaded existing d1 data + for _, key := range keys { + if !d2.isIndexed(key) { + t.Fatalf("key '%s' not indexed on secondary", key) + } + } + + // cache one + if readValue, err := 
d2.Read(keys[0]); err != nil { + t.Fatalf("%s", err) + } else if bytes.Compare(val, readValue) != 0 { + t.Fatalf("%s: got %s, expected %s", keys[0], readValue, val) + } + + // make sure it got cached + for i := 0; i < 10 && !d2.isCached(keys[0]); i++ { + time.Sleep(10 * time.Millisecond) + } + if !d2.isCached(keys[0]) { + t.Fatalf("key '%s' not cached", keys[0]) + } + + // kill the disk + d1.EraseAll() + + // cached value should still be there in the second + if readValue, err := d2.Read(keys[0]); err != nil { + t.Fatalf("%s", err) + } else if bytes.Compare(val, readValue) != 0 { + t.Fatalf("%s: got %s, expected %s", keys[0], readValue, val) + } + + // but not in the original + if _, err := d1.Read(keys[0]); err == nil { + t.Fatalf("expected error reading from flushed store") + } +} + +func TestIndexKeysEmptyFrom(t *testing.T) { + d := New(Options{ + BasePath: "index-test", + Transform: func(string) []string { return []string{} }, + CacheSizeMax: 1024, + Index: &BTreeIndex{}, + IndexLess: strLess, + }) + defer d.EraseAll() + + for _, k := range []string{"a", "c", "z", "b", "x", "b", "y"} { + d.Write(k, []byte("1")) + } + + want := []string{"a", "b", "c", "x", "y", "z"} + have := d.Index.Keys("", 99) + if !reflect.DeepEqual(want, have) { + t.Errorf("want %v, have %v", want, have) + } +} diff --git a/vendor/github.com/peterbourgon/diskv/issues_test.go b/vendor/github.com/peterbourgon/diskv/issues_test.go new file mode 100644 index 000000000000..0b0b10908237 --- /dev/null +++ b/vendor/github.com/peterbourgon/diskv/issues_test.go @@ -0,0 +1,121 @@ +package diskv + +import ( + "bytes" + "io/ioutil" + "sync" + "testing" + "time" +) + +// ReadStream from cache shouldn't panic on a nil dereference from a nonexistent +// Compression :) +func TestIssue2A(t *testing.T) { + d := New(Options{ + BasePath: "test-issue-2a", + Transform: func(string) []string { return []string{} }, + CacheSizeMax: 1024, + }) + defer d.EraseAll() + + input := "abcdefghijklmnopqrstuvwxy" + key, writeBuf, sync := "a", bytes.NewBufferString(input), false + if err := d.WriteStream(key, writeBuf, sync); err != nil { + t.Fatal(err) + } + + for i := 0; i < 2; i++ { + began := time.Now() + rc, err := d.ReadStream(key, false) + if err != nil { + t.Fatal(err) + } + buf, err := ioutil.ReadAll(rc) + if err != nil { + t.Fatal(err) + } + if !cmpBytes(buf, []byte(input)) { + t.Fatalf("read #%d: '%s' != '%s'", i+1, string(buf), input) + } + rc.Close() + t.Logf("read #%d in %s", i+1, time.Since(began)) + } +} + +// ReadStream on a key that resolves to a directory should return an error. +func TestIssue2B(t *testing.T) { + blockTransform := func(s string) []string { + transformBlockSize := 3 + sliceSize := len(s) / transformBlockSize + pathSlice := make([]string, sliceSize) + for i := 0; i < sliceSize; i++ { + from, to := i*transformBlockSize, (i*transformBlockSize)+transformBlockSize + pathSlice[i] = s[from:to] + } + return pathSlice + } + + d := New(Options{ + BasePath: "test-issue-2b", + Transform: blockTransform, + CacheSizeMax: 0, + }) + defer d.EraseAll() + + v := []byte{'1', '2', '3'} + if err := d.Write("abcabc", v); err != nil { + t.Fatal(err) + } + + _, err := d.ReadStream("abc", false) + if err == nil { + t.Fatal("ReadStream('abc') should return error") + } + t.Logf("ReadStream('abc') returned error: %v", err) +} + +// Ensure ReadStream with direct=true isn't racy. 
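+// The test below warms a second store's cache with plain Reads, then fires a +// batch of concurrent direct ReadStreams at the same keys; run it under the +// race detector (go test -race) to actually exercise the reported race.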
+func TestIssue17(t *testing.T) { + var ( + basePath = "test-data" + ) + + dWrite := New(Options{ + BasePath: basePath, + CacheSizeMax: 0, + }) + defer dWrite.EraseAll() + + dRead := New(Options{ + BasePath: basePath, + CacheSizeMax: 50, + }) + + cases := map[string]string{ + "a": `1234567890`, + "b": `2345678901`, + "c": `3456789012`, + "d": `4567890123`, + "e": `5678901234`, + } + + for k, v := range cases { + if err := dWrite.Write(k, []byte(v)); err != nil { + t.Fatalf("during write: %s", err) + } + dRead.Read(k) // ensure it's added to cache + } + + var wg sync.WaitGroup + start := make(chan struct{}) + for k, v := range cases { + wg.Add(1) + go func(k, v string) { + <-start + dRead.ReadStream(k, true) + wg.Done() + }(k, v) + } + close(start) + wg.Wait() +} diff --git a/vendor/github.com/peterbourgon/diskv/keys_test.go b/vendor/github.com/peterbourgon/diskv/keys_test.go new file mode 100644 index 000000000000..222e1c44414d --- /dev/null +++ b/vendor/github.com/peterbourgon/diskv/keys_test.go @@ -0,0 +1,231 @@ +package diskv_test + +import ( + "reflect" + "runtime" + "strings" + "testing" + + "github.com/peterbourgon/diskv" +) + +var ( + keysTestData = map[string]string{ + "ab01cd01": "When we started building CoreOS", + "ab01cd02": "we looked at all the various components available to us", + "ab01cd03": "re-using the best tools", + "ef01gh04": "and building the ones that did not exist", + "ef02gh05": "We believe strongly in the Unix philosophy", + "xxxxxxxx": "tools should be independently useful", + } + + prefixes = []string{ + "", // all + "a", + "ab", + "ab0", + "ab01", + "ab01cd0", + "ab01cd01", + "ab01cd01x", // none + "b", // none + "b0", // none + "0", // none + "01", // none + "e", + "ef", + "efx", // none + "ef01gh0", + "ef01gh04", + "ef01gh05", + "ef01gh06", // none + } +) + +func TestKeysFlat(t *testing.T) { + transform := func(s string) []string { + if s == "" { + t.Fatalf(`transform should not be called with ""`) + } + return []string{} + } + d := diskv.New(diskv.Options{ + BasePath: "test-data", + Transform: transform, + }) + defer d.EraseAll() + + for k, v := range keysTestData { + d.Write(k, []byte(v)) + } + + checkKeys(t, d.Keys(nil), keysTestData) +} + +func TestKeysNested(t *testing.T) { + d := diskv.New(diskv.Options{ + BasePath: "test-data", + Transform: blockTransform(2), + }) + defer d.EraseAll() + + for k, v := range keysTestData { + d.Write(k, []byte(v)) + } + + checkKeys(t, d.Keys(nil), keysTestData) +} + +func TestKeysPrefixFlat(t *testing.T) { + d := diskv.New(diskv.Options{ + BasePath: "test-data", + }) + defer d.EraseAll() + + for k, v := range keysTestData { + d.Write(k, []byte(v)) + } + + for _, prefix := range prefixes { + checkKeys(t, d.KeysPrefix(prefix, nil), filterPrefix(keysTestData, prefix)) + } +} + +func TestKeysPrefixNested(t *testing.T) { + d := diskv.New(diskv.Options{ + BasePath: "test-data", + Transform: blockTransform(2), + }) + defer d.EraseAll() + + for k, v := range keysTestData { + d.Write(k, []byte(v)) + } + + for _, prefix := range prefixes { + checkKeys(t, d.KeysPrefix(prefix, nil), filterPrefix(keysTestData, prefix)) + } +} + +func TestKeysCancel(t *testing.T) { + d := diskv.New(diskv.Options{ + BasePath: "test-data", + }) + defer d.EraseAll() + + for k, v := range keysTestData { + d.Write(k, []byte(v)) + } + + var ( + cancel = make(chan struct{}) + received = 0 + cancelAfter = len(keysTestData) / 2 + ) + + for key := range d.Keys(cancel) { + received++ + + if received >= cancelAfter { + close(cancel) + runtime.Gosched() // allow 
walker to detect cancel + } + + t.Logf("received %d: %q", received, key) + } + + if want, have := cancelAfter, received; want != have { + t.Errorf("want %d, have %d", want, have) + } +} + +func checkKeys(t *testing.T, c <-chan string, want map[string]string) { + for k := range c { + if _, ok := want[k]; !ok { + t.Errorf("%q yielded but not expected", k) + continue + } + + delete(want, k) + t.Logf("%q yielded OK", k) + } + + if len(want) != 0 { + t.Errorf("%d expected key(s) not yielded: %s", len(want), strings.Join(flattenKeys(want), ", ")) + } +} + +func blockTransform(blockSize int) func(string) []string { + return func(s string) []string { + var ( + sliceSize = len(s) / blockSize + pathSlice = make([]string, sliceSize) + ) + for i := 0; i < sliceSize; i++ { + from, to := i*blockSize, (i*blockSize)+blockSize + pathSlice[i] = s[from:to] + } + return pathSlice + } +} + +func filterPrefix(in map[string]string, prefix string) map[string]string { + out := map[string]string{} + for k, v := range in { + if strings.HasPrefix(k, prefix) { + out[k] = v + } + } + return out +} + +func TestFilterPrefix(t *testing.T) { + input := map[string]string{ + "all": "", + "and": "", + "at": "", + "available": "", + "best": "", + "building": "", + "components": "", + "coreos": "", + "did": "", + "exist": "", + "looked": "", + "not": "", + "ones": "", + "re-using": "", + "started": "", + "that": "", + "the": "", + "to": "", + "tools": "", + "us": "", + "various": "", + "we": "", + "when": "", + } + + for prefix, want := range map[string]map[string]string{ + "a": map[string]string{"all": "", "and": "", "at": "", "available": ""}, + "al": map[string]string{"all": ""}, + "all": map[string]string{"all": ""}, + "alll": map[string]string{}, + "c": map[string]string{"components": "", "coreos": ""}, + "co": map[string]string{"components": "", "coreos": ""}, + "com": map[string]string{"components": ""}, + } { + have := filterPrefix(input, prefix) + if !reflect.DeepEqual(want, have) { + t.Errorf("%q: want %v, have %v", prefix, flattenKeys(want), flattenKeys(have)) + } + } +} + +func flattenKeys(m map[string]string) []string { + a := make([]string, 0, len(m)) + for k := range m { + a = append(a, k) + } + return a +} diff --git a/vendor/github.com/peterbourgon/diskv/speed_test.go b/vendor/github.com/peterbourgon/diskv/speed_test.go new file mode 100644 index 000000000000..67d05ff27e0b --- /dev/null +++ b/vendor/github.com/peterbourgon/diskv/speed_test.go @@ -0,0 +1,153 @@ +package diskv + +import ( + "fmt" + "math/rand" + "testing" +) + +func shuffle(keys []string) { + ints := rand.Perm(len(keys)) + for i := range keys { + keys[i], keys[ints[i]] = keys[ints[i]], keys[i] + } +} + +func genValue(size int) []byte { + v := make([]byte, size) + for i := 0; i < size; i++ { + v[i] = uint8((rand.Int() % 26) + 97) // a-z + } + return v +} + +const ( + keyCount = 1000 +) + +func genKeys() []string { + keys := make([]string, keyCount) + for i := 0; i < keyCount; i++ { + keys[i] = fmt.Sprintf("%d", i) + } + return keys +} + +func (d *Diskv) load(keys []string, val []byte) { + for _, key := range keys { + d.Write(key, val) + } +} + +func benchRead(b *testing.B, size, cachesz int) { + b.StopTimer() + d := New(Options{ + BasePath: "speed-test", + Transform: func(string) []string { return []string{} }, + CacheSizeMax: uint64(cachesz), + }) + defer d.EraseAll() + + keys := genKeys() + value := genValue(size) + d.load(keys, value) + shuffle(keys) + b.SetBytes(int64(size)) + + b.StartTimer() + for i := 0; i < b.N; i++ { + _, _ =
d.Read(keys[i%len(keys)]) + } + b.StopTimer() +} + +func benchWrite(b *testing.B, size int, withIndex bool) { + b.StopTimer() + + options := Options{ + BasePath: "speed-test", + Transform: func(string) []string { return []string{} }, + CacheSizeMax: 0, + } + if withIndex { + options.Index = &BTreeIndex{} + options.IndexLess = strLess + } + + d := New(options) + defer d.EraseAll() + keys := genKeys() + value := genValue(size) + shuffle(keys) + b.SetBytes(int64(size)) + + b.StartTimer() + for i := 0; i < b.N; i++ { + d.Write(keys[i%len(keys)], value) + } + b.StopTimer() +} + +func BenchmarkWrite__32B_NoIndex(b *testing.B) { + benchWrite(b, 32, false) +} + +func BenchmarkWrite__1KB_NoIndex(b *testing.B) { + benchWrite(b, 1024, false) +} + +func BenchmarkWrite__4KB_NoIndex(b *testing.B) { + benchWrite(b, 4096, false) +} + +func BenchmarkWrite_10KB_NoIndex(b *testing.B) { + benchWrite(b, 10240, false) +} + +func BenchmarkWrite__32B_WithIndex(b *testing.B) { + benchWrite(b, 32, true) +} + +func BenchmarkWrite__1KB_WithIndex(b *testing.B) { + benchWrite(b, 1024, true) +} + +func BenchmarkWrite__4KB_WithIndex(b *testing.B) { + benchWrite(b, 4096, true) +} + +func BenchmarkWrite_10KB_WithIndex(b *testing.B) { + benchWrite(b, 10240, true) +} + +func BenchmarkRead__32B_NoCache(b *testing.B) { + benchRead(b, 32, 0) +} + +func BenchmarkRead__1KB_NoCache(b *testing.B) { + benchRead(b, 1024, 0) +} + +func BenchmarkRead__4KB_NoCache(b *testing.B) { + benchRead(b, 4096, 0) +} + +func BenchmarkRead_10KB_NoCache(b *testing.B) { + benchRead(b, 10240, 0) +} + +func BenchmarkRead__32B_WithCache(b *testing.B) { + benchRead(b, 32, keyCount*32*2) +} + +func BenchmarkRead__1KB_WithCache(b *testing.B) { + benchRead(b, 1024, keyCount*1024*2) +} + +func BenchmarkRead__4KB_WithCache(b *testing.B) { + benchRead(b, 4096, keyCount*4096*2) +} + +func BenchmarkRead_10KB_WithCache(b *testing.B) { + benchRead(b, 10240, keyCount*4096*2) +} diff --git a/vendor/github.com/peterbourgon/diskv/stream_test.go b/vendor/github.com/peterbourgon/diskv/stream_test.go new file mode 100644 index 000000000000..7991dbff28d7 --- /dev/null +++ b/vendor/github.com/peterbourgon/diskv/stream_test.go @@ -0,0 +1,117 @@ +package diskv + +import ( + "bytes" + "io/ioutil" + "testing" +) + +func TestBasicStreamCaching(t *testing.T) { + d := New(Options{ + BasePath: "test-data", + CacheSizeMax: 1024, + }) + defer d.EraseAll() + + input := "a1b2c3" + key, writeBuf, sync := "a", bytes.NewBufferString(input), true + if err := d.WriteStream(key, writeBuf, sync); err != nil { + t.Fatal(err) + } + + if d.isCached(key) { + t.Fatalf("'%s' cached, but shouldn't be (yet)", key) + } + + rc, err := d.ReadStream(key, false) + if err != nil { + t.Fatal(err) + } + + readBuf, err := ioutil.ReadAll(rc) + if err != nil { + t.Fatal(err) + } + + if !cmpBytes(readBuf, []byte(input)) { + t.Fatalf("'%s' != '%s'", string(readBuf), input) + } + + if !d.isCached(key) { + t.Fatalf("'%s' isn't cached, but should be", key) + } +} + +func TestReadStreamDirect(t *testing.T) { + var ( + basePath = "test-data" + ) + dWrite := New(Options{ + BasePath: basePath, + CacheSizeMax: 0, + }) + defer dWrite.EraseAll() + dRead := New(Options{ + BasePath: basePath, + CacheSizeMax: 1024, + }) + + // Write + key, val1, val2 := "a", []byte(`1234567890`), []byte(`aaaaaaaaaa`) + if err := dWrite.Write(key, val1); err != nil { + t.Fatalf("during first write: %s", err) + } + + // First, caching read. 
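+ // (This Read goes to disk and, since dRead was created with a nonzero + // CacheSizeMax, also populates dRead's in-memory cache for later hits.)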
+ val, err := dRead.Read(key) + if err != nil { + t.Fatalf("during initial read: %s", err) + } + t.Logf("read 1: %s => %s", key, string(val)) + if !cmpBytes(val1, val) { + t.Errorf("expected %q, got %q", string(val1), string(val)) + } + if !dRead.isCached(key) { + t.Errorf("%q should be cached, but isn't", key) + } + + // Write a different value. + if err := dWrite.Write(key, val2); err != nil { + t.Fatalf("during second write: %s", err) + } + + // Second read, should hit cache and get the old value. + val, err = dRead.Read(key) + if err != nil { + t.Fatalf("during second (cache-hit) read: %s", err) + } + t.Logf("read 2: %s => %s", key, string(val)) + if !cmpBytes(val1, val) { + t.Errorf("expected %q, got %q", string(val1), string(val)) + } + + // Third, direct read, should get the updated value. + rc, err := dRead.ReadStream(key, true) + if err != nil { + t.Fatalf("during third (direct) read, ReadStream: %s", err) + } + defer rc.Close() + val, err = ioutil.ReadAll(rc) + if err != nil { + t.Fatalf("during third (direct) read, ReadAll: %s", err) + } + t.Logf("read 3: %s => %s", key, string(val)) + if !cmpBytes(val2, val) { + t.Errorf("expected %q, got %q", string(val2), string(val)) + } + + // Fourth read, should hit cache and get the new value. + val, err = dRead.Read(key) + if err != nil { + t.Fatalf("during fourth (cache-hit) read: %s", err) + } + t.Logf("read 4: %s => %s", key, string(val)) + if !cmpBytes(val2, val) { + t.Errorf("expected %q, got %q", string(val2), string(val)) + } +} diff --git a/vendor/github.com/prometheus/client_golang/.gitignore b/vendor/github.com/prometheus/client_golang/.gitignore new file mode 100644 index 000000000000..f6fc2e8eb2e8 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/.gitignore @@ -0,0 +1,26 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe + +*~ +*# +.build diff --git a/vendor/github.com/prometheus/client_golang/.travis.yml b/vendor/github.com/prometheus/client_golang/.travis.yml new file mode 100644 index 000000000000..d83f31a59af9 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/.travis.yml @@ -0,0 +1,9 @@ +sudo: false +language: go + +go: + - 1.5.4 + - 1.6.2 + +script: + - go test -short ./... diff --git a/vendor/github.com/prometheus/client_golang/AUTHORS.md b/vendor/github.com/prometheus/client_golang/AUTHORS.md new file mode 100644 index 000000000000..c5275d5ab194 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/AUTHORS.md @@ -0,0 +1,18 @@ +The Prometheus project was started by Matt T. Proud (emeritus) and +Julius Volz in 2012. + +Maintainers of this repository: + +* Björn Rabenstein + +The following individuals have contributed code to this repository +(listed in alphabetical order): + +* Bernerd Schaefer +* Björn Rabenstein +* Daniel Bornkessel +* Jeff Younker +* Julius Volz +* Matt T. Proud +* Tobias Schmidt + diff --git a/vendor/github.com/prometheus/client_golang/CHANGELOG.md b/vendor/github.com/prometheus/client_golang/CHANGELOG.md new file mode 100644 index 000000000000..330788a4ee45 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/CHANGELOG.md @@ -0,0 +1,109 @@ +## 0.8.0 / 2016-08-17 +* [CHANGE] Registry is doing more consistency checks. This might break + existing setups that used to export inconsistent metrics.
+* [CHANGE] Pushing to Pushgateway moved to package `push` and changed to allow + arbitrary grouping. +* [CHANGE] Removed `SelfCollector`. +* [CHANGE] Removed `PanicOnCollectError` and `EnableCollectChecks` methods. +* [CHANGE] Moved packages to the prometheus/common repo: `text`, `model`, + `extraction`. +* [CHANGE] Deprecated a number of functions. +* [FEATURE] Allow custom registries. Added `Registerer` and `Gatherer` + interfaces. +* [FEATURE] Separated HTTP exposition, allowing custom HTTP handlers (package + `promhttp`) and enabling the creation of other exposition mechanisms. +* [FEATURE] `MustRegister` is variadic now, allowing registration of many + collectors in one call. +* [FEATURE] Added HTTP API v1 package. +* [ENHANCEMENT] Numerous documentation improvements. +* [ENHANCEMENT] Improved metric sorting. +* [ENHANCEMENT] Inlined fnv64a hashing for improved performance. +* [ENHANCEMENT] Several test improvements. +* [BUGFIX] Handle collisions in MetricVec. + +## 0.7.0 / 2015-07-27 +* [CHANGE] Rename ExporterLabelPrefix to ExportedLabelPrefix. +* [BUGFIX] Closed gaps in metric consistency check. +* [BUGFIX] Validate LabelName/LabelSet on JSON unmarshaling. +* [ENHANCEMENT] Document the possibility to create "empty" metrics in + a metric vector. +* [ENHANCEMENT] Fix and clarify various doc comments and the README.md. +* [ENHANCEMENT] (Kind of) solve "The Proxy Problem" of http.InstrumentHandler. +* [ENHANCEMENT] Change responseWriterDelegator.written to int64. + +## 0.6.0 / 2015-06-01 +* [CHANGE] Rename process_goroutines to go_goroutines. +* [ENHANCEMENT] Validate label names during YAML decoding. +* [ENHANCEMENT] Add LabelName regular expression. +* [BUGFIX] Ensure alignment of struct members for 32-bit systems. + +## 0.5.0 / 2015-05-06 +* [BUGFIX] Removed a weakness in the fingerprinting aka signature code. + This makes fingerprinting slower and more allocation-heavy, but the + weakness was too severe to be tolerated. +* [CHANGE] As a result of the above, Metric.Fingerprint is now returning + a different fingerprint. To keep the same fingerprint, the new method + Metric.FastFingerprint was introduced, which will be used by the + Prometheus server for storage purposes (implying that a collision + detection has to be added, too). +* [ENHANCEMENT] The Metric.Equal and Metric.Before do not depend on + fingerprinting anymore, removing the possibility of an undetected + fingerprint collision. +* [FEATURE] The Go collector in the exposition library includes garbage + collection stats. +* [FEATURE] The exposition library allows to create constant "throw-away" + summaries and histograms. +* [CHANGE] A number of new reserved labels and prefixes. + +## 0.4.0 / 2015-04-08 +* [CHANGE] Return NaN when Summaries have no observations yet. +* [BUGFIX] Properly handle Summary decay upon Write(). +* [BUGFIX] Fix the documentation link to the consumption library. +* [FEATURE] Allow the metric family injection hook to merge with existing + metric families. +* [ENHANCEMENT] Removed cgo dependency and conditional compilation of procfs. +* [MAINTENANCE] Adjusted to changes in matttproud/golang_protobuf_extensions. + +## 0.3.2 / 2015-03-11 +* [BUGFIX] Fixed the receiver type of COWMetric.Set(). This method is + only used by the Prometheus server internally. +* [CLEANUP] Added licenses of vendored code left out by godep. + +## 0.3.1 / 2015-03-04 +* [ENHANCEMENT] Switched fingerprinting functions from own free list to + sync.Pool. 
+* [CHANGE] Makefile uses Go 1.4.2 now (only relevant for examples and tests). + +## 0.3.0 / 2015-03-03 +* [CHANGE] Changed the fingerprinting for metrics. THIS WILL INVALIDATE ALL + PERSISTED FINGERPRINTS. IF YOU COMPILE THE PROMETHEUS SERVER WITH THIS + VERSION, YOU HAVE TO WIPE THE PREVIOUSLY CREATED STORAGE. +* [CHANGE] LabelValuesToSignature removed. (Nobody had used it, and it was + arguably broken.) +* [CHANGE] Vendored dependencies. Those are only used by the Makefile. If + client_golang is used as a library, the vendoring will stay out of your way. +* [BUGFIX] Remove a weakness in the fingerprinting for metrics. (This made + the fingerprinting change above necessary.) +* [FEATURE] Added new fingerprinting functions SignatureForLabels and + SignatureWithoutLabels to be used by the Prometheus server. These functions + require fewer allocations than the ones currently used by the server. + +## 0.2.0 / 2015-02-23 +* [FEATURE] Introduce new Histogram metric type. +* [CHANGE] Ignore process collector errors for now (better error handling + pending). +* [CHANGE] Use clear error interface for process pidFn. +* [BUGFIX] Fix Go download links for several archs and OSes. +* [ENHANCEMENT] Massively improve Gauge and Counter performance. +* [ENHANCEMENT] Catch illegal label names for summaries in histograms. +* [ENHANCEMENT] Reduce allocations during fingerprinting. +* [ENHANCEMENT] Remove cgo dependency. procfs package will only be included if + both cgo is available and the build is for an OS with procfs. +* [CLEANUP] Clean up code style issues. +* [CLEANUP] Mark slow tests as such and exclude them from travis. +* [CLEANUP] Update protobuf library package name. +* [CLEANUP] Updated vendoring of beorn7/perks. + +## 0.1.0 / 2015-02-02 +* [CLEANUP] Introduced semantic versioning and changelog. From now on, + changes will be reported in this file. diff --git a/vendor/github.com/prometheus/client_golang/CONTRIBUTING.md b/vendor/github.com/prometheus/client_golang/CONTRIBUTING.md new file mode 100644 index 000000000000..5705f0fbea29 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/CONTRIBUTING.md @@ -0,0 +1,18 @@ +# Contributing + +Prometheus uses GitHub to manage reviews of pull requests. + +* If you have a trivial fix or improvement, go ahead and create a pull + request, addressing (with `@...`) one or more of the maintainers + (see [AUTHORS.md](AUTHORS.md)) in the description of the pull request. + +* If you plan to do something more involved, first discuss your ideas + on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers). + This will avoid unnecessary work and surely give you and us a good deal + of inspiration. + +* Relevant coding style guidelines are the [Go Code Review + Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments) + and the _Formatting and style_ section of Peter Bourgon's [Go: Best + Practices for Production + Environments](http://peter.bourgon.org/go-in-production/#formatting-and-style). diff --git a/vendor/github.com/prometheus/client_golang/README.md b/vendor/github.com/prometheus/client_golang/README.md new file mode 100644 index 000000000000..557eacf5a824 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/README.md @@ -0,0 +1,45 @@ +# Prometheus Go client library + +[![Build Status](https://travis-ci.org/prometheus/client_golang.svg?branch=master)](https://travis-ci.org/prometheus/client_golang) + +This is the [Go](http://golang.org) client library for +[Prometheus](http://prometheus.io).
It has two separate parts, one for +instrumenting application code, and one for creating clients that talk to the +Prometheus HTTP API. + +## Instrumenting applications + +[![code-coverage](http://gocover.io/_badge/github.com/prometheus/client_golang/prometheus)](http://gocover.io/github.com/prometheus/client_golang/prometheus) [![go-doc](https://godoc.org/github.com/prometheus/client_golang/prometheus?status.svg)](https://godoc.org/github.com/prometheus/client_golang/prometheus) + +The +[`prometheus` directory](https://github.com/prometheus/client_golang/tree/master/prometheus) +contains the instrumentation library. See the +[best practices section](http://prometheus.io/docs/practices/naming/) of the +Prometheus documentation to learn more about instrumenting applications. + +The +[`examples` directory](https://github.com/prometheus/client_golang/tree/master/examples) +contains simple examples of instrumented code. + +## Client for the Prometheus HTTP API + +[![code-coverage](http://gocover.io/_badge/github.com/prometheus/client_golang/api/prometheus)](http://gocover.io/github.com/prometheus/client_golang/api/prometheus) [![go-doc](https://godoc.org/github.com/prometheus/client_golang/api/prometheus?status.svg)](https://godoc.org/github.com/prometheus/client_golang/api/prometheus) + +The +[`api/prometheus` directory](https://github.com/prometheus/client_golang/tree/master/api/prometheus) +contains the client for the +[Prometheus HTTP API](http://prometheus.io/docs/querying/api/). It allows you +to write Go applications that query time series data from a Prometheus server. + +## Where are `model`, `extraction`, and `text`? + +The `model` package has been moved to +[`prometheus/common/model`](https://github.com/prometheus/common/tree/master/model). + +The `extraction` and `text` packages are now contained in +[`prometheus/common/expfmt`](https://github.com/prometheus/common/tree/master/expfmt). + +## Contributing and community + +See the [contributing guidelines](CONTRIBUTING.md) and the +[Community section](http://prometheus.io/community/) of the homepage.
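+ +## A minimal instrumentation sketch + +The following is an illustrative sketch rather than official documentation: the +metric name `myapp_requests_total`, the handler, and the port are invented for +the example, while the `prometheus` calls are the library's public API as of +this version. + +```go +package main + +import ( + "log" + "net/http" + + "github.com/prometheus/client_golang/prometheus" +) + +// requests counts HTTP requests handled by the example server. +var requests = prometheus.NewCounter(prometheus.CounterOpts{ + Name: "myapp_requests_total", + Help: "Total number of requests handled.", +}) + +func main() { + prometheus.MustRegister(requests) + + // prometheus.Handler() serves the /metrics exposition endpoint. + http.Handle("/metrics", prometheus.Handler()) + http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + requests.Inc() + w.Write([]byte("ok")) + }) + log.Fatal(http.ListenAndServe(":8080", nil)) +} +```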
diff --git a/vendor/github.com/prometheus/client_golang/VERSION b/vendor/github.com/prometheus/client_golang/VERSION new file mode 100644 index 000000000000..a3df0a6959e1 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/VERSION @@ -0,0 +1 @@ +0.8.0 diff --git a/vendor/github.com/prometheus/client_golang/prometheus/BUILD b/vendor/github.com/prometheus/client_golang/prometheus/BUILD index 014740d975ce..93cc06465e37 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/BUILD +++ b/vendor/github.com/prometheus/client_golang/prometheus/BUILD @@ -1,9 +1,4 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "go_default_library", @@ -27,6 +22,7 @@ go_library( "vec.go", ], importpath = "github.com/prometheus/client_golang/prometheus", + visibility = ["//visibility:public"], deps = [ "//vendor/github.com/beorn7/perks/quantile:go_default_library", "//vendor/github.com/golang/protobuf/proto:go_default_library", @@ -37,6 +33,47 @@ go_library( ], ) +go_test( + name = "go_default_test", + srcs = [ + "benchmark_test.go", + "counter_test.go", + "gauge_test.go", + "go_collector_test.go", + "histogram_test.go", + "http_test.go", + "metric_test.go", + "process_collector_test.go", + "summary_test.go", + "vec_test.go", + ], + importpath = "github.com/prometheus/client_golang/prometheus", + library = ":go_default_library", + deps = [ + "//vendor/github.com/prometheus/client_model/go:go_default_library", + "//vendor/github.com/prometheus/common/expfmt:go_default_library", + "//vendor/github.com/prometheus/procfs:go_default_library", + ], +) + +go_test( + name = "go_default_xtest", + srcs = [ + "example_clustermanager_test.go", + "examples_test.go", + "expvar_collector_test.go", + "registry_test.go", + ], + importpath = "github.com/prometheus/client_golang/prometheus_test", + deps = [ + ":go_default_library", + "//vendor/github.com/golang/protobuf/proto:go_default_library", + "//vendor/github.com/prometheus/client_golang/prometheus/promhttp:go_default_library", + "//vendor/github.com/prometheus/client_model/go:go_default_library", + "//vendor/github.com/prometheus/common/expfmt:go_default_library", + ], +) + filegroup( name = "package-srcs", srcs = glob(["**"]), @@ -52,4 +89,5 @@ filegroup( "//vendor/github.com/prometheus/client_golang/prometheus/push:all-srcs", ], tags = ["automanaged"], + visibility = ["//visibility:public"], ) diff --git a/vendor/github.com/prometheus/client_golang/prometheus/benchmark_test.go b/vendor/github.com/prometheus/client_golang/prometheus/benchmark_test.go new file mode 100644 index 000000000000..a3d86698bfc7 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/benchmark_test.go @@ -0,0 +1,183 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
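+ +// The benchmarks in this file compare the cost of the label-access styles on +// metric vectors: positional WithLabelValues (also under concurrency), +// map-based With, a prepared Labels map reused across iterations, and the +// no-label base case.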
+ +package prometheus + +import ( + "sync" + "testing" +) + +func BenchmarkCounterWithLabelValues(b *testing.B) { + m := NewCounterVec( + CounterOpts{ + Name: "benchmark_counter", + Help: "A counter to benchmark it.", + }, + []string{"one", "two", "three"}, + ) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + m.WithLabelValues("eins", "zwei", "drei").Inc() + } +} + +func BenchmarkCounterWithLabelValuesConcurrent(b *testing.B) { + m := NewCounterVec( + CounterOpts{ + Name: "benchmark_counter", + Help: "A counter to benchmark it.", + }, + []string{"one", "two", "three"}, + ) + b.ReportAllocs() + b.ResetTimer() + wg := sync.WaitGroup{} + for i := 0; i < 10; i++ { + wg.Add(1) + go func() { + for j := 0; j < b.N/10; j++ { + m.WithLabelValues("eins", "zwei", "drei").Inc() + } + wg.Done() + }() + } + wg.Wait() +} + +func BenchmarkCounterWithMappedLabels(b *testing.B) { + m := NewCounterVec( + CounterOpts{ + Name: "benchmark_counter", + Help: "A counter to benchmark it.", + }, + []string{"one", "two", "three"}, + ) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + m.With(Labels{"two": "zwei", "one": "eins", "three": "drei"}).Inc() + } +} + +func BenchmarkCounterWithPreparedMappedLabels(b *testing.B) { + m := NewCounterVec( + CounterOpts{ + Name: "benchmark_counter", + Help: "A counter to benchmark it.", + }, + []string{"one", "two", "three"}, + ) + b.ReportAllocs() + b.ResetTimer() + labels := Labels{"two": "zwei", "one": "eins", "three": "drei"} + for i := 0; i < b.N; i++ { + m.With(labels).Inc() + } +} + +func BenchmarkCounterNoLabels(b *testing.B) { + m := NewCounter(CounterOpts{ + Name: "benchmark_counter", + Help: "A counter to benchmark it.", + }) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + m.Inc() + } +} + +func BenchmarkGaugeWithLabelValues(b *testing.B) { + m := NewGaugeVec( + GaugeOpts{ + Name: "benchmark_gauge", + Help: "A gauge to benchmark it.", + }, + []string{"one", "two", "three"}, + ) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + m.WithLabelValues("eins", "zwei", "drei").Set(3.1415) + } +} + +func BenchmarkGaugeNoLabels(b *testing.B) { + m := NewGauge(GaugeOpts{ + Name: "benchmark_gauge", + Help: "A gauge to benchmark it.", + }) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + m.Set(3.1415) + } +} + +func BenchmarkSummaryWithLabelValues(b *testing.B) { + m := NewSummaryVec( + SummaryOpts{ + Name: "benchmark_summary", + Help: "A summary to benchmark it.", + }, + []string{"one", "two", "three"}, + ) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + m.WithLabelValues("eins", "zwei", "drei").Observe(3.1415) + } +} + +func BenchmarkSummaryNoLabels(b *testing.B) { + m := NewSummary(SummaryOpts{ + Name: "benchmark_summary", + Help: "A summary to benchmark it.", + }, + ) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + m.Observe(3.1415) + } +} + +func BenchmarkHistogramWithLabelValues(b *testing.B) { + m := NewHistogramVec( + HistogramOpts{ + Name: "benchmark_histogram", + Help: "A histogram to benchmark it.", + }, + []string{"one", "two", "three"}, + ) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + m.WithLabelValues("eins", "zwei", "drei").Observe(3.1415) + } +} + +func BenchmarkHistogramNoLabels(b *testing.B) { + m := NewHistogram(HistogramOpts{ + Name: "benchmark_histogram", + Help: "A histogram to benchmark it.", + }, + ) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + m.Observe(3.1415) + } +} diff --git 
a/vendor/github.com/prometheus/client_golang/prometheus/counter_test.go b/vendor/github.com/prometheus/client_golang/prometheus/counter_test.go new file mode 100644 index 000000000000..67391a23aa8c --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/counter_test.go @@ -0,0 +1,58 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "math" + "testing" + + dto "github.com/prometheus/client_model/go" +) + +func TestCounterAdd(t *testing.T) { + counter := NewCounter(CounterOpts{ + Name: "test", + Help: "test help", + ConstLabels: Labels{"a": "1", "b": "2"}, + }).(*counter) + counter.Inc() + if expected, got := 1., math.Float64frombits(counter.valBits); expected != got { + t.Errorf("Expected %f, got %f.", expected, got) + } + counter.Add(42) + if expected, got := 43., math.Float64frombits(counter.valBits); expected != got { + t.Errorf("Expected %f, got %f.", expected, got) + } + + if expected, got := "counter cannot decrease in value", decreaseCounter(counter).Error(); expected != got { + t.Errorf("Expected error %q, got %q.", expected, got) + } + + m := &dto.Metric{} + counter.Write(m) + + if expected, got := `label:<name:"a" value:"1" > label:<name:"b" value:"2" > counter:<value:43 > `, m.String(); expected != got { + t.Errorf("expected %q, got %q", expected, got) + } +} + +func decreaseCounter(c *counter) (err error) { + defer func() { + if e := recover(); e != nil { + err = e.(error) + } + }() + c.Add(-1) + return nil +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/example_clustermanager_test.go b/vendor/github.com/prometheus/client_golang/prometheus/example_clustermanager_test.go new file mode 100644 index 000000000000..260c1b52ded4 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/example_clustermanager_test.go @@ -0,0 +1,118 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus_test + +import "github.com/prometheus/client_golang/prometheus" + +// ClusterManager is an example for a system that might have been built without +// Prometheus in mind. It models a central manager of jobs running in a +// cluster. To turn it into something that collects Prometheus metrics, we +// simply add the two methods required for the Collector interface. +// +// An additional challenge is that multiple instances of the ClusterManager are +// run within the same binary, each in charge of a different zone.
We need to +// make use of ConstLabels to be able to register each ClusterManager instance +// with Prometheus. +type ClusterManager struct { + Zone string + OOMCountDesc *prometheus.Desc + RAMUsageDesc *prometheus.Desc + // ... many more fields +} + +// ReallyExpensiveAssessmentOfTheSystemState is a mock for the data gathering a +// real cluster manager would have to do. Since it may actually be really +// expensive, it must only be called once per collection. This implementation, +// obviously, only returns some made-up data. +func (c *ClusterManager) ReallyExpensiveAssessmentOfTheSystemState() ( + oomCountByHost map[string]int, ramUsageByHost map[string]float64, +) { + // Just example fake data. + oomCountByHost = map[string]int{ + "foo.example.org": 42, + "bar.example.org": 2001, + } + ramUsageByHost = map[string]float64{ + "foo.example.org": 6.023e23, + "bar.example.org": 3.14, + } + return +} + +// Describe simply sends the two Descs in the struct to the channel. +func (c *ClusterManager) Describe(ch chan<- *prometheus.Desc) { + ch <- c.OOMCountDesc + ch <- c.RAMUsageDesc +} + +// Collect first triggers the ReallyExpensiveAssessmentOfTheSystemState. Then it +// creates constant metrics for each host on the fly based on the returned data. +// +// Note that Collect could be called concurrently, so we depend on +// ReallyExpensiveAssessmentOfTheSystemState to be concurrency-safe. +func (c *ClusterManager) Collect(ch chan<- prometheus.Metric) { + oomCountByHost, ramUsageByHost := c.ReallyExpensiveAssessmentOfTheSystemState() + for host, oomCount := range oomCountByHost { + ch <- prometheus.MustNewConstMetric( + c.OOMCountDesc, + prometheus.CounterValue, + float64(oomCount), + host, + ) + } + for host, ramUsage := range ramUsageByHost { + ch <- prometheus.MustNewConstMetric( + c.RAMUsageDesc, + prometheus.GaugeValue, + ramUsage, + host, + ) + } +} + +// NewClusterManager creates the two Descs OOMCountDesc and RAMUsageDesc. Note +// that the zone is set as a ConstLabel. (It's different in each instance of the +// ClusterManager, but constant over the lifetime of an instance.) Then there is +// a variable label "host", since we want to partition the collected metrics by +// host. Since all Descs created in this way are consistent across instances, +// with a guaranteed distinction by the "zone" label, we can register different +// ClusterManager instances with the same registry. +func NewClusterManager(zone string) *ClusterManager { + return &ClusterManager{ + Zone: zone, + OOMCountDesc: prometheus.NewDesc( + "clustermanager_oom_crashes_total", + "Number of OOM crashes.", + []string{"host"}, + prometheus.Labels{"zone": zone}, + ), + RAMUsageDesc: prometheus.NewDesc( + "clustermanager_ram_usage_bytes", + "RAM usage as reported to the cluster manager.", + []string{"host"}, + prometheus.Labels{"zone": zone}, + ), + } +} + +func ExampleCollector() { + workerDB := NewClusterManager("db") + workerCA := NewClusterManager("ca") + + // Since we are dealing with custom Collector implementations, it might + // be a good idea to try it out with a pedantic registry. 
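+ // (A pedantic registry runs extra consistency checks at Gather time, so + // mistakes in a hand-written Collector surface as errors early.)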
+ reg := prometheus.NewPedanticRegistry() + reg.MustRegister(workerDB) + reg.MustRegister(workerCA) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/examples_test.go b/vendor/github.com/prometheus/client_golang/prometheus/examples_test.go new file mode 100644 index 000000000000..f87f21a8f4f3 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/examples_test.go @@ -0,0 +1,751 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus_test + +import ( + "bytes" + "fmt" + "math" + "net/http" + "runtime" + "sort" + "strings" + + dto "github.com/prometheus/client_model/go" + "github.com/prometheus/common/expfmt" + + "github.com/golang/protobuf/proto" + + "github.com/prometheus/client_golang/prometheus" +) + +func ExampleGauge() { + opsQueued := prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: "our_company", + Subsystem: "blob_storage", + Name: "ops_queued", + Help: "Number of blob storage operations waiting to be processed.", + }) + prometheus.MustRegister(opsQueued) + + // 10 operations queued by the goroutine managing incoming requests. + opsQueued.Add(10) + // A worker goroutine has picked up a waiting operation. + opsQueued.Dec() + // And once more... + opsQueued.Dec() +} + +func ExampleGaugeVec() { + opsQueued := prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Namespace: "our_company", + Subsystem: "blob_storage", + Name: "ops_queued", + Help: "Number of blob storage operations waiting to be processed, partitioned by user and type.", + }, + []string{ + // Which user has requested the operation? + "user", + // Of what type is the operation? + "type", + }, + ) + prometheus.MustRegister(opsQueued) + + // Increase a value using compact (but order-sensitive!) WithLabelValues(). + opsQueued.WithLabelValues("bob", "put").Add(4) + // Increase a value with a map using WithLabels. More verbose, but order + // doesn't matter anymore. + opsQueued.With(prometheus.Labels{"type": "delete", "user": "alice"}).Inc() +} + +func ExampleGaugeFunc() { + if err := prometheus.Register(prometheus.NewGaugeFunc( + prometheus.GaugeOpts{ + Subsystem: "runtime", + Name: "goroutines_count", + Help: "Number of goroutines that currently exist.", + }, + func() float64 { return float64(runtime.NumGoroutine()) }, + )); err == nil { + fmt.Println("GaugeFunc 'goroutines_count' registered.") + } + // Note that the count of goroutines is a gauge (and not a counter) as + // it can go up and down. + + // Output: + // GaugeFunc 'goroutines_count' registered. +} + +func ExampleCounter() { + pushCounter := prometheus.NewCounter(prometheus.CounterOpts{ + Name: "repository_pushes", // Note: No help string... + }) + err := prometheus.Register(pushCounter) // ... so this will return an error. + if err != nil { + fmt.Println("Push counter couldn't be registered, no counting will happen:", err) + return + } + + // Try it once more, this time with a help string. 
+ pushCounter = prometheus.NewCounter(prometheus.CounterOpts{ + Name: "repository_pushes", + Help: "Number of pushes to external repository.", + }) + err = prometheus.Register(pushCounter) + if err != nil { + fmt.Println("Push counter couldn't be registered AGAIN, no counting will happen:", err) + return + } + + pushComplete := make(chan struct{}) + // TODO: Start a goroutine that performs repository pushes and reports + // each completion via the channel. + for _ = range pushComplete { + pushCounter.Inc() + } + // Output: + // Push counter couldn't be registered, no counting will happen: descriptor Desc{fqName: "repository_pushes", help: "", constLabels: {}, variableLabels: []} is invalid: empty help string +} + +func ExampleCounterVec() { + httpReqs := prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "http_requests_total", + Help: "How many HTTP requests processed, partitioned by status code and HTTP method.", + }, + []string{"code", "method"}, + ) + prometheus.MustRegister(httpReqs) + + httpReqs.WithLabelValues("404", "POST").Add(42) + + // If you have to access the same set of labels very frequently, it + // might be good to retrieve the metric only once and keep a handle to + // it. But beware of deletion of that metric, see below! + m := httpReqs.WithLabelValues("200", "GET") + for i := 0; i < 1000000; i++ { + m.Inc() + } + // Delete a metric from the vector. If you have previously kept a handle + // to that metric (as above), future updates via that handle will go + // unseen (even if you re-create a metric with the same label set + // later). + httpReqs.DeleteLabelValues("200", "GET") + // Same thing with the more verbose Labels syntax. + httpReqs.Delete(prometheus.Labels{"method": "GET", "code": "200"}) +} + +func ExampleInstrumentHandler() { + // Handle the "/doc" endpoint with the standard http.FileServer handler. + // By wrapping the handler with InstrumentHandler, request count, + // request and response sizes, and request latency are automatically + // exported to Prometheus, partitioned by HTTP status code and method + // and by the handler name (here "fileserver"). + http.Handle("/doc", prometheus.InstrumentHandler( + "fileserver", http.FileServer(http.Dir("/usr/share/doc")), + )) + // The Prometheus handler still has to be registered to handle the + // "/metrics" endpoint. The handler returned by prometheus.Handler() is + // already instrumented - with "prometheus" as the handler name. In this + // example, we want the handler name to be "metrics", so we instrument + // the uninstrumented Prometheus handler ourselves. + http.Handle("/metrics", prometheus.InstrumentHandler( + "metrics", prometheus.UninstrumentedHandler(), + )) +} + +func ExampleLabelPairSorter() { + labelPairs := []*dto.LabelPair{ + &dto.LabelPair{Name: proto.String("status"), Value: proto.String("404")}, + &dto.LabelPair{Name: proto.String("method"), Value: proto.String("get")}, + } + + sort.Sort(prometheus.LabelPairSorter(labelPairs)) + + fmt.Println(labelPairs) + // Output: + // [name:"method" value:"get" name:"status" value:"404" ] +} + +func ExampleRegister() { + // Imagine you have a worker pool and want to count the tasks completed. + taskCounter := prometheus.NewCounter(prometheus.CounterOpts{ + Subsystem: "worker_pool", + Name: "completed_tasks_total", + Help: "Total number of tasks completed.", + }) + // This will register fine. 
+ if err := prometheus.Register(taskCounter); err != nil { + fmt.Println(err) + } else { + fmt.Println("taskCounter registered.") + } + // Don't forget to tell the HTTP server about the Prometheus handler. + // (In a real program, you still need to start the HTTP server...) + http.Handle("/metrics", prometheus.Handler()) + + // Now you can start workers and give every one of them a pointer to + // taskCounter and let it increment it whenever it completes a task. + taskCounter.Inc() // This has to happen somewhere in the worker code. + + // But wait, you want to see how individual workers perform. So you need + // a vector of counters, with one element for each worker. + taskCounterVec := prometheus.NewCounterVec( + prometheus.CounterOpts{ + Subsystem: "worker_pool", + Name: "completed_tasks_total", + Help: "Total number of tasks completed.", + }, + []string{"worker_id"}, + ) + + // Registering will fail because we already have a metric of that name. + if err := prometheus.Register(taskCounterVec); err != nil { + fmt.Println("taskCounterVec not registered:", err) + } else { + fmt.Println("taskCounterVec registered.") + } + + // To fix, first unregister the old taskCounter. + if prometheus.Unregister(taskCounter) { + fmt.Println("taskCounter unregistered.") + } + + // Try registering taskCounterVec again. + if err := prometheus.Register(taskCounterVec); err != nil { + fmt.Println("taskCounterVec not registered:", err) + } else { + fmt.Println("taskCounterVec registered.") + } + // Bummer! Still doesn't work. + + // Prometheus will not allow you to ever export metrics with + // inconsistent help strings or label names. After unregistering, the + // unregistered metrics will cease to show up in the /metrics HTTP + // response, but the registry still remembers that those metrics had + // been exported before. For this example, we will now choose a + // different name. (In a real program, you would obviously not export + // the obsolete metric in the first place.) + taskCounterVec = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Subsystem: "worker_pool", + Name: "completed_tasks_by_id", + Help: "Total number of tasks completed.", + }, + []string{"worker_id"}, + ) + if err := prometheus.Register(taskCounterVec); err != nil { + fmt.Println("taskCounterVec not registered:", err) + } else { + fmt.Println("taskCounterVec registered.") + } + // Finally it worked! + + // The workers have to tell taskCounterVec their id to increment the + // right element in the metric vector. + taskCounterVec.WithLabelValues("42").Inc() // Code from worker 42. + + // Each worker could also keep a reference to their own counter element + // around. Pick the counter at initialization time of the worker. + myCounter := taskCounterVec.WithLabelValues("42") // From worker 42 initialization code. + myCounter.Inc() // Somewhere in the code of that worker. + + // Note that something like WithLabelValues("42", "spurious arg") would + // panic (because you have provided too many label values). If you want + // to get an error instead, use GetMetricWithLabelValues(...) instead. + notMyCounter, err := taskCounterVec.GetMetricWithLabelValues("42", "spurious arg") + if err != nil { + fmt.Println("Worker initialization failed:", err) + } + if notMyCounter == nil { + fmt.Println("notMyCounter is nil.") + } + + // A different (and somewhat tricky) approach is to use + // ConstLabels. ConstLabels are pairs of label names and label values + // that never change. 
You might ask what those labels are good for (and + // rightfully so - if they never change, they could as well be part of + // the metric name). There are essentially two use-cases: The first is + // if labels are constant throughout the lifetime of a binary execution, + // but they vary over time or between different instances of a running + // binary. The second is what we have here: Each worker creates and + // registers an own Counter instance where the only difference is in the + // value of the ConstLabels. Those Counters can all be registered + // because the different ConstLabel values guarantee that each worker + // will increment a different Counter metric. + counterOpts := prometheus.CounterOpts{ + Subsystem: "worker_pool", + Name: "completed_tasks", + Help: "Total number of tasks completed.", + ConstLabels: prometheus.Labels{"worker_id": "42"}, + } + taskCounterForWorker42 := prometheus.NewCounter(counterOpts) + if err := prometheus.Register(taskCounterForWorker42); err != nil { + fmt.Println("taskCounterVForWorker42 not registered:", err) + } else { + fmt.Println("taskCounterForWorker42 registered.") + } + // Obviously, in real code, taskCounterForWorker42 would be a member + // variable of a worker struct, and the "42" would be retrieved with a + // GetId() method or something. The Counter would be created and + // registered in the initialization code of the worker. + + // For the creation of the next Counter, we can recycle + // counterOpts. Just change the ConstLabels. + counterOpts.ConstLabels = prometheus.Labels{"worker_id": "2001"} + taskCounterForWorker2001 := prometheus.NewCounter(counterOpts) + if err := prometheus.Register(taskCounterForWorker2001); err != nil { + fmt.Println("taskCounterVForWorker2001 not registered:", err) + } else { + fmt.Println("taskCounterForWorker2001 registered.") + } + + taskCounterForWorker2001.Inc() + taskCounterForWorker42.Inc() + taskCounterForWorker2001.Inc() + + // Yet another approach would be to turn the workers themselves into + // Collectors and register them. See the Collector example for details. + + // Output: + // taskCounter registered. + // taskCounterVec not registered: a previously registered descriptor with the same fully-qualified name as Desc{fqName: "worker_pool_completed_tasks_total", help: "Total number of tasks completed.", constLabels: {}, variableLabels: [worker_id]} has different label names or a different help string + // taskCounter unregistered. + // taskCounterVec not registered: a previously registered descriptor with the same fully-qualified name as Desc{fqName: "worker_pool_completed_tasks_total", help: "Total number of tasks completed.", constLabels: {}, variableLabels: [worker_id]} has different label names or a different help string + // taskCounterVec registered. + // Worker initialization failed: inconsistent label cardinality + // notMyCounter is nil. + // taskCounterForWorker42 registered. + // taskCounterForWorker2001 registered. +} + +func ExampleSummary() { + temps := prometheus.NewSummary(prometheus.SummaryOpts{ + Name: "pond_temperature_celsius", + Help: "The temperature of the frog pond.", // Sorry, we can't measure how badly it smells. + }) + + // Simulate some observations. + for i := 0; i < 1000; i++ { + temps.Observe(30 + math.Floor(120*math.Sin(float64(i)*0.1))/10) + } + + // Just for demonstration, let's check the state of the summary by + // (ab)using its Write method (which is usually only used by Prometheus + // internally). 
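+ // Write fills the supplied dto.Metric protobuf in place; + // proto.MarshalTextString then renders it as readable text.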
+ metric := &dto.Metric{} + temps.Write(metric) + fmt.Println(proto.MarshalTextString(metric)) + + // Output: + // summary: < + // sample_count: 1000 + // sample_sum: 29969.50000000001 + // quantile: < + // quantile: 0.5 + // value: 31.1 + // > + // quantile: < + // quantile: 0.9 + // value: 41.3 + // > + // quantile: < + // quantile: 0.99 + // value: 41.9 + // > + // > +} + +func ExampleSummaryVec() { + temps := prometheus.NewSummaryVec( + prometheus.SummaryOpts{ + Name: "pond_temperature_celsius", + Help: "The temperature of the frog pond.", // Sorry, we can't measure how badly it smells. + }, + []string{"species"}, + ) + + // Simulate some observations. + for i := 0; i < 1000; i++ { + temps.WithLabelValues("litoria-caerulea").Observe(30 + math.Floor(120*math.Sin(float64(i)*0.1))/10) + temps.WithLabelValues("lithobates-catesbeianus").Observe(32 + math.Floor(100*math.Cos(float64(i)*0.11))/10) + } + + // Create a Summary without any observations. + temps.WithLabelValues("leiopelma-hochstetteri") + + // Just for demonstration, let's check the state of the summary vector + // by registering it with a custom registry and then let it collect the + // metrics. + reg := prometheus.NewRegistry() + reg.MustRegister(temps) + + metricFamilies, err := reg.Gather() + if err != nil || len(metricFamilies) != 1 { + panic("unexpected behavior of custom test registry") + } + fmt.Println(proto.MarshalTextString(metricFamilies[0])) + + // Output: + // name: "pond_temperature_celsius" + // help: "The temperature of the frog pond." + // type: SUMMARY + // metric: < + // label: < + // name: "species" + // value: "leiopelma-hochstetteri" + // > + // summary: < + // sample_count: 0 + // sample_sum: 0 + // quantile: < + // quantile: 0.5 + // value: nan + // > + // quantile: < + // quantile: 0.9 + // value: nan + // > + // quantile: < + // quantile: 0.99 + // value: nan + // > + // > + // > + // metric: < + // label: < + // name: "species" + // value: "lithobates-catesbeianus" + // > + // summary: < + // sample_count: 1000 + // sample_sum: 31956.100000000017 + // quantile: < + // quantile: 0.5 + // value: 32.4 + // > + // quantile: < + // quantile: 0.9 + // value: 41.4 + // > + // quantile: < + // quantile: 0.99 + // value: 41.9 + // > + // > + // > + // metric: < + // label: < + // name: "species" + // value: "litoria-caerulea" + // > + // summary: < + // sample_count: 1000 + // sample_sum: 29969.50000000001 + // quantile: < + // quantile: 0.5 + // value: 31.1 + // > + // quantile: < + // quantile: 0.9 + // value: 41.3 + // > + // quantile: < + // quantile: 0.99 + // value: 41.9 + // > + // > + // > +} + +func ExampleNewConstSummary() { + desc := prometheus.NewDesc( + "http_request_duration_seconds", + "A summary of the HTTP request durations.", + []string{"code", "method"}, + prometheus.Labels{"owner": "example"}, + ) + + // Create a constant summary from values we got from a 3rd party telemetry system. + s := prometheus.MustNewConstSummary( + desc, + 4711, 403.34, + map[float64]float64{0.5: 42.3, 0.9: 323.3}, + "200", "get", + ) + + // Just for demonstration, let's check the state of the summary by + // (ab)using its Write method (which is usually only used by Prometheus + // internally). 
+ metric := &dto.Metric{} + s.Write(metric) + fmt.Println(proto.MarshalTextString(metric)) + + // Output: + // label: < + // name: "code" + // value: "200" + // > + // label: < + // name: "method" + // value: "get" + // > + // label: < + // name: "owner" + // value: "example" + // > + // summary: < + // sample_count: 4711 + // sample_sum: 403.34 + // quantile: < + // quantile: 0.5 + // value: 42.3 + // > + // quantile: < + // quantile: 0.9 + // value: 323.3 + // > + // > +} + +func ExampleHistogram() { + temps := prometheus.NewHistogram(prometheus.HistogramOpts{ + Name: "pond_temperature_celsius", + Help: "The temperature of the frog pond.", // Sorry, we can't measure how badly it smells. + Buckets: prometheus.LinearBuckets(20, 5, 5), // 5 buckets, each 5 centigrade wide. + }) + + // Simulate some observations. + for i := 0; i < 1000; i++ { + temps.Observe(30 + math.Floor(120*math.Sin(float64(i)*0.1))/10) + } + + // Just for demonstration, let's check the state of the histogram by + // (ab)using its Write method (which is usually only used by Prometheus + // internally). + metric := &dto.Metric{} + temps.Write(metric) + fmt.Println(proto.MarshalTextString(metric)) + + // Output: + // histogram: < + // sample_count: 1000 + // sample_sum: 29969.50000000001 + // bucket: < + // cumulative_count: 192 + // upper_bound: 20 + // > + // bucket: < + // cumulative_count: 366 + // upper_bound: 25 + // > + // bucket: < + // cumulative_count: 501 + // upper_bound: 30 + // > + // bucket: < + // cumulative_count: 638 + // upper_bound: 35 + // > + // bucket: < + // cumulative_count: 816 + // upper_bound: 40 + // > + // > +} + +func ExampleNewConstHistogram() { + desc := prometheus.NewDesc( + "http_request_duration_seconds", + "A histogram of the HTTP request durations.", + []string{"code", "method"}, + prometheus.Labels{"owner": "example"}, + ) + + // Create a constant histogram from values we got from a 3rd party telemetry system. + h := prometheus.MustNewConstHistogram( + desc, + 4711, 403.34, + map[float64]uint64{25: 121, 50: 2403, 100: 3221, 200: 4233}, + "200", "get", + ) + + // Just for demonstration, let's check the state of the histogram by + // (ab)using its Write method (which is usually only used by Prometheus + // internally). + metric := &dto.Metric{} + h.Write(metric) + fmt.Println(proto.MarshalTextString(metric)) + + // Output: + // label: < + // name: "code" + // value: "200" + // > + // label: < + // name: "method" + // value: "get" + // > + // label: < + // name: "owner" + // value: "example" + // > + // histogram: < + // sample_count: 4711 + // sample_sum: 403.34 + // bucket: < + // cumulative_count: 121 + // upper_bound: 25 + // > + // bucket: < + // cumulative_count: 2403 + // upper_bound: 50 + // > + // bucket: < + // cumulative_count: 3221 + // upper_bound: 100 + // > + // bucket: < + // cumulative_count: 4233 + // upper_bound: 200 + // > + // > +} + +func ExampleAlreadyRegisteredError() { + reqCounter := prometheus.NewCounter(prometheus.CounterOpts{ + Name: "requests_total", + Help: "The total number of requests served.", + }) + if err := prometheus.Register(reqCounter); err != nil { + if are, ok := err.(prometheus.AlreadyRegisteredError); ok { + // A counter for that metric has been registered before. + // Use the old counter from now on. + reqCounter = are.ExistingCollector.(prometheus.Counter) + } else { + // Something else went wrong! 
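+ // Neither nil nor an AlreadyRegisteredError: the registration + // itself failed (for example because of an invalid descriptor), + // so there is no existing collector to fall back on.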
+ panic(err) + } + } +} + +func ExampleGatherers() { + reg := prometheus.NewRegistry() + temp := prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Name: "temperature_kelvin", + Help: "Temperature in Kelvin.", + }, + []string{"location"}, + ) + reg.MustRegister(temp) + temp.WithLabelValues("outside").Set(273.14) + temp.WithLabelValues("inside").Set(298.44) + + var parser expfmt.TextParser + + text := ` +# TYPE humidity_percent gauge +# HELP humidity_percent Humidity in %. +humidity_percent{location="outside"} 45.4 +humidity_percent{location="inside"} 33.2 +# TYPE temperature_kelvin gauge +# HELP temperature_kelvin Temperature in Kelvin. +temperature_kelvin{location="somewhere else"} 4.5 +` + + parseText := func() ([]*dto.MetricFamily, error) { + parsed, err := parser.TextToMetricFamilies(strings.NewReader(text)) + if err != nil { + return nil, err + } + var result []*dto.MetricFamily + for _, mf := range parsed { + result = append(result, mf) + } + return result, nil + } + + gatherers := prometheus.Gatherers{ + reg, + prometheus.GathererFunc(parseText), + } + + gathering, err := gatherers.Gather() + if err != nil { + fmt.Println(err) + } + + out := &bytes.Buffer{} + for _, mf := range gathering { + if _, err := expfmt.MetricFamilyToText(out, mf); err != nil { + panic(err) + } + } + fmt.Print(out.String()) + fmt.Println("----------") + + // Note how the temperature_kelvin metric family has been merged from + // different sources. Now try the same with a duplicate metric and a + // metric with wrong labels: + text = ` +# TYPE humidity_percent gauge +# HELP humidity_percent Humidity in %. +humidity_percent{location="outside"} 45.4 +humidity_percent{location="inside"} 33.2 +# TYPE temperature_kelvin gauge +# HELP temperature_kelvin Temperature in Kelvin. +# Duplicate metric: +temperature_kelvin{location="outside"} 265.3 + # Wrong labels: +temperature_kelvin 4.5 +` + + gathering, err = gatherers.Gather() + if err != nil { + fmt.Println(err) + } + // Note that, despite the errors, as many metrics as possible are still returned: + out.Reset() + for _, mf := range gathering { + if _, err := expfmt.MetricFamilyToText(out, mf); err != nil { + panic(err) + } + } + fmt.Print(out.String()) + + // Output: + // # HELP humidity_percent Humidity in %. + // # TYPE humidity_percent gauge + // humidity_percent{location="inside"} 33.2 + // humidity_percent{location="outside"} 45.4 + // # HELP temperature_kelvin Temperature in Kelvin. + // # TYPE temperature_kelvin gauge + // temperature_kelvin{location="inside"} 298.44 + // temperature_kelvin{location="outside"} 273.14 + // temperature_kelvin{location="somewhere else"} 4.5 + // ---------- + // 2 error(s) occurred: + // * collected metric temperature_kelvin label:<name:"location" value:"outside" > gauge:<value:265.3 > was collected before with the same name and label values + // * collected metric temperature_kelvin gauge:<value:4.5 > has label dimensions inconsistent with previously collected metrics in the same metric family + // # HELP humidity_percent Humidity in %. + // # TYPE humidity_percent gauge + // humidity_percent{location="inside"} 33.2 + // humidity_percent{location="outside"} 45.4 + // # HELP temperature_kelvin Temperature in Kelvin.
+ // # TYPE temperature_kelvin gauge + // temperature_kelvin{location="inside"} 298.44 + // temperature_kelvin{location="outside"} 273.14 +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector_test.go b/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector_test.go new file mode 100644 index 000000000000..5d3128faed68 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector_test.go @@ -0,0 +1,97 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus_test + +import ( + "expvar" + "fmt" + "sort" + "strings" + + dto "github.com/prometheus/client_model/go" + + "github.com/prometheus/client_golang/prometheus" +) + +func ExampleExpvarCollector() { + expvarCollector := prometheus.NewExpvarCollector(map[string]*prometheus.Desc{ + "memstats": prometheus.NewDesc( + "expvar_memstats", + "All numeric memstats as one metric family. Not a good role-model, actually... ;-)", + []string{"type"}, nil, + ), + "lone-int": prometheus.NewDesc( + "expvar_lone_int", + "Just an expvar int as an example.", + nil, nil, + ), + "http-request-map": prometheus.NewDesc( + "expvar_http_request_total", + "How many http requests processed, partitioned by status code and http method.", + []string{"code", "method"}, nil, + ), + }) + prometheus.MustRegister(expvarCollector) + + // The Prometheus part is done here. But to show that this example is + // doing anything, we have to manually export something via expvar. In + // real-life use-cases, some library would already have exported via + // expvar what we want to re-export as Prometheus metrics. + expvar.NewInt("lone-int").Set(42) + expvarMap := expvar.NewMap("http-request-map") + var ( + expvarMap1, expvarMap2 expvar.Map + expvarInt11, expvarInt12, expvarInt21, expvarInt22 expvar.Int + ) + expvarMap1.Init() + expvarMap2.Init() + expvarInt11.Set(3) + expvarInt12.Set(13) + expvarInt21.Set(11) + expvarInt22.Set(212) + expvarMap1.Set("POST", &expvarInt11) + expvarMap1.Set("GET", &expvarInt12) + expvarMap2.Set("POST", &expvarInt21) + expvarMap2.Set("GET", &expvarInt22) + expvarMap.Set("404", &expvarMap1) + expvarMap.Set("200", &expvarMap2) + // Results in the following expvar map: + // "http-request-map": {"200": {"POST": 11, "GET": 212}, "404": {"POST": 3, "GET": 13}} + + // Let's see what the scrape would yield, but exclude the memstats metrics.
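+ // Collect sends on the unbuffered channel and would block forever + // without a receiver, so it runs in its own goroutine and closes the + // channel when done; the loop below drains it concurrently.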
+ metricStrings := []string{} + metric := dto.Metric{} + metricChan := make(chan prometheus.Metric) + go func() { + expvarCollector.Collect(metricChan) + close(metricChan) + }() + for m := range metricChan { + if !strings.Contains(m.Desc().String(), "expvar_memstats") { + metric.Reset() + m.Write(&metric) + metricStrings = append(metricStrings, metric.String()) + } + } + sort.Strings(metricStrings) + for _, s := range metricStrings { + fmt.Println(strings.TrimRight(s, " ")) + } + // Output: + // label:<name:"code" value:"200" > label:<name:"method" value:"GET" > untyped:<value:212 > + // label:<name:"code" value:"200" > label:<name:"method" value:"POST" > untyped:<value:11 > + // label:<name:"code" value:"404" > label:<name:"method" value:"GET" > untyped:<value:13 > + // label:<name:"code" value:"404" > label:<name:"method" value:"POST" > untyped:<value:3 > + // untyped:<value:42 > +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/gauge_test.go b/vendor/github.com/prometheus/client_golang/prometheus/gauge_test.go new file mode 100644 index 000000000000..48cab463671b --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/gauge_test.go @@ -0,0 +1,182 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "math" + "math/rand" + "sync" + "testing" + "testing/quick" + + dto "github.com/prometheus/client_model/go" +) + +func listenGaugeStream(vals, result chan float64, done chan struct{}) { + var sum float64 +outer: + for { + select { + case <-done: + close(vals) + for v := range vals { + sum += v + } + break outer + case v := <-vals: + sum += v + } + } + result <- sum + close(result) +} + +func TestGaugeConcurrency(t *testing.T) { + it := func(n uint32) bool { + mutations := int(n % 10000) + concLevel := int(n%15 + 1) + + var start, end sync.WaitGroup + start.Add(1) + end.Add(concLevel) + + sStream := make(chan float64, mutations*concLevel) + result := make(chan float64) + done := make(chan struct{}) + + go listenGaugeStream(sStream, result, done) + go func() { + end.Wait() + close(done) + }() + + gge := NewGauge(GaugeOpts{ + Name: "test_gauge", + Help: "no help can be found here", + }) + for i := 0; i < concLevel; i++ { + vals := make([]float64, mutations) + for j := 0; j < mutations; j++ { + vals[j] = rand.Float64() - 0.5 + } + + go func(vals []float64) { + start.Wait() + for _, v := range vals { + sStream <- v + gge.Add(v) + } + end.Done() + }(vals) + } + start.Done() + + if expected, got := <-result, math.Float64frombits(gge.(*value).valBits); math.Abs(expected-got) > 0.000001 { + t.Fatalf("expected approx. %f, got %f", expected, got)
+ return false + } + return true + } + + if err := quick.Check(it, nil); err != nil { + t.Fatal(err) + } +} + +func TestGaugeVecConcurrency(t *testing.T) { + it := func(n uint32) bool { + mutations := int(n % 10000) + concLevel := int(n%15 + 1) + vecLength := int(n%5 + 1) + + var start, end sync.WaitGroup + start.Add(1) + end.Add(concLevel) + + sStreams := make([]chan float64, vecLength) + results := make([]chan float64, vecLength) + done := make(chan struct{}) + + for i := 0; i < vecLength; i++ { + sStreams[i] = make(chan float64, mutations*concLevel) + results[i] = make(chan float64) + go listenGaugeStream(sStreams[i], results[i], done) + } + + go func() { + end.Wait() + close(done) + }() + + gge := NewGaugeVec( + GaugeOpts{ + Name: "test_gauge", + Help: "no help can be found here", + }, + []string{"label"}, + ) + for i := 0; i < concLevel; i++ { + vals := make([]float64, mutations) + pick := make([]int, mutations) + for j := 0; j < mutations; j++ { + vals[j] = rand.Float64() - 0.5 + pick[j] = rand.Intn(vecLength) + } + + go func(vals []float64) { + start.Wait() + for i, v := range vals { + sStreams[pick[i]] <- v + gge.WithLabelValues(string('A' + pick[i])).Add(v) + } + end.Done() + }(vals) + } + start.Done() + + for i := range sStreams { + if expected, got := <-results[i], math.Float64frombits(gge.WithLabelValues(string('A'+i)).(*value).valBits); math.Abs(expected-got) > 0.000001 { + t.Fatalf("expected approx. %f, got %f", expected, got) + return false + } + } + return true + } + + if err := quick.Check(it, nil); err != nil { + t.Fatal(err) + } +} + +func TestGaugeFunc(t *testing.T) { + gf := NewGaugeFunc( + GaugeOpts{ + Name: "test_name", + Help: "test help", + ConstLabels: Labels{"a": "1", "b": "2"}, + }, + func() float64 { return 3.1415 }, + ) + + if expected, got := `Desc{fqName: "test_name", help: "test help", constLabels: {a="1",b="2"}, variableLabels: []}`, gf.Desc().String(); expected != got { + t.Errorf("expected %q, got %q", expected, got) + } + + m := &dto.Metric{} + gf.Write(m) + + if expected, got := `label:<name:"a" value:"1" > label:<name:"b" value:"2" > gauge:<value:3.1415 > `, m.String(); expected != got { + t.Errorf("expected %q, got %q", expected, got) + } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_test.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_test.go new file mode 100644 index 000000000000..9a8858cbd2b8 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_test.go @@ -0,0 +1,123 @@ +package prometheus + +import ( + "runtime" + "testing" + "time" + + dto "github.com/prometheus/client_model/go" +) + +func TestGoCollector(t *testing.T) { + var ( + c = NewGoCollector() + ch = make(chan Metric) + waitc = make(chan struct{}) + closec = make(chan struct{}) + old = -1 + ) + defer close(closec) + + go func() { + c.Collect(ch) + go func(c <-chan struct{}) { + <-c + }(closec) + <-waitc + c.Collect(ch) + }() + + for { + select { + case metric := <-ch: + switch m := metric.(type) { + // Attention, this also catches Counter... + case Gauge: + pb := &dto.Metric{} + m.Write(pb) + if pb.GetGauge() == nil { + continue + } + + if old == -1 { + old = int(pb.GetGauge().GetValue()) + close(waitc) + continue + } + + if diff := int(pb.GetGauge().GetValue()) - old; diff != 1 { + // TODO: This is flaky in highly concurrent situations. + t.Errorf("want 1 new goroutine, got %d", diff) + } + + // GoCollector performs two sends per Collect call. The goroutine + // above is still blocked on the second send of its last call; + // receive it so everything shuts down cleanly.
+ <-ch + return + } + case <-time.After(1 * time.Second): + t.Fatalf("Collect did not finish within the timeout") + } + } +} + +func TestGCCollector(t *testing.T) { + var ( + c = NewGoCollector() + ch = make(chan Metric) + waitc = make(chan struct{}) + closec = make(chan struct{}) + oldGC uint64 + oldPause float64 + ) + defer close(closec) + + go func() { + c.Collect(ch) + // force GC + runtime.GC() + <-waitc + c.Collect(ch) + }() + + first := true + for { + select { + case metric := <-ch: + switch m := metric.(type) { + case *constSummary, *value: + pb := &dto.Metric{} + m.Write(pb) + if pb.GetSummary() == nil { + continue + } + + if len(pb.GetSummary().Quantile) != 5 { + t.Errorf("expected 5 quantiles, got %d", len(pb.GetSummary().Quantile)) + } + for idx, want := range []float64{0.0, 0.25, 0.5, 0.75, 1.0} { + if *pb.GetSummary().Quantile[idx].Quantile != want { + t.Errorf("quantile #%d is off, got %f, want %f", idx, *pb.GetSummary().Quantile[idx].Quantile, want) + } + } + if first { + first = false + oldGC = *pb.GetSummary().SampleCount + oldPause = *pb.GetSummary().SampleSum + close(waitc) + continue + } + if diff := *pb.GetSummary().SampleCount - oldGC; diff != 1 { + t.Errorf("want 1 new garbage collection run, got %d", diff) + } + if diff := *pb.GetSummary().SampleSum - oldPause; diff <= 0 { + t.Errorf("want more pause time, got %f", diff) + } + return + } + case <-time.After(1 * time.Second): + t.Fatalf("Collect did not finish within the timeout") + } + } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram_test.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram_test.go new file mode 100644 index 000000000000..d1242e08d6ae --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram_test.go @@ -0,0 +1,326 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
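(The two tests above drive the Go collector by calling Collect by hand. In ordinary use it is simply registered and scraped; a minimal sketch, with the registry plumbing being illustrative rather than taken from the tests:)

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	reg := prometheus.NewRegistry()
	reg.MustRegister(prometheus.NewGoCollector())

	// Every Gather (i.e. every scrape) now reports goroutine counts,
	// GC pause quantiles, and other runtime metrics.
	mfs, err := reg.Gather()
	if err != nil {
		fmt.Println("gathering failed:", err)
		return
	}
	for _, mf := range mfs {
		fmt.Println(mf.GetName())
	}
}
```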
+ +package prometheus + +import ( + "math" + "math/rand" + "reflect" + "sort" + "sync" + "testing" + "testing/quick" + + dto "github.com/prometheus/client_model/go" +) + +func benchmarkHistogramObserve(w int, b *testing.B) { + b.StopTimer() + + wg := new(sync.WaitGroup) + wg.Add(w) + + g := new(sync.WaitGroup) + g.Add(1) + + s := NewHistogram(HistogramOpts{}) + + for i := 0; i < w; i++ { + go func() { + g.Wait() + + for i := 0; i < b.N; i++ { + s.Observe(float64(i)) + } + + wg.Done() + }() + } + + b.StartTimer() + g.Done() + wg.Wait() +} + +func BenchmarkHistogramObserve1(b *testing.B) { + benchmarkHistogramObserve(1, b) +} + +func BenchmarkHistogramObserve2(b *testing.B) { + benchmarkHistogramObserve(2, b) +} + +func BenchmarkHistogramObserve4(b *testing.B) { + benchmarkHistogramObserve(4, b) +} + +func BenchmarkHistogramObserve8(b *testing.B) { + benchmarkHistogramObserve(8, b) +} + +func benchmarkHistogramWrite(w int, b *testing.B) { + b.StopTimer() + + wg := new(sync.WaitGroup) + wg.Add(w) + + g := new(sync.WaitGroup) + g.Add(1) + + s := NewHistogram(HistogramOpts{}) + + for i := 0; i < 1000000; i++ { + s.Observe(float64(i)) + } + + for j := 0; j < w; j++ { + outs := make([]dto.Metric, b.N) + + go func(o []dto.Metric) { + g.Wait() + + for i := 0; i < b.N; i++ { + s.Write(&o[i]) + } + + wg.Done() + }(outs) + } + + b.StartTimer() + g.Done() + wg.Wait() +} + +func BenchmarkHistogramWrite1(b *testing.B) { + benchmarkHistogramWrite(1, b) +} + +func BenchmarkHistogramWrite2(b *testing.B) { + benchmarkHistogramWrite(2, b) +} + +func BenchmarkHistogramWrite4(b *testing.B) { + benchmarkHistogramWrite(4, b) +} + +func BenchmarkHistogramWrite8(b *testing.B) { + benchmarkHistogramWrite(8, b) +} + +// Intentionally adding +Inf here to test if that case is handled correctly. +// Also, getCumulativeCounts depends on it. +var testBuckets = []float64{-2, -1, -0.5, 0, 0.5, 1, 2, math.Inf(+1)} + +func TestHistogramConcurrency(t *testing.T) { + if testing.Short() { + t.Skip("Skipping test in short mode.") + } + + rand.Seed(42) + + it := func(n uint32) bool { + mutations := int(n%1e4 + 1e4) + concLevel := int(n%5 + 1) + total := mutations * concLevel + + var start, end sync.WaitGroup + start.Add(1) + end.Add(concLevel) + + sum := NewHistogram(HistogramOpts{ + Name: "test_histogram", + Help: "helpless", + Buckets: testBuckets, + }) + + allVars := make([]float64, total) + var sampleSum float64 + for i := 0; i < concLevel; i++ { + vals := make([]float64, mutations) + for j := 0; j < mutations; j++ { + v := rand.NormFloat64() + vals[j] = v + allVars[i*mutations+j] = v + sampleSum += v + } + + go func(vals []float64) { + start.Wait() + for _, v := range vals { + sum.Observe(v) + } + end.Done() + }(vals) + } + sort.Float64s(allVars) + start.Done() + end.Wait() + + m := &dto.Metric{} + sum.Write(m) + if got, want := int(*m.Histogram.SampleCount), total; got != want { + t.Errorf("got sample count %d, want %d", got, want) + } + if got, want := *m.Histogram.SampleSum, sampleSum; math.Abs((got-want)/want) > 0.001 { + t.Errorf("got sample sum %f, want %f", got, want) + } + + wantCounts := getCumulativeCounts(allVars) + + if got, want := len(m.Histogram.Bucket), len(testBuckets)-1; got != want { + t.Errorf("got %d buckets in protobuf, want %d", got, want) + } + for i, wantBound := range testBuckets { + if i == len(testBuckets)-1 { + break // No +Inf bucket in protobuf. 
+ } + if gotBound := *m.Histogram.Bucket[i].UpperBound; gotBound != wantBound { + t.Errorf("got bound %f, want %f", gotBound, wantBound) + } + if gotCount, wantCount := *m.Histogram.Bucket[i].CumulativeCount, wantCounts[i]; gotCount != wantCount { + t.Errorf("got count %d, want %d", gotCount, wantCount) + } + } + return true + } + + if err := quick.Check(it, nil); err != nil { + t.Error(err) + } +} + +func TestHistogramVecConcurrency(t *testing.T) { + if testing.Short() { + t.Skip("Skipping test in short mode.") + } + + rand.Seed(42) + + objectives := make([]float64, 0, len(DefObjectives)) + for qu := range DefObjectives { + + objectives = append(objectives, qu) + } + sort.Float64s(objectives) + + it := func(n uint32) bool { + mutations := int(n%1e4 + 1e4) + concLevel := int(n%7 + 1) + vecLength := int(n%3 + 1) + + var start, end sync.WaitGroup + start.Add(1) + end.Add(concLevel) + + his := NewHistogramVec( + HistogramOpts{ + Name: "test_histogram", + Help: "helpless", + Buckets: []float64{-2, -1, -0.5, 0, 0.5, 1, 2, math.Inf(+1)}, + }, + []string{"label"}, + ) + + allVars := make([][]float64, vecLength) + sampleSums := make([]float64, vecLength) + for i := 0; i < concLevel; i++ { + vals := make([]float64, mutations) + picks := make([]int, mutations) + for j := 0; j < mutations; j++ { + v := rand.NormFloat64() + vals[j] = v + pick := rand.Intn(vecLength) + picks[j] = pick + allVars[pick] = append(allVars[pick], v) + sampleSums[pick] += v + } + + go func(vals []float64) { + start.Wait() + for i, v := range vals { + his.WithLabelValues(string('A' + picks[i])).Observe(v) + } + end.Done() + }(vals) + } + for _, vars := range allVars { + sort.Float64s(vars) + } + start.Done() + end.Wait() + + for i := 0; i < vecLength; i++ { + m := &dto.Metric{} + s := his.WithLabelValues(string('A' + i)) + s.Write(m) + + if got, want := len(m.Histogram.Bucket), len(testBuckets)-1; got != want { + t.Errorf("got %d buckets in protobuf, want %d", got, want) + } + if got, want := int(*m.Histogram.SampleCount), len(allVars[i]); got != want { + t.Errorf("got sample count %d, want %d", got, want) + } + if got, want := *m.Histogram.SampleSum, sampleSums[i]; math.Abs((got-want)/want) > 0.001 { + t.Errorf("got sample sum %f, want %f", got, want) + } + + wantCounts := getCumulativeCounts(allVars[i]) + + for j, wantBound := range testBuckets { + if j == len(testBuckets)-1 { + break // No +Inf bucket in protobuf. 
+ } + if gotBound := *m.Histogram.Bucket[j].UpperBound; gotBound != wantBound { + t.Errorf("got bound %f, want %f", gotBound, wantBound) + } + if gotCount, wantCount := *m.Histogram.Bucket[j].CumulativeCount, wantCounts[j]; gotCount != wantCount { + t.Errorf("got count %d, want %d", gotCount, wantCount) + } + } + } + return true + } + + if err := quick.Check(it, nil); err != nil { + t.Error(err) + } +} + +func getCumulativeCounts(vars []float64) []uint64 { + counts := make([]uint64, len(testBuckets)) + for _, v := range vars { + for i := len(testBuckets) - 1; i >= 0; i-- { + if v > testBuckets[i] { + break + } + counts[i]++ + } + } + return counts +} + +func TestBuckets(t *testing.T) { + got := LinearBuckets(-15, 5, 6) + want := []float64{-15, -10, -5, 0, 5, 10} + if !reflect.DeepEqual(got, want) { + t.Errorf("linear buckets: got %v, want %v", got, want) + } + + got = ExponentialBuckets(100, 1.2, 3) + want = []float64{100, 120, 144} + if !reflect.DeepEqual(got, want) { + t.Errorf("exponential buckets: got %v, want %v", got, want) + } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/http_test.go b/vendor/github.com/prometheus/client_golang/prometheus/http_test.go new file mode 100644 index 000000000000..ffe0418cf879 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/http_test.go @@ -0,0 +1,121 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "net/http" + "net/http/httptest" + "testing" + "time" + + dto "github.com/prometheus/client_model/go" +) + +type respBody string + +func (b respBody) ServeHTTP(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusTeapot) + w.Write([]byte(b)) +} + +func TestInstrumentHandler(t *testing.T) { + defer func(n nower) { + now = n.(nower) + }(now) + + instant := time.Now() + end := instant.Add(30 * time.Second) + now = nowSeries(instant, end) + respBody := respBody("Howdy there!") + + hndlr := InstrumentHandler("test-handler", respBody) + + opts := SummaryOpts{ + Subsystem: "http", + ConstLabels: Labels{"handler": "test-handler"}, + } + + reqCnt := MustRegisterOrGet(NewCounterVec( + CounterOpts{ + Namespace: opts.Namespace, + Subsystem: opts.Subsystem, + Name: "requests_total", + Help: "Total number of HTTP requests made.", + ConstLabels: opts.ConstLabels, + }, + instLabels, + )).(*CounterVec) + + opts.Name = "request_duration_microseconds" + opts.Help = "The HTTP request latencies in microseconds." + reqDur := MustRegisterOrGet(NewSummary(opts)).(Summary) + + opts.Name = "request_size_bytes" + opts.Help = "The HTTP request sizes in bytes." + MustRegisterOrGet(NewSummary(opts)) + + opts.Name = "response_size_bytes" + opts.Help = "The HTTP response sizes in bytes."
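+ // MustRegisterOrGet either registers the new Summary or, if an equal + // collector is already registered, returns the existing one, so these + // calls are safe even if the metrics were set up before.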
+ MustRegisterOrGet(NewSummary(opts)) + + reqCnt.Reset() + + resp := httptest.NewRecorder() + req := &http.Request{ + Method: "GET", + } + + hndlr.ServeHTTP(resp, req) + + if resp.Code != http.StatusTeapot { + t.Fatalf("expected status %d, got %d", http.StatusTeapot, resp.Code) + } + if string(resp.Body.Bytes()) != "Howdy there!" { + t.Fatalf("expected body %s, got %s", "Howdy there!", string(resp.Body.Bytes())) + } + + out := &dto.Metric{} + reqDur.Write(out) + if want, got := "test-handler", out.Label[0].GetValue(); want != got { + t.Errorf("want label value %q in reqDur, got %q", want, got) + } + if want, got := uint64(1), out.Summary.GetSampleCount(); want != got { + t.Errorf("want sample count %d in reqDur, got %d", want, got) + } + + out.Reset() + if want, got := 1, len(reqCnt.children); want != got { + t.Errorf("want %d children in reqCnt, got %d", want, got) + } + cnt, err := reqCnt.GetMetricWithLabelValues("get", "418") + if err != nil { + t.Fatal(err) + } + cnt.Write(out) + if want, got := "418", out.Label[0].GetValue(); want != got { + t.Errorf("want label value %q in reqCnt, got %q", want, got) + } + if want, got := "test-handler", out.Label[1].GetValue(); want != got { + t.Errorf("want label value %q in reqCnt, got %q", want, got) + } + if want, got := "get", out.Label[2].GetValue(); want != got { + t.Errorf("want label value %q in reqCnt, got %q", want, got) + } + if out.Counter == nil { + t.Fatal("expected non-nil counter in reqCnt") + } + if want, got := 1., out.Counter.GetValue(); want != got { + t.Errorf("want reqCnt of %f, got %f", want, got) + } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/metric_test.go b/vendor/github.com/prometheus/client_golang/prometheus/metric_test.go new file mode 100644 index 000000000000..7145f5e53c35 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/metric_test.go @@ -0,0 +1,35 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import "testing" + +func TestBuildFQName(t *testing.T) { + scenarios := []struct{ namespace, subsystem, name, result string }{ + {"a", "b", "c", "a_b_c"}, + {"", "b", "c", "b_c"}, + {"a", "", "c", "a_c"}, + {"", "", "c", "c"}, + {"a", "b", "", ""}, + {"a", "", "", ""}, + {"", "b", "", ""}, + {" ", "", "", ""}, + } + + for i, s := range scenarios { + if want, got := s.result, BuildFQName(s.namespace, s.subsystem, s.name); want != got { + t.Errorf("%d. 
want %s, got %s", i, want, got) + } + } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_test.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_test.go new file mode 100644 index 000000000000..d3362dae72e5 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_test.go @@ -0,0 +1,58 @@ +package prometheus + +import ( + "bytes" + "os" + "regexp" + "testing" + + "github.com/prometheus/common/expfmt" + "github.com/prometheus/procfs" +) + +func TestProcessCollector(t *testing.T) { + if _, err := procfs.Self(); err != nil { + t.Skipf("skipping TestProcessCollector, procfs not available: %s", err) + } + + registry := NewRegistry() + if err := registry.Register(NewProcessCollector(os.Getpid(), "")); err != nil { + t.Fatal(err) + } + if err := registry.Register(NewProcessCollectorPIDFn( + func() (int, error) { return os.Getpid(), nil }, "foobar"), + ); err != nil { + t.Fatal(err) + } + + mfs, err := registry.Gather() + if err != nil { + t.Fatal(err) + } + + var buf bytes.Buffer + for _, mf := range mfs { + if _, err := expfmt.MetricFamilyToText(&buf, mf); err != nil { + t.Fatal(err) + } + } + + for _, re := range []*regexp.Regexp{ + regexp.MustCompile("process_cpu_seconds_total [0-9]"), + regexp.MustCompile("process_max_fds [1-9]"), + regexp.MustCompile("process_open_fds [1-9]"), + regexp.MustCompile("process_virtual_memory_bytes [1-9]"), + regexp.MustCompile("process_resident_memory_bytes [1-9]"), + regexp.MustCompile("process_start_time_seconds [0-9.]{10,}"), + regexp.MustCompile("foobar_process_cpu_seconds_total [0-9]"), + regexp.MustCompile("foobar_process_max_fds [1-9]"), + regexp.MustCompile("foobar_process_open_fds [1-9]"), + regexp.MustCompile("foobar_process_virtual_memory_bytes [1-9]"), + regexp.MustCompile("foobar_process_resident_memory_bytes [1-9]"), + regexp.MustCompile("foobar_process_start_time_seconds [0-9.]{10,}"), + } { + if !re.Match(buf.Bytes()) { + t.Errorf("want body to match %s\n%s", re, buf.String()) + } + } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/BUILD b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/BUILD index b59a86f4f0c2..324359c01976 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/BUILD +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/BUILD @@ -1,20 +1,24 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "go_default_library", srcs = ["http.go"], importpath = "github.com/prometheus/client_golang/prometheus/promhttp", + visibility = ["//visibility:public"], deps = [ "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", "//vendor/github.com/prometheus/common/expfmt:go_default_library", ], ) +go_test( + name = "go_default_test", + srcs = ["http_test.go"], + importpath = "github.com/prometheus/client_golang/prometheus/promhttp", + library = ":go_default_library", + deps = ["//vendor/github.com/prometheus/client_golang/prometheus:go_default_library"], +) + filegroup( name = "package-srcs", srcs = glob(["**"]), @@ -26,4 +30,5 @@ filegroup( name = "all-srcs", srcs = [":package-srcs"], tags = ["automanaged"], + visibility = ["//visibility:public"], ) diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http_test.go 
b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http_test.go new file mode 100644 index 000000000000..d4a7d4a7b57f --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http_test.go @@ -0,0 +1,137 @@ +// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Copyright (c) 2013, The Prometheus Authors +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be found +// in the LICENSE file. + +package promhttp + +import ( + "bytes" + "errors" + "log" + "net/http" + "net/http/httptest" + "testing" + + "github.com/prometheus/client_golang/prometheus" +) + +type errorCollector struct{} + +func (e errorCollector) Describe(ch chan<- *prometheus.Desc) { + ch <- prometheus.NewDesc("invalid_metric", "not helpful", nil, nil) +} + +func (e errorCollector) Collect(ch chan<- prometheus.Metric) { + ch <- prometheus.NewInvalidMetric( + prometheus.NewDesc("invalid_metric", "not helpful", nil, nil), + errors.New("collect error"), + ) +} + +func TestHandlerErrorHandling(t *testing.T) { + + // Create a registry that collects a MetricFamily with two elements, + // another with one, and reports an error. + reg := prometheus.NewRegistry() + + cnt := prometheus.NewCounter(prometheus.CounterOpts{ + Name: "the_count", + Help: "Ah-ah-ah! Thunder and lightning!", + }) + reg.MustRegister(cnt) + + cntVec := prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "name", + Help: "docstring", + ConstLabels: prometheus.Labels{"constname": "constvalue"}, + }, + []string{"labelname"}, + ) + cntVec.WithLabelValues("val1").Inc() + cntVec.WithLabelValues("val2").Inc() + reg.MustRegister(cntVec) + + reg.MustRegister(errorCollector{}) + + logBuf := &bytes.Buffer{} + logger := log.New(logBuf, "", 0) + + writer := httptest.NewRecorder() + request, _ := http.NewRequest("GET", "/", nil) + request.Header.Add("Accept", "test/plain") + + errorHandler := HandlerFor(reg, HandlerOpts{ + ErrorLog: logger, + ErrorHandling: HTTPErrorOnError, + }) + continueHandler := HandlerFor(reg, HandlerOpts{ + ErrorLog: logger, + ErrorHandling: ContinueOnError, + }) + panicHandler := HandlerFor(reg, HandlerOpts{ + ErrorLog: logger, + ErrorHandling: PanicOnError, + }) + wantMsg := `error gathering metrics: error collecting metric Desc{fqName: "invalid_metric", help: "not helpful", constLabels: {}, variableLabels: []}: collect error +` + wantErrorBody := `An error has occurred during metrics gathering: + +error collecting metric Desc{fqName: "invalid_metric", help: "not helpful", constLabels: {}, variableLabels: []}: collect error +` + wantOKBody := `# HELP name docstring +# TYPE name counter +name{constname="constvalue",labelname="val1"} 1 +name{constname="constvalue",labelname="val2"} 1 +# HELP the_count Ah-ah-ah! Thunder and lightning! 
+# TYPE the_count counter +the_count 0 +` + + errorHandler.ServeHTTP(writer, request) + if got, want := writer.Code, http.StatusInternalServerError; got != want { + t.Errorf("got HTTP status code %d, want %d", got, want) + } + if got := logBuf.String(); got != wantMsg { + t.Errorf("got log message:\n%s\nwant log message:\n%s\n", got, wantMsg) + } + if got := writer.Body.String(); got != wantErrorBody { + t.Errorf("got body:\n%s\nwant body:\n%s\n", got, wantErrorBody) + } + logBuf.Reset() + writer.Body.Reset() + writer.Code = http.StatusOK + + continueHandler.ServeHTTP(writer, request) + if got, want := writer.Code, http.StatusOK; got != want { + t.Errorf("got HTTP status code %d, want %d", got, want) + } + if got := logBuf.String(); got != wantMsg { + t.Errorf("got log message %q, want %q", got, wantMsg) + } + if got := writer.Body.String(); got != wantOKBody { + t.Errorf("got body %q, want %q", got, wantOKBody) + } + + defer func() { + if err := recover(); err == nil { + t.Error("expected panic from panicHandler") + } + }() + panicHandler.ServeHTTP(writer, request) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry_test.go b/vendor/github.com/prometheus/client_golang/prometheus/registry_test.go new file mode 100644 index 000000000000..9dacb6256dd7 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/registry_test.go @@ -0,0 +1,545 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Copyright (c) 2013, The Prometheus Authors +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be found +// in the LICENSE file.
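(To recap the promhttp test above: the three handlers differ only in HandlerOpts.ErrorHandling. A typical production setup picks one mode up front; a sketch, with the registry and logger names as placeholders:)

```go
package main

import (
	"log"
	"net/http"
	"os"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	reg := prometheus.NewRegistry()
	logger := log.New(os.Stderr, "metrics: ", log.LstdFlags)

	// Log gathering errors but still serve whatever could be collected.
	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{
		ErrorLog:      logger,
		ErrorHandling: promhttp.ContinueOnError,
	}))
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```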
+ +package prometheus_test + +import ( + "bytes" + "net/http" + "net/http/httptest" + "testing" + + dto "github.com/prometheus/client_model/go" + + "github.com/golang/protobuf/proto" + "github.com/prometheus/common/expfmt" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promhttp" +) + +func testHandler(t testing.TB) { + + metricVec := prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "name", + Help: "docstring", + ConstLabels: prometheus.Labels{"constname": "constvalue"}, + }, + []string{"labelname"}, + ) + + metricVec.WithLabelValues("val1").Inc() + metricVec.WithLabelValues("val2").Inc() + + externalMetricFamily := &dto.MetricFamily{ + Name: proto.String("externalname"), + Help: proto.String("externaldocstring"), + Type: dto.MetricType_COUNTER.Enum(), + Metric: []*dto.Metric{ + { + Label: []*dto.LabelPair{ + { + Name: proto.String("externalconstname"), + Value: proto.String("externalconstvalue"), + }, + { + Name: proto.String("externallabelname"), + Value: proto.String("externalval1"), + }, + }, + Counter: &dto.Counter{ + Value: proto.Float64(1), + }, + }, + }, + } + externalBuf := &bytes.Buffer{} + enc := expfmt.NewEncoder(externalBuf, expfmt.FmtProtoDelim) + if err := enc.Encode(externalMetricFamily); err != nil { + t.Fatal(err) + } + externalMetricFamilyAsBytes := externalBuf.Bytes() + externalMetricFamilyAsText := []byte(`# HELP externalname externaldocstring +# TYPE externalname counter +externalname{externalconstname="externalconstvalue",externallabelname="externalval1"} 1 +`) + externalMetricFamilyAsProtoText := []byte(`name: "externalname" +help: "externaldocstring" +type: COUNTER +metric: < + label: < + name: "externalconstname" + value: "externalconstvalue" + > + label: < + name: "externallabelname" + value: "externalval1" + > + counter: < + value: 1 + > +> + +`) + externalMetricFamilyAsProtoCompactText := []byte(`name:"externalname" help:"externaldocstring" type:COUNTER metric:<label:<name:"externalconstname" value:"externalconstvalue" > label:<name:"externallabelname" value:"externalval1" > counter:<value:1 > > +`) + + expectedMetricFamily := &dto.MetricFamily{ + Name: proto.String("name"), + Help: proto.String("docstring"), + Type: dto.MetricType_COUNTER.Enum(), + Metric: []*dto.Metric{ + { + Label: []*dto.LabelPair{ + { + Name: proto.String("constname"), + Value: proto.String("constvalue"), + }, + { + Name: proto.String("labelname"), + Value: proto.String("val1"), + }, + }, + Counter: &dto.Counter{ + Value: proto.Float64(1), + }, + }, + { + Label: []*dto.LabelPair{ + { + Name: proto.String("constname"), + Value: proto.String("constvalue"), + }, + { + Name: proto.String("labelname"), + Value: proto.String("val2"), + }, + }, + Counter: &dto.Counter{ + Value: proto.Float64(1), + }, + }, + }, + } + buf := &bytes.Buffer{} + enc = expfmt.NewEncoder(buf, expfmt.FmtProtoDelim) + if err := enc.Encode(expectedMetricFamily); err != nil { + t.Fatal(err) + } + expectedMetricFamilyAsBytes := buf.Bytes() + expectedMetricFamilyAsText := []byte(`# HELP name docstring +# TYPE name counter +name{constname="constvalue",labelname="val1"} 1 +name{constname="constvalue",labelname="val2"} 1 +`) + expectedMetricFamilyAsProtoText := []byte(`name: "name" +help: "docstring" +type: COUNTER +metric: < + label: < + name: "constname" + value: "constvalue" + > + label: < + name: "labelname" + value: "val1" + > + counter: < + value: 1 + > +> +metric: < + label: < + name: "constname" + value: "constvalue" + > + label: < + name: "labelname" + value: "val2" + > + counter: < + value: 1 + > +> + +`) + expectedMetricFamilyAsProtoCompactText := []byte(`name:"name"
help:"docstring" type:COUNTER metric: label: counter: > metric: label: counter: > +`) + + externalMetricFamilyWithSameName := &dto.MetricFamily{ + Name: proto.String("name"), + Help: proto.String("docstring"), + Type: dto.MetricType_COUNTER.Enum(), + Metric: []*dto.Metric{ + { + Label: []*dto.LabelPair{ + { + Name: proto.String("constname"), + Value: proto.String("constvalue"), + }, + { + Name: proto.String("labelname"), + Value: proto.String("different_val"), + }, + }, + Counter: &dto.Counter{ + Value: proto.Float64(42), + }, + }, + }, + } + + expectedMetricFamilyMergedWithExternalAsProtoCompactText := []byte(`name:"name" help:"docstring" type:COUNTER metric: label: counter: > metric: label: counter: > metric: label: counter: > +`) + + type output struct { + headers map[string]string + body []byte + } + + var scenarios = []struct { + headers map[string]string + out output + collector prometheus.Collector + externalMF []*dto.MetricFamily + }{ + { // 0 + headers: map[string]string{ + "Accept": "foo/bar;q=0.2, dings/bums;q=0.8", + }, + out: output{ + headers: map[string]string{ + "Content-Type": `text/plain; version=0.0.4`, + }, + body: []byte{}, + }, + }, + { // 1 + headers: map[string]string{ + "Accept": "foo/bar;q=0.2, application/quark;q=0.8", + }, + out: output{ + headers: map[string]string{ + "Content-Type": `text/plain; version=0.0.4`, + }, + body: []byte{}, + }, + }, + { // 2 + headers: map[string]string{ + "Accept": "foo/bar;q=0.2, application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=bla;q=0.8", + }, + out: output{ + headers: map[string]string{ + "Content-Type": `text/plain; version=0.0.4`, + }, + body: []byte{}, + }, + }, + { // 3 + headers: map[string]string{ + "Accept": "text/plain;q=0.2, application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited;q=0.8", + }, + out: output{ + headers: map[string]string{ + "Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`, + }, + body: []byte{}, + }, + }, + { // 4 + headers: map[string]string{ + "Accept": "application/json", + }, + out: output{ + headers: map[string]string{ + "Content-Type": `text/plain; version=0.0.4`, + }, + body: expectedMetricFamilyAsText, + }, + collector: metricVec, + }, + { // 5 + headers: map[string]string{ + "Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited", + }, + out: output{ + headers: map[string]string{ + "Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`, + }, + body: expectedMetricFamilyAsBytes, + }, + collector: metricVec, + }, + { // 6 + headers: map[string]string{ + "Accept": "application/json", + }, + out: output{ + headers: map[string]string{ + "Content-Type": `text/plain; version=0.0.4`, + }, + body: externalMetricFamilyAsText, + }, + externalMF: []*dto.MetricFamily{externalMetricFamily}, + }, + { // 7 + headers: map[string]string{ + "Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited", + }, + out: output{ + headers: map[string]string{ + "Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`, + }, + body: externalMetricFamilyAsBytes, + }, + externalMF: []*dto.MetricFamily{externalMetricFamily}, + }, + { // 8 + headers: map[string]string{ + "Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited", + }, + out: output{ + headers: 
map[string]string{ + "Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`, + }, + body: bytes.Join( + [][]byte{ + externalMetricFamilyAsBytes, + expectedMetricFamilyAsBytes, + }, + []byte{}, + ), + }, + collector: metricVec, + externalMF: []*dto.MetricFamily{externalMetricFamily}, + }, + { // 9 + headers: map[string]string{ + "Accept": "text/plain", + }, + out: output{ + headers: map[string]string{ + "Content-Type": `text/plain; version=0.0.4`, + }, + body: []byte{}, + }, + }, + { // 10 + headers: map[string]string{ + "Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=bla;q=0.2, text/plain;q=0.5", + }, + out: output{ + headers: map[string]string{ + "Content-Type": `text/plain; version=0.0.4`, + }, + body: expectedMetricFamilyAsText, + }, + collector: metricVec, + }, + { // 11 + headers: map[string]string{ + "Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=bla;q=0.2, text/plain;q=0.5;version=0.0.4", + }, + out: output{ + headers: map[string]string{ + "Content-Type": `text/plain; version=0.0.4`, + }, + body: bytes.Join( + [][]byte{ + externalMetricFamilyAsText, + expectedMetricFamilyAsText, + }, + []byte{}, + ), + }, + collector: metricVec, + externalMF: []*dto.MetricFamily{externalMetricFamily}, + }, + { // 12 + headers: map[string]string{ + "Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited;q=0.2, text/plain;q=0.5;version=0.0.2", + }, + out: output{ + headers: map[string]string{ + "Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`, + }, + body: bytes.Join( + [][]byte{ + externalMetricFamilyAsBytes, + expectedMetricFamilyAsBytes, + }, + []byte{}, + ), + }, + collector: metricVec, + externalMF: []*dto.MetricFamily{externalMetricFamily}, + }, + { // 13 + headers: map[string]string{ + "Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=text;q=0.5, application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited;q=0.4", + }, + out: output{ + headers: map[string]string{ + "Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=text`, + }, + body: bytes.Join( + [][]byte{ + externalMetricFamilyAsProtoText, + expectedMetricFamilyAsProtoText, + }, + []byte{}, + ), + }, + collector: metricVec, + externalMF: []*dto.MetricFamily{externalMetricFamily}, + }, + { // 14 + headers: map[string]string{ + "Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=compact-text", + }, + out: output{ + headers: map[string]string{ + "Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=compact-text`, + }, + body: bytes.Join( + [][]byte{ + externalMetricFamilyAsProtoCompactText, + expectedMetricFamilyAsProtoCompactText, + }, + []byte{}, + ), + }, + collector: metricVec, + externalMF: []*dto.MetricFamily{externalMetricFamily}, + }, + { // 15 + headers: map[string]string{ + "Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=compact-text", + }, + out: output{ + headers: map[string]string{ + "Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=compact-text`, + }, + body: bytes.Join( + [][]byte{ + externalMetricFamilyAsProtoCompactText, + expectedMetricFamilyMergedWithExternalAsProtoCompactText, + }, 
+ []byte{}, + ), + }, + collector: metricVec, + externalMF: []*dto.MetricFamily{ + externalMetricFamily, + externalMetricFamilyWithSameName, + }, + }, + } + for i, scenario := range scenarios { + registry := prometheus.NewPedanticRegistry() + gatherer := prometheus.Gatherer(registry) + if scenario.externalMF != nil { + gatherer = prometheus.Gatherers{ + registry, + prometheus.GathererFunc(func() ([]*dto.MetricFamily, error) { + return scenario.externalMF, nil + }), + } + } + + if scenario.collector != nil { + registry.Register(scenario.collector) + } + writer := httptest.NewRecorder() + handler := prometheus.InstrumentHandler("prometheus", promhttp.HandlerFor(gatherer, promhttp.HandlerOpts{})) + request, _ := http.NewRequest("GET", "/", nil) + for key, value := range scenario.headers { + request.Header.Add(key, value) + } + handler(writer, request) + + for key, value := range scenario.out.headers { + if writer.HeaderMap.Get(key) != value { + t.Errorf( + "%d. expected %q for header %q, got %q", + i, value, key, writer.Header().Get(key), + ) + } + } + + if !bytes.Equal(scenario.out.body, writer.Body.Bytes()) { + t.Errorf( + "%d. expected body:\n%s\ngot body:\n%s\n", + i, scenario.out.body, writer.Body.Bytes(), + ) + } + } +} + +func TestHandler(t *testing.T) { + testHandler(t) +} + +func BenchmarkHandler(b *testing.B) { + for i := 0; i < b.N; i++ { + testHandler(b) + } +} + +func TestRegisterWithOrGet(t *testing.T) { + // Replace the default registerer just to be sure. This is bad, but this + // whole test will go away once RegisterOrGet is removed. + oldRegisterer := prometheus.DefaultRegisterer + defer func() { + prometheus.DefaultRegisterer = oldRegisterer + }() + prometheus.DefaultRegisterer = prometheus.NewRegistry() + original := prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "test", + Help: "help", + }, + []string{"foo", "bar"}, + ) + equalButNotSame := prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "test", + Help: "help", + }, + []string{"foo", "bar"}, + ) + if err := prometheus.Register(original); err != nil { + t.Fatal(err) + } + if err := prometheus.Register(equalButNotSame); err == nil { + t.Fatal("expected error when registering an equal collector") + } + existing, err := prometheus.RegisterOrGet(equalButNotSame) + if err != nil { + t.Fatal(err) + } + if existing != original { + t.Error("expected original collector but got something else") + } + if existing == equalButNotSame { + t.Error("expected the original collector but got a new one") + } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/summary_test.go b/vendor/github.com/prometheus/client_golang/prometheus/summary_test.go new file mode 100644 index 000000000000..c4575ffbd21b --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/summary_test.go @@ -0,0 +1,347 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
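(As the comment in TestRegisterWithOrGet above notes, RegisterOrGet is on its way out; the replacement is the AlreadyRegisteredError pattern shown in the examples earlier. A sketch of the equivalent logic:)

```go
package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	c := prometheus.NewCounterVec(
		prometheus.CounterOpts{Name: "test", Help: "help"},
		[]string{"foo", "bar"},
	)
	if err := prometheus.Register(c); err != nil {
		if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
			// Reuse the collector that was registered first.
			c = are.ExistingCollector.(*prometheus.CounterVec)
		} else {
			panic(err)
		}
	}
	c.WithLabelValues("a", "b").Inc()
}
```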
+ +package prometheus + +import ( + "math" + "math/rand" + "sort" + "sync" + "testing" + "testing/quick" + "time" + + dto "github.com/prometheus/client_model/go" +) + +func benchmarkSummaryObserve(w int, b *testing.B) { + b.StopTimer() + + wg := new(sync.WaitGroup) + wg.Add(w) + + g := new(sync.WaitGroup) + g.Add(1) + + s := NewSummary(SummaryOpts{}) + + for i := 0; i < w; i++ { + go func() { + g.Wait() + + for i := 0; i < b.N; i++ { + s.Observe(float64(i)) + } + + wg.Done() + }() + } + + b.StartTimer() + g.Done() + wg.Wait() +} + +func BenchmarkSummaryObserve1(b *testing.B) { + benchmarkSummaryObserve(1, b) +} + +func BenchmarkSummaryObserve2(b *testing.B) { + benchmarkSummaryObserve(2, b) +} + +func BenchmarkSummaryObserve4(b *testing.B) { + benchmarkSummaryObserve(4, b) +} + +func BenchmarkSummaryObserve8(b *testing.B) { + benchmarkSummaryObserve(8, b) +} + +func benchmarkSummaryWrite(w int, b *testing.B) { + b.StopTimer() + + wg := new(sync.WaitGroup) + wg.Add(w) + + g := new(sync.WaitGroup) + g.Add(1) + + s := NewSummary(SummaryOpts{}) + + for i := 0; i < 1000000; i++ { + s.Observe(float64(i)) + } + + for j := 0; j < w; j++ { + outs := make([]dto.Metric, b.N) + + go func(o []dto.Metric) { + g.Wait() + + for i := 0; i < b.N; i++ { + s.Write(&o[i]) + } + + wg.Done() + }(outs) + } + + b.StartTimer() + g.Done() + wg.Wait() +} + +func BenchmarkSummaryWrite1(b *testing.B) { + benchmarkSummaryWrite(1, b) +} + +func BenchmarkSummaryWrite2(b *testing.B) { + benchmarkSummaryWrite(2, b) +} + +func BenchmarkSummaryWrite4(b *testing.B) { + benchmarkSummaryWrite(4, b) +} + +func BenchmarkSummaryWrite8(b *testing.B) { + benchmarkSummaryWrite(8, b) +} + +func TestSummaryConcurrency(t *testing.T) { + if testing.Short() { + t.Skip("Skipping test in short mode.") + } + + rand.Seed(42) + + it := func(n uint32) bool { + mutations := int(n%1e4 + 1e4) + concLevel := int(n%5 + 1) + total := mutations * concLevel + + var start, end sync.WaitGroup + start.Add(1) + end.Add(concLevel) + + sum := NewSummary(SummaryOpts{ + Name: "test_summary", + Help: "helpless", + }) + + allVars := make([]float64, total) + var sampleSum float64 + for i := 0; i < concLevel; i++ { + vals := make([]float64, mutations) + for j := 0; j < mutations; j++ { + v := rand.NormFloat64() + vals[j] = v + allVars[i*mutations+j] = v + sampleSum += v + } + + go func(vals []float64) { + start.Wait() + for _, v := range vals { + sum.Observe(v) + } + end.Done() + }(vals) + } + sort.Float64s(allVars) + start.Done() + end.Wait() + + m := &dto.Metric{} + sum.Write(m) + if got, want := int(*m.Summary.SampleCount), total; got != want { + t.Errorf("got sample count %d, want %d", got, want) + } + if got, want := *m.Summary.SampleSum, sampleSum; math.Abs((got-want)/want) > 0.001 { + t.Errorf("got sample sum %f, want %f", got, want) + } + + objectives := make([]float64, 0, len(DefObjectives)) + for qu := range DefObjectives { + objectives = append(objectives, qu) + } + sort.Float64s(objectives) + + for i, wantQ := range objectives { + ε := DefObjectives[wantQ] + gotQ := *m.Summary.Quantile[i].Quantile + gotV := *m.Summary.Quantile[i].Value + min, max := getBounds(allVars, wantQ, ε) + if gotQ != wantQ { + t.Errorf("got quantile %f, want %f", gotQ, wantQ) + } + if gotV < min || gotV > max { + t.Errorf("got %f for quantile %f, want [%f,%f]", gotV, gotQ, min, max) + } + } + return true + } + + if err := quick.Check(it, nil); err != nil { + t.Error(err) + } +} + +func TestSummaryVecConcurrency(t *testing.T) { + if testing.Short() { + t.Skip("Skipping test in 
short mode.") + } + + rand.Seed(42) + + objectives := make([]float64, 0, len(DefObjectives)) + for qu := range DefObjectives { + + objectives = append(objectives, qu) + } + sort.Float64s(objectives) + + it := func(n uint32) bool { + mutations := int(n%1e4 + 1e4) + concLevel := int(n%7 + 1) + vecLength := int(n%3 + 1) + + var start, end sync.WaitGroup + start.Add(1) + end.Add(concLevel) + + sum := NewSummaryVec( + SummaryOpts{ + Name: "test_summary", + Help: "helpless", + }, + []string{"label"}, + ) + + allVars := make([][]float64, vecLength) + sampleSums := make([]float64, vecLength) + for i := 0; i < concLevel; i++ { + vals := make([]float64, mutations) + picks := make([]int, mutations) + for j := 0; j < mutations; j++ { + v := rand.NormFloat64() + vals[j] = v + pick := rand.Intn(vecLength) + picks[j] = pick + allVars[pick] = append(allVars[pick], v) + sampleSums[pick] += v + } + + go func(vals []float64) { + start.Wait() + for i, v := range vals { + sum.WithLabelValues(string('A' + picks[i])).Observe(v) + } + end.Done() + }(vals) + } + for _, vars := range allVars { + sort.Float64s(vars) + } + start.Done() + end.Wait() + + for i := 0; i < vecLength; i++ { + m := &dto.Metric{} + s := sum.WithLabelValues(string('A' + i)) + s.Write(m) + if got, want := int(*m.Summary.SampleCount), len(allVars[i]); got != want { + t.Errorf("got sample count %d for label %c, want %d", got, 'A'+i, want) + } + if got, want := *m.Summary.SampleSum, sampleSums[i]; math.Abs((got-want)/want) > 0.001 { + t.Errorf("got sample sum %f for label %c, want %f", got, 'A'+i, want) + } + for j, wantQ := range objectives { + ε := DefObjectives[wantQ] + gotQ := *m.Summary.Quantile[j].Quantile + gotV := *m.Summary.Quantile[j].Value + min, max := getBounds(allVars[i], wantQ, ε) + if gotQ != wantQ { + t.Errorf("got quantile %f for label %c, want %f", gotQ, 'A'+i, wantQ) + } + if gotV < min || gotV > max { + t.Errorf("got %f for quantile %f for label %c, want [%f,%f]", gotV, gotQ, 'A'+i, min, max) + } + } + } + return true + } + + if err := quick.Check(it, nil); err != nil { + t.Error(err) + } +} + +func TestSummaryDecay(t *testing.T) { + if testing.Short() { + t.Skip("Skipping test in short mode.") + // More because it depends on timing than because it is particularly long... + } + + sum := NewSummary(SummaryOpts{ + Name: "test_summary", + Help: "helpless", + MaxAge: 100 * time.Millisecond, + Objectives: map[float64]float64{0.1: 0.001}, + AgeBuckets: 10, + }) + + m := &dto.Metric{} + i := 0 + tick := time.NewTicker(time.Millisecond) + for _ = range tick.C { + i++ + sum.Observe(float64(i)) + if i%10 == 0 { + sum.Write(m) + if got, want := *m.Summary.Quantile[0].Value, math.Max(float64(i)/10, float64(i-90)); math.Abs(got-want) > 20 { + t.Errorf("%d. got %f, want %f", i, got, want) + } + m.Reset() + } + if i >= 1000 { + break + } + } + tick.Stop() + // Wait for MaxAge without observations and make sure quantiles are NaN. + time.Sleep(100 * time.Millisecond) + sum.Write(m) + if got := *m.Summary.Quantile[0].Value; !math.IsNaN(got) { + t.Errorf("got %f, want NaN after expiration", got) + } +} + +func getBounds(vars []float64, q, ε float64) (min, max float64) { + // TODO(beorn7): This currently tolerates an error of up to 2*ε. The + // error must be at most ε, but for some reason, it's sometimes slightly + // higher. That's a bug. 
+ n := float64(len(vars)) + lower := int((q - 2*ε) * n) + upper := int(math.Ceil((q + 2*ε) * n)) + min = vars[0] + if lower > 1 { + min = vars[lower-1] + } + max = vars[len(vars)-1] + if upper < len(vars) { + max = vars[upper-1] + } + return +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vec_test.go b/vendor/github.com/prometheus/client_golang/prometheus/vec_test.go new file mode 100644 index 000000000000..445a6b39fe0d --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/vec_test.go @@ -0,0 +1,312 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "fmt" + "testing" + + dto "github.com/prometheus/client_model/go" +) + +func TestDelete(t *testing.T) { + vec := NewUntypedVec( + UntypedOpts{ + Name: "test", + Help: "helpless", + }, + []string{"l1", "l2"}, + ) + testDelete(t, vec) +} + +func TestDeleteWithCollisions(t *testing.T) { + vec := NewUntypedVec( + UntypedOpts{ + Name: "test", + Help: "helpless", + }, + []string{"l1", "l2"}, + ) + vec.hashAdd = func(h uint64, s string) uint64 { return 1 } + vec.hashAddByte = func(h uint64, b byte) uint64 { return 1 } + testDelete(t, vec) +} + +func testDelete(t *testing.T, vec *UntypedVec) { + if got, want := vec.Delete(Labels{"l1": "v1", "l2": "v2"}), false; got != want { + t.Errorf("got %v, want %v", got, want) + } + + vec.With(Labels{"l1": "v1", "l2": "v2"}).(Untyped).Set(42) + if got, want := vec.Delete(Labels{"l1": "v1", "l2": "v2"}), true; got != want { + t.Errorf("got %v, want %v", got, want) + } + if got, want := vec.Delete(Labels{"l1": "v1", "l2": "v2"}), false; got != want { + t.Errorf("got %v, want %v", got, want) + } + + vec.With(Labels{"l1": "v1", "l2": "v2"}).(Untyped).Set(42) + if got, want := vec.Delete(Labels{"l2": "v2", "l1": "v1"}), true; got != want { + t.Errorf("got %v, want %v", got, want) + } + if got, want := vec.Delete(Labels{"l2": "v2", "l1": "v1"}), false; got != want { + t.Errorf("got %v, want %v", got, want) + } + + vec.With(Labels{"l1": "v1", "l2": "v2"}).(Untyped).Set(42) + if got, want := vec.Delete(Labels{"l2": "v1", "l1": "v2"}), false; got != want { + t.Errorf("got %v, want %v", got, want) + } + if got, want := vec.Delete(Labels{"l1": "v1"}), false; got != want { + t.Errorf("got %v, want %v", got, want) + } +} + +func TestDeleteLabelValues(t *testing.T) { + vec := NewUntypedVec( + UntypedOpts{ + Name: "test", + Help: "helpless", + }, + []string{"l1", "l2"}, + ) + testDeleteLabelValues(t, vec) +} + +func TestDeleteLabelValuesWithCollisions(t *testing.T) { + vec := NewUntypedVec( + UntypedOpts{ + Name: "test", + Help: "helpless", + }, + []string{"l1", "l2"}, + ) + vec.hashAdd = func(h uint64, s string) uint64 { return 1 } + vec.hashAddByte = func(h uint64, b byte) uint64 { return 1 } + testDeleteLabelValues(t, vec) +} + +func testDeleteLabelValues(t *testing.T, vec *UntypedVec) { + if got, want := vec.DeleteLabelValues("v1", "v2"), false; got != want { + t.Errorf("got %v, want %v", got, want) + } 
+
+	vec.With(Labels{"l1": "v1", "l2": "v2"}).(Untyped).Set(42)
+	vec.With(Labels{"l1": "v1", "l2": "v3"}).(Untyped).Set(42) // Add junk data for collision.
+	if got, want := vec.DeleteLabelValues("v1", "v2"), true; got != want {
+		t.Errorf("got %v, want %v", got, want)
+	}
+	if got, want := vec.DeleteLabelValues("v1", "v2"), false; got != want {
+		t.Errorf("got %v, want %v", got, want)
+	}
+	if got, want := vec.DeleteLabelValues("v1", "v3"), true; got != want {
+		t.Errorf("got %v, want %v", got, want)
+	}
+
+	vec.With(Labels{"l1": "v1", "l2": "v2"}).(Untyped).Set(42)
+	// Delete out of order.
+	if got, want := vec.DeleteLabelValues("v2", "v1"), false; got != want {
+		t.Errorf("got %v, want %v", got, want)
+	}
+	if got, want := vec.DeleteLabelValues("v1"), false; got != want {
+		t.Errorf("got %v, want %v", got, want)
+	}
+}
+
+func TestMetricVec(t *testing.T) {
+	vec := NewUntypedVec(
+		UntypedOpts{
+			Name: "test",
+			Help: "helpless",
+		},
+		[]string{"l1", "l2"},
+	)
+	testMetricVec(t, vec)
+}
+
+func TestMetricVecWithCollisions(t *testing.T) {
+	vec := NewUntypedVec(
+		UntypedOpts{
+			Name: "test",
+			Help: "helpless",
+		},
+		[]string{"l1", "l2"},
+	)
+	vec.hashAdd = func(h uint64, s string) uint64 { return 1 }
+	vec.hashAddByte = func(h uint64, b byte) uint64 { return 1 }
+	testMetricVec(t, vec)
+}
+
+func testMetricVec(t *testing.T, vec *UntypedVec) {
+	vec.Reset() // Actually test Reset now!
+
+	var pair [2]string
+	// Keep track of metrics.
+	expected := map[[2]string]int{}
+
+	for i := 0; i < 1000; i++ {
+		pair[0], pair[1] = fmt.Sprint(i%4), fmt.Sprint(i%5) // Vary the label combinations.
+		expected[pair]++
+		vec.WithLabelValues(pair[0], pair[1]).Inc()
+
+		expected[[2]string{"v1", "v2"}]++
+		vec.WithLabelValues("v1", "v2").Inc()
+	}
+
+	var total int
+	for _, metrics := range vec.children {
+		for _, metric := range metrics {
+			total++
+			copy(pair[:], metric.values)
+
+			var metricOut dto.Metric
+			if err := metric.metric.Write(&metricOut); err != nil {
+				t.Fatal(err)
+			}
+			actual := *metricOut.Untyped.Value
+
+			var actualPair [2]string
+			for i, label := range metricOut.Label {
+				actualPair[i] = *label.Value
+			}
+
+			// Test the output pair against metric.values to ensure we've
+			// selected the right one. We check this to ensure the below
+			// check means anything at all.
+			if actualPair != pair {
+				t.Fatalf("unexpected pair association in metric map: %v != %v", actualPair, pair)
+			}
+
+			if actual != float64(expected[pair]) {
+				t.Fatalf("incorrect counter value for %v: %v != %v", pair, actual, expected[pair])
+			}
+		}
+	}
+
+	if total != len(expected) {
+		t.Fatalf("unexpected number of metrics: %v != %v", total, len(expected))
+	}
+
+	vec.Reset()
+
+	if len(vec.children) > 0 {
+		t.Fatalf("reset failed")
+	}
+}
+
+func TestCounterVecEndToEndWithCollision(t *testing.T) {
+	vec := NewCounterVec(
+		CounterOpts{
+			Name: "test",
+			Help: "helpless",
+		},
+		[]string{"labelname"},
+	)
+	vec.WithLabelValues("77kepQFQ8Kl").Inc()
+	vec.WithLabelValues("!0IC=VloaY").Add(2)
+
+	m := &dto.Metric{}
+	if err := vec.WithLabelValues("77kepQFQ8Kl").Write(m); err != nil {
+		t.Fatal(err)
+	}
+	if got, want := m.GetLabel()[0].GetValue(), "77kepQFQ8Kl"; got != want {
+		t.Errorf("got label value %q, want %q", got, want)
+	}
+	if got, want := m.GetCounter().GetValue(), 1.; got != want {
+		t.Errorf("got value %f, want %f", got, want)
+	}
+	m.Reset()
+	if err := vec.WithLabelValues("!0IC=VloaY").Write(m); err != nil {
+		t.Fatal(err)
+	}
+	if got, want := m.GetLabel()[0].GetValue(), "!0IC=VloaY"; got != want {
+		t.Errorf("got label value %q, want %q", got, want)
+	}
+	if got, want := m.GetCounter().GetValue(), 2.; got != want {
+		t.Errorf("got value %f, want %f", got, want)
+	}
}
+
+func BenchmarkMetricVecWithLabelValuesBasic(b *testing.B) {
+	benchmarkMetricVecWithLabelValues(b, map[string][]string{
+		"l1": {"onevalue"},
+		"l2": {"twovalue"},
+	})
+}
+
+func BenchmarkMetricVecWithLabelValues2Keys10ValueCardinality(b *testing.B) {
+	benchmarkMetricVecWithLabelValuesCardinality(b, 2, 10)
+}
+
+func BenchmarkMetricVecWithLabelValues4Keys10ValueCardinality(b *testing.B) {
+	benchmarkMetricVecWithLabelValuesCardinality(b, 4, 10)
+}
+
+func BenchmarkMetricVecWithLabelValues2Keys100ValueCardinality(b *testing.B) {
+	benchmarkMetricVecWithLabelValuesCardinality(b, 2, 100)
+}
+
+func BenchmarkMetricVecWithLabelValues10Keys100ValueCardinality(b *testing.B) {
+	benchmarkMetricVecWithLabelValuesCardinality(b, 10, 100)
+}
+
+func BenchmarkMetricVecWithLabelValues10Keys1000ValueCardinality(b *testing.B) {
+	benchmarkMetricVecWithLabelValuesCardinality(b, 10, 1000)
+}
+
+func benchmarkMetricVecWithLabelValuesCardinality(b *testing.B, nkeys, nvalues int) {
+	labels := map[string][]string{}
+
+	for i := 0; i < nkeys; i++ {
+		var (
+			k  = fmt.Sprintf("key-%v", i)
+			vs = make([]string, 0, nvalues)
+		)
+		for j := 0; j < nvalues; j++ {
+			vs = append(vs, fmt.Sprintf("value-%v", j))
+		}
+		labels[k] = vs
+	}
+
+	benchmarkMetricVecWithLabelValues(b, labels)
+}
+
+func benchmarkMetricVecWithLabelValues(b *testing.B, labels map[string][]string) {
+	var keys []string
+	for k := range labels { // Map order dependent, who cares though.
+		keys = append(keys, k)
+	}
+
+	values := make([]string, len(labels)) // Value cache for permutations.
+	vec := NewUntypedVec(
+		UntypedOpts{
+			Name: "test",
+			Help: "helpless",
+		},
+		keys,
+	)
+
+	b.ReportAllocs()
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		// Vary the input across the provided map entries, keyed on index.
+		for j, k := range keys {
+			candidates := labels[k]
+			values[j] = candidates[i%len(candidates)]
+		}
+
+		vec.WithLabelValues(values...)
+ } +} diff --git a/vendor/github.com/prometheus/client_model/.gitignore b/vendor/github.com/prometheus/client_model/.gitignore new file mode 100644 index 000000000000..2f7896d1d136 --- /dev/null +++ b/vendor/github.com/prometheus/client_model/.gitignore @@ -0,0 +1 @@ +target/ diff --git a/vendor/github.com/prometheus/client_model/AUTHORS.md b/vendor/github.com/prometheus/client_model/AUTHORS.md new file mode 100644 index 000000000000..e8b3efa6ac5d --- /dev/null +++ b/vendor/github.com/prometheus/client_model/AUTHORS.md @@ -0,0 +1,13 @@ +The Prometheus project was started by Matt T. Proud (emeritus) and +Julius Volz in 2012. + +Maintainers of this repository: + +* Björn Rabenstein + +The following individuals have contributed code to this repository +(listed in alphabetical order): + +* Björn Rabenstein +* Matt T. Proud +* Tobias Schmidt diff --git a/vendor/github.com/prometheus/client_model/CONTRIBUTING.md b/vendor/github.com/prometheus/client_model/CONTRIBUTING.md new file mode 100644 index 000000000000..573d58741b49 --- /dev/null +++ b/vendor/github.com/prometheus/client_model/CONTRIBUTING.md @@ -0,0 +1,18 @@ +# Contributing + +Prometheus uses GitHub to manage reviews of pull requests. + +* If you have a trivial fix or improvement, go ahead and create a pull + request, addressing (with `@...`) one or more of the maintainers + (see [AUTHORS.md](AUTHORS.md)) in the description of the pull request. + +* If you plan to do something more involved, first discuss your ideas + on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers). + This will avoid unnecessary work and surely give you and us a good deal + of inspiration. + +* Relevant coding style guidelines for the Go parts are the [Go Code Review + Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments) + and the _Formatting and style_ section of Peter Bourgon's [Go: Best + Practices for Production + Environments](http://peter.bourgon.org/go-in-production/#formatting-and-style). diff --git a/vendor/github.com/prometheus/client_model/Makefile b/vendor/github.com/prometheus/client_model/Makefile new file mode 100644 index 000000000000..9cc23b3408c2 --- /dev/null +++ b/vendor/github.com/prometheus/client_model/Makefile @@ -0,0 +1,61 @@ +# Copyright 2013 Prometheus Team +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+KEY_ID ?= _DEFINE_ME_
+
+all: cpp go java python ruby
+
+.SUFFIXES:
+
+cpp: cpp/metrics.pb.cc cpp/metrics.pb.h
+
+cpp/metrics.pb.cc: metrics.proto
+	protoc $< --cpp_out=cpp/
+
+cpp/metrics.pb.h: metrics.proto
+	protoc $< --cpp_out=cpp/
+
+go: go/metrics.pb.go
+
+go/metrics.pb.go: metrics.proto
+	protoc $< --go_out=go/
+
+java: src/main/java/io/prometheus/client/Metrics.java pom.xml
+	mvn clean compile package
+
+src/main/java/io/prometheus/client/Metrics.java: metrics.proto
+	protoc $< --java_out=src/main/java
+
+python: python/prometheus/client/model/metrics_pb2.py
+
+python/prometheus/client/model/metrics_pb2.py: metrics.proto
+	protoc $< --python_out=python/prometheus/client/model
+
+ruby:
+	$(MAKE) -C ruby build
+
+clean:
+	-rm -rf cpp/*
+	-rm -rf go/*
+	-rm -rf java/*
+	-rm -rf python/*
+	-$(MAKE) -C ruby clean
+	-mvn clean
+
+maven-deploy-snapshot: java
+	mvn clean deploy -Dgpg.keyname=$(KEY_ID) -DperformRelease=true
+
+maven-deploy-release: java
+	mvn clean release:clean release:prepare release:perform -Dgpg.keyname=$(KEY_ID) -DperformRelease=true
+
+.PHONY: all clean cpp go java maven-deploy-snapshot maven-deploy-release python ruby
diff --git a/vendor/github.com/prometheus/client_model/README.md b/vendor/github.com/prometheus/client_model/README.md
new file mode 100644
index 000000000000..a710042db5ef
--- /dev/null
+++ b/vendor/github.com/prometheus/client_model/README.md
@@ -0,0 +1,26 @@
+# Background
+Under most circumstances, manually downloading this repository should never
+be required.
+
+# Prerequisites
+## Base
+* [Google Protocol Buffers](https://developers.google.com/protocol-buffers)
+
+## Java
+* [Apache Maven](http://maven.apache.org)
+* [Prometheus Maven Repository](https://github.com/prometheus/io.prometheus-maven-repository) checked out into ../io.prometheus-maven-repository
+
+## Go
+* [Go](http://golang.org)
+* [goprotobuf](https://code.google.com/p/goprotobuf)
+
+## Ruby
+* [Ruby](https://www.ruby-lang.org)
+* [bundler](https://rubygems.org/gems/bundler)
+
+# Building
+    $ make
+
+# Getting Started
+  * The Go source code is periodically indexed: [Go Protocol Buffer Model](http://godoc.org/github.com/prometheus/client_model/go).
+  * All of the core developers are accessible via the [Prometheus Developers mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers).
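For orientation, the generated `dto` types in this repository are consumed by building `MetricFamily` values with the `golang/protobuf` helpers, exactly as the vendored test files elsewhere in this diff do. A minimal, self-contained sketch follows; the metric name `requests_total` and the `handler` label are invented for illustration and are not part of the vendored code:

    package main

    import (
        "fmt"

        "github.com/golang/protobuf/proto"
        dto "github.com/prometheus/client_model/go"
    )

    func main() {
        // A counter family with one labeled sample, mirroring the structures
        // that the Prometheus client libraries ship over the wire.
        mf := &dto.MetricFamily{
            Name: proto.String("requests_total"), // illustrative name
            Help: proto.String("Total requests handled."),
            Type: dto.MetricType_COUNTER.Enum(),
            Metric: []*dto.Metric{
                {
                    Label: []*dto.LabelPair{
                        {Name: proto.String("handler"), Value: proto.String("index")},
                    },
                    Counter: &dto.Counter{Value: proto.Float64(42)},
                },
            },
        }
        fmt.Println(mf.GetName(), mf.GetMetric()[0].GetCounter().GetValue())
    }

Running this prints `requests_total 42`; the same structs are what the `expfmt` encoders and decoders further down in this diff operate on.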
diff --git a/vendor/github.com/prometheus/client_model/go/BUILD b/vendor/github.com/prometheus/client_model/go/BUILD index 673a4471a3c8..033e145de014 100644 --- a/vendor/github.com/prometheus/client_model/go/BUILD +++ b/vendor/github.com/prometheus/client_model/go/BUILD @@ -1,14 +1,10 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) +load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", srcs = ["metrics.pb.go"], importpath = "github.com/prometheus/client_model/go", + visibility = ["//visibility:public"], deps = ["//vendor/github.com/golang/protobuf/proto:go_default_library"], ) @@ -23,4 +19,5 @@ filegroup( name = "all-srcs", srcs = [":package-srcs"], tags = ["automanaged"], + visibility = ["//visibility:public"], ) diff --git a/vendor/github.com/prometheus/client_model/metrics.proto b/vendor/github.com/prometheus/client_model/metrics.proto new file mode 100644 index 000000000000..0b84af920069 --- /dev/null +++ b/vendor/github.com/prometheus/client_model/metrics.proto @@ -0,0 +1,81 @@ +// Copyright 2013 Prometheus Team +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto2"; + +package io.prometheus.client; +option java_package = "io.prometheus.client"; + +message LabelPair { + optional string name = 1; + optional string value = 2; +} + +enum MetricType { + COUNTER = 0; + GAUGE = 1; + SUMMARY = 2; + UNTYPED = 3; + HISTOGRAM = 4; +} + +message Gauge { + optional double value = 1; +} + +message Counter { + optional double value = 1; +} + +message Quantile { + optional double quantile = 1; + optional double value = 2; +} + +message Summary { + optional uint64 sample_count = 1; + optional double sample_sum = 2; + repeated Quantile quantile = 3; +} + +message Untyped { + optional double value = 1; +} + +message Histogram { + optional uint64 sample_count = 1; + optional double sample_sum = 2; + repeated Bucket bucket = 3; // Ordered in increasing order of upper_bound, +Inf bucket is optional. +} + +message Bucket { + optional uint64 cumulative_count = 1; // Cumulative in increasing order. + optional double upper_bound = 2; // Inclusive. 
+}
+
+message Metric {
+  repeated LabelPair label        = 1;
+  optional Gauge     gauge        = 2;
+  optional Counter   counter      = 3;
+  optional Summary   summary      = 4;
+  optional Untyped   untyped      = 5;
+  optional Histogram histogram    = 7;
+  optional int64     timestamp_ms = 6;
+}
+
+message MetricFamily {
+  optional string     name   = 1;
+  optional string     help   = 2;
+  optional MetricType type   = 3;
+  repeated Metric     metric = 4;
+}
diff --git a/vendor/github.com/prometheus/client_model/pom.xml b/vendor/github.com/prometheus/client_model/pom.xml
new file mode 100644
index 000000000000..4d34c9015723
--- /dev/null
+++ b/vendor/github.com/prometheus/client_model/pom.xml
@@ -0,0 +1,130 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+
+  <groupId>io.prometheus.client</groupId>
+  <artifactId>model</artifactId>
+  <version>0.0.3-SNAPSHOT</version>
+
+  <parent>
+    <groupId>org.sonatype.oss</groupId>
+    <artifactId>oss-parent</artifactId>
+    <version>7</version>
+  </parent>
+
+  <name>Prometheus Client Data Model</name>
+  <url>http://github.com/prometheus/client_model</url>
+  <description>
+    Prometheus Client Data Model: Generated Protocol Buffer Assets
+  </description>
+
+  <licenses>
+    <license>
+      <name>The Apache Software License, Version 2.0</name>
+      <url>http://www.apache.org/licenses/LICENSE-2.0.txt</url>
+      <distribution>repo</distribution>
+    </license>
+  </licenses>
+
+  <scm>
+    <connection>scm:git:git@github.com:prometheus/client_model.git</connection>
+    <developerConnection>scm:git:git@github.com:prometheus/client_model.git</developerConnection>
+    <url>git@github.com:prometheus/client_model.git</url>
+  </scm>
+
+  <developers>
+    <developer>
+      <id>mtp</id>
+      <name>Matt T. Proud</name>
+      <email>matt.proud@gmail.com</email>
+    </developer>
+  </developers>
+
+  <dependencies>
+    <dependency>
+      <groupId>com.google.protobuf</groupId>
+      <artifactId>protobuf-java</artifactId>
+      <version>2.5.0</version>
+    </dependency>
+  </dependencies>
+
+  <build>
+    <plugins>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-javadoc-plugin</artifactId>
+        <version>2.8</version>
+        <configuration>
+          <encoding>UTF-8</encoding>
+          <docencoding>UTF-8</docencoding>
+          <linksource>true</linksource>
+        </configuration>
+        <executions>
+          <execution>
+            <id>generate-javadoc-site-report</id>
+            <phase>site</phase>
+            <goals>
+              <goal>javadoc</goal>
+            </goals>
+          </execution>
+          <execution>
+            <id>attach-javadocs</id>
+            <goals>
+              <goal>jar</goal>
+            </goals>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <artifactId>maven-compiler-plugin</artifactId>
+        <configuration>
+          <source>1.6</source>
+          <target>1.6</target>
+        </configuration>
+        <version>3.1</version>
+      </plugin>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-source-plugin</artifactId>
+        <version>2.2.1</version>
+        <executions>
+          <execution>
+            <id>attach-sources</id>
+            <goals>
+              <goal>jar</goal>
+            </goals>
+          </execution>
+        </executions>
+      </plugin>
+    </plugins>
+  </build>
+
+  <profiles>
+    <profile>
+      <id>release-sign-artifacts</id>
+      <activation>
+        <property>
+          <name>performRelease</name>
+          <value>true</value>
+        </property>
+      </activation>
+      <build>
+        <plugins>
+          <plugin>
+            <groupId>org.apache.maven.plugins</groupId>
+            <artifactId>maven-gpg-plugin</artifactId>
+            <version>1.4</version>
+            <executions>
+              <execution>
+                <id>sign-artifacts</id>
+                <phase>verify</phase>
+                <goals>
+                  <goal>sign</goal>
+                </goals>
+              </execution>
+            </executions>
+          </plugin>
+        </plugins>
+      </build>
+    </profile>
+  </profiles>
+</project>
diff --git a/vendor/github.com/prometheus/client_model/setup.py b/vendor/github.com/prometheus/client_model/setup.py
new file mode 100644
index 000000000000..67b9f20e3ad5
--- /dev/null
+++ b/vendor/github.com/prometheus/client_model/setup.py
@@ -0,0 +1,23 @@
+#!/usr/bin/python
+
+from setuptools import setup
+
+setup(
+    name = 'prometheus_client_model',
+    version = '0.0.1',
+    author = 'Matt T.
Proud', + author_email = 'matt.proud@gmail.com', + description = 'Data model artifacts for the Prometheus client.', + license = 'Apache License 2.0', + url = 'http://github.com/prometheus/client_model', + packages = ['prometheus', 'prometheus/client', 'prometheus/client/model'], + package_dir = {'': 'python'}, + requires = ['protobuf(==2.4.1)'], + platforms = 'Platform Independent', + classifiers = ['Development Status :: 3 - Alpha', + 'Intended Audience :: Developers', + 'Intended Audience :: System Administrators', + 'License :: OSI Approved :: Apache Software License', + 'Operating System :: OS Independent', + 'Topic :: Software Development :: Testing', + 'Topic :: System :: Monitoring']) diff --git a/vendor/github.com/prometheus/common/.travis.yml b/vendor/github.com/prometheus/common/.travis.yml new file mode 100644 index 000000000000..529b0a4286e9 --- /dev/null +++ b/vendor/github.com/prometheus/common/.travis.yml @@ -0,0 +1,7 @@ +sudo: false + +language: go +go: + - 1.4.3 + - 1.5.1 + - tip diff --git a/vendor/github.com/prometheus/common/AUTHORS.md b/vendor/github.com/prometheus/common/AUTHORS.md new file mode 100644 index 000000000000..cec1574cc34c --- /dev/null +++ b/vendor/github.com/prometheus/common/AUTHORS.md @@ -0,0 +1,11 @@ +Maintainers of this repository: + +* Fabian Reinartz + +The following individuals have contributed code to this repository +(listed in alphabetical order): + +* Björn Rabenstein +* Fabian Reinartz +* Julius Volz +* Miguel Molina diff --git a/vendor/github.com/prometheus/common/CONTRIBUTING.md b/vendor/github.com/prometheus/common/CONTRIBUTING.md new file mode 100644 index 000000000000..5705f0fbea29 --- /dev/null +++ b/vendor/github.com/prometheus/common/CONTRIBUTING.md @@ -0,0 +1,18 @@ +# Contributing + +Prometheus uses GitHub to manage reviews of pull requests. + +* If you have a trivial fix or improvement, go ahead and create a pull + request, addressing (with `@...`) one or more of the maintainers + (see [AUTHORS.md](AUTHORS.md)) in the description of the pull request. + +* If you plan to do something more involved, first discuss your ideas + on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers). + This will avoid unnecessary work and surely give you and us a good deal + of inspiration. + +* Relevant coding style guidelines are the [Go Code Review + Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments) + and the _Formatting and style_ section of Peter Bourgon's [Go: Best + Practices for Production + Environments](http://peter.bourgon.org/go-in-production/#formatting-and-style). diff --git a/vendor/github.com/prometheus/common/README.md b/vendor/github.com/prometheus/common/README.md new file mode 100644 index 000000000000..10c3b931ecdd --- /dev/null +++ b/vendor/github.com/prometheus/common/README.md @@ -0,0 +1,8 @@ +# Common +[![Build Status](https://travis-ci.org/prometheus/common.svg)](https://travis-ci.org/prometheus/common) + +This repository contains Go libraries that are shared across Prometheus +components and libraries. 
+ +* **model**: Shared data structures +* **expfmt**: Decoding and encoding for the exposition format diff --git a/vendor/github.com/prometheus/common/expfmt/BUILD b/vendor/github.com/prometheus/common/expfmt/BUILD index c3b10b7df3c1..31faa8f6d99b 100644 --- a/vendor/github.com/prometheus/common/expfmt/BUILD +++ b/vendor/github.com/prometheus/common/expfmt/BUILD @@ -1,9 +1,4 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "go_default_library", @@ -16,6 +11,7 @@ go_library( "text_parse.go", ], importpath = "github.com/prometheus/common/expfmt", + visibility = ["//visibility:public"], deps = [ "//vendor/bitbucket.org/ww/goautoneg:go_default_library", "//vendor/github.com/golang/protobuf/proto:go_default_library", @@ -25,6 +21,25 @@ go_library( ], ) +go_test( + name = "go_default_test", + srcs = [ + "bench_test.go", + "decode_test.go", + "json_decode_test.go", + "text_create_test.go", + "text_parse_test.go", + ], + importpath = "github.com/prometheus/common/expfmt", + library = ":go_default_library", + deps = [ + "//vendor/github.com/golang/protobuf/proto:go_default_library", + "//vendor/github.com/matttproud/golang_protobuf_extensions/pbutil:go_default_library", + "//vendor/github.com/prometheus/client_model/go:go_default_library", + "//vendor/github.com/prometheus/common/model:go_default_library", + ], +) + filegroup( name = "package-srcs", srcs = glob(["**"]), @@ -36,4 +51,5 @@ filegroup( name = "all-srcs", srcs = [":package-srcs"], tags = ["automanaged"], + visibility = ["//visibility:public"], ) diff --git a/vendor/github.com/prometheus/common/expfmt/bench_test.go b/vendor/github.com/prometheus/common/expfmt/bench_test.go new file mode 100644 index 000000000000..92b16a028aad --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/bench_test.go @@ -0,0 +1,171 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package expfmt + +import ( + "bytes" + "compress/gzip" + "io" + "io/ioutil" + "testing" + + "github.com/matttproud/golang_protobuf_extensions/pbutil" + + dto "github.com/prometheus/client_model/go" +) + +var parser TextParser + +// Benchmarks to show how much penalty text format parsing actually inflicts. +// +// Example results on Linux 3.13.0, Intel(R) Core(TM) i7-4700MQ CPU @ 2.40GHz, go1.4. +// +// BenchmarkParseText 1000 1188535 ns/op 205085 B/op 6135 allocs/op +// BenchmarkParseTextGzip 1000 1376567 ns/op 246224 B/op 6151 allocs/op +// BenchmarkParseProto 10000 172790 ns/op 52258 B/op 1160 allocs/op +// BenchmarkParseProtoGzip 5000 324021 ns/op 94931 B/op 1211 allocs/op +// BenchmarkParseProtoMap 10000 187946 ns/op 58714 B/op 1203 allocs/op +// +// CONCLUSION: The overhead for the map is negligible. Text format needs ~5x more allocations. 
+// Without compression, it needs ~7x longer, but with compression (the more relevant scenario), +// the difference becomes less relevant, only ~4x. +// +// The test data contains 248 samples. +// +// BenchmarkProcessor002ParseOnly in the extraction package is not quite +// comparable to the benchmarks here, but it gives an idea: JSON parsing is even +// slower than text parsing and needs a comparable amount of allocs. + +// BenchmarkParseText benchmarks the parsing of a text-format scrape into metric +// family DTOs. +func BenchmarkParseText(b *testing.B) { + b.StopTimer() + data, err := ioutil.ReadFile("testdata/text") + if err != nil { + b.Fatal(err) + } + b.StartTimer() + + for i := 0; i < b.N; i++ { + if _, err := parser.TextToMetricFamilies(bytes.NewReader(data)); err != nil { + b.Fatal(err) + } + } +} + +// BenchmarkParseTextGzip benchmarks the parsing of a gzipped text-format scrape +// into metric family DTOs. +func BenchmarkParseTextGzip(b *testing.B) { + b.StopTimer() + data, err := ioutil.ReadFile("testdata/text.gz") + if err != nil { + b.Fatal(err) + } + b.StartTimer() + + for i := 0; i < b.N; i++ { + in, err := gzip.NewReader(bytes.NewReader(data)) + if err != nil { + b.Fatal(err) + } + if _, err := parser.TextToMetricFamilies(in); err != nil { + b.Fatal(err) + } + } +} + +// BenchmarkParseProto benchmarks the parsing of a protobuf-format scrape into +// metric family DTOs. Note that this does not build a map of metric families +// (as the text version does), because it is not required for Prometheus +// ingestion either. (However, it is required for the text-format parsing, as +// the metric family might be sprinkled all over the text, while the +// protobuf-format guarantees bundling at one place.) +func BenchmarkParseProto(b *testing.B) { + b.StopTimer() + data, err := ioutil.ReadFile("testdata/protobuf") + if err != nil { + b.Fatal(err) + } + b.StartTimer() + + for i := 0; i < b.N; i++ { + family := &dto.MetricFamily{} + in := bytes.NewReader(data) + for { + family.Reset() + if _, err := pbutil.ReadDelimited(in, family); err != nil { + if err == io.EOF { + break + } + b.Fatal(err) + } + } + } +} + +// BenchmarkParseProtoGzip is like BenchmarkParseProto above, but parses gzipped +// protobuf format. +func BenchmarkParseProtoGzip(b *testing.B) { + b.StopTimer() + data, err := ioutil.ReadFile("testdata/protobuf.gz") + if err != nil { + b.Fatal(err) + } + b.StartTimer() + + for i := 0; i < b.N; i++ { + family := &dto.MetricFamily{} + in, err := gzip.NewReader(bytes.NewReader(data)) + if err != nil { + b.Fatal(err) + } + for { + family.Reset() + if _, err := pbutil.ReadDelimited(in, family); err != nil { + if err == io.EOF { + break + } + b.Fatal(err) + } + } + } +} + +// BenchmarkParseProtoMap is like BenchmarkParseProto but DOES put the parsed +// metric family DTOs into a map. This is not happening during Prometheus +// ingestion. It is just here to measure the overhead of that map creation and +// separate it from the overhead of the text format parsing. 
+func BenchmarkParseProtoMap(b *testing.B) { + b.StopTimer() + data, err := ioutil.ReadFile("testdata/protobuf") + if err != nil { + b.Fatal(err) + } + b.StartTimer() + + for i := 0; i < b.N; i++ { + families := map[string]*dto.MetricFamily{} + in := bytes.NewReader(data) + for { + family := &dto.MetricFamily{} + if _, err := pbutil.ReadDelimited(in, family); err != nil { + if err == io.EOF { + break + } + b.Fatal(err) + } + families[family.GetName()] = family + } + } +} diff --git a/vendor/github.com/prometheus/common/expfmt/decode_test.go b/vendor/github.com/prometheus/common/expfmt/decode_test.go new file mode 100644 index 000000000000..9f78d9dabc2f --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/decode_test.go @@ -0,0 +1,356 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package expfmt + +import ( + "io" + "net/http" + "reflect" + "sort" + "strings" + "testing" + + "github.com/prometheus/common/model" +) + +func TestTextDecoder(t *testing.T) { + var ( + ts = model.Now() + in = ` +# Only a quite simple scenario with two metric families. +# More complicated tests of the parser itself can be found in the text package. +# TYPE mf2 counter +mf2 3 +mf1{label="value1"} -3.14 123456 +mf1{label="value2"} 42 +mf2 4 +` + out = model.Vector{ + &model.Sample{ + Metric: model.Metric{ + model.MetricNameLabel: "mf1", + "label": "value1", + }, + Value: -3.14, + Timestamp: 123456, + }, + &model.Sample{ + Metric: model.Metric{ + model.MetricNameLabel: "mf1", + "label": "value2", + }, + Value: 42, + Timestamp: ts, + }, + &model.Sample{ + Metric: model.Metric{ + model.MetricNameLabel: "mf2", + }, + Value: 3, + Timestamp: ts, + }, + &model.Sample{ + Metric: model.Metric{ + model.MetricNameLabel: "mf2", + }, + Value: 4, + Timestamp: ts, + }, + } + ) + + dec := &SampleDecoder{ + Dec: &textDecoder{r: strings.NewReader(in)}, + Opts: &DecodeOptions{ + Timestamp: ts, + }, + } + var all model.Vector + for { + var smpls model.Vector + err := dec.Decode(&smpls) + if err == io.EOF { + break + } + if err != nil { + t.Fatal(err) + } + all = append(all, smpls...) 
+ } + sort.Sort(all) + sort.Sort(out) + if !reflect.DeepEqual(all, out) { + t.Fatalf("output does not match") + } +} + +func TestProtoDecoder(t *testing.T) { + + var testTime = model.Now() + + scenarios := []struct { + in string + expected model.Vector + }{ + { + in: "", + }, + { + in: "\x8f\x01\n\rrequest_count\x12\x12Number of requests\x18\x00\"0\n#\n\x0fsome_label_name\x12\x10some_label_value\x1a\t\t\x00\x00\x00\x00\x00\x00E\xc0\"6\n)\n\x12another_label_name\x12\x13another_label_value\x1a\t\t\x00\x00\x00\x00\x00\x00U@", + expected: model.Vector{ + &model.Sample{ + Metric: model.Metric{ + model.MetricNameLabel: "request_count", + "some_label_name": "some_label_value", + }, + Value: -42, + Timestamp: testTime, + }, + &model.Sample{ + Metric: model.Metric{ + model.MetricNameLabel: "request_count", + "another_label_name": "another_label_value", + }, + Value: 84, + Timestamp: testTime, + }, + }, + }, + { + in: "\xb9\x01\n\rrequest_count\x12\x12Number of requests\x18\x02\"O\n#\n\x0fsome_label_name\x12\x10some_label_value\"(\x1a\x12\t\xaeG\xe1z\x14\xae\xef?\x11\x00\x00\x00\x00\x00\x00E\xc0\x1a\x12\t+\x87\x16\xd9\xce\xf7\xef?\x11\x00\x00\x00\x00\x00\x00U\xc0\"A\n)\n\x12another_label_name\x12\x13another_label_value\"\x14\x1a\x12\t\x00\x00\x00\x00\x00\x00\xe0?\x11\x00\x00\x00\x00\x00\x00$@", + expected: model.Vector{ + &model.Sample{ + Metric: model.Metric{ + model.MetricNameLabel: "request_count_count", + "some_label_name": "some_label_value", + }, + Value: 0, + Timestamp: testTime, + }, + &model.Sample{ + Metric: model.Metric{ + model.MetricNameLabel: "request_count_sum", + "some_label_name": "some_label_value", + }, + Value: 0, + Timestamp: testTime, + }, + &model.Sample{ + Metric: model.Metric{ + model.MetricNameLabel: "request_count", + "some_label_name": "some_label_value", + "quantile": "0.99", + }, + Value: -42, + Timestamp: testTime, + }, + &model.Sample{ + Metric: model.Metric{ + model.MetricNameLabel: "request_count", + "some_label_name": "some_label_value", + "quantile": "0.999", + }, + Value: -84, + Timestamp: testTime, + }, + &model.Sample{ + Metric: model.Metric{ + model.MetricNameLabel: "request_count_count", + "another_label_name": "another_label_value", + }, + Value: 0, + Timestamp: testTime, + }, + &model.Sample{ + Metric: model.Metric{ + model.MetricNameLabel: "request_count_sum", + "another_label_name": "another_label_value", + }, + Value: 0, + Timestamp: testTime, + }, + &model.Sample{ + Metric: model.Metric{ + model.MetricNameLabel: "request_count", + "another_label_name": "another_label_value", + "quantile": "0.5", + }, + Value: 10, + Timestamp: testTime, + }, + }, + }, + { + in: "\x8d\x01\n\x1drequest_duration_microseconds\x12\x15The response latency.\x18\x04\"S:Q\b\x85\x15\x11\xcd\xcc\xccL\x8f\xcb:A\x1a\v\b{\x11\x00\x00\x00\x00\x00\x00Y@\x1a\f\b\x9c\x03\x11\x00\x00\x00\x00\x00\x00^@\x1a\f\b\xd0\x04\x11\x00\x00\x00\x00\x00\x00b@\x1a\f\b\xf4\v\x11\x9a\x99\x99\x99\x99\x99e@\x1a\f\b\x85\x15\x11\x00\x00\x00\x00\x00\x00\xf0\u007f", + expected: model.Vector{ + &model.Sample{ + Metric: model.Metric{ + model.MetricNameLabel: "request_duration_microseconds_bucket", + "le": "100", + }, + Value: 123, + Timestamp: testTime, + }, + &model.Sample{ + Metric: model.Metric{ + model.MetricNameLabel: "request_duration_microseconds_bucket", + "le": "120", + }, + Value: 412, + Timestamp: testTime, + }, + &model.Sample{ + Metric: model.Metric{ + model.MetricNameLabel: "request_duration_microseconds_bucket", + "le": "144", + }, + Value: 592, + Timestamp: testTime, + }, + &model.Sample{ + 
Metric: model.Metric{ + model.MetricNameLabel: "request_duration_microseconds_bucket", + "le": "172.8", + }, + Value: 1524, + Timestamp: testTime, + }, + &model.Sample{ + Metric: model.Metric{ + model.MetricNameLabel: "request_duration_microseconds_bucket", + "le": "+Inf", + }, + Value: 2693, + Timestamp: testTime, + }, + &model.Sample{ + Metric: model.Metric{ + model.MetricNameLabel: "request_duration_microseconds_sum", + }, + Value: 1756047.3, + Timestamp: testTime, + }, + &model.Sample{ + Metric: model.Metric{ + model.MetricNameLabel: "request_duration_microseconds_count", + }, + Value: 2693, + Timestamp: testTime, + }, + }, + }, + { + // The metric type is unset in this protobuf, which needs to be handled + // correctly by the decoder. + in: "\x1c\n\rrequest_count\"\v\x1a\t\t\x00\x00\x00\x00\x00\x00\xf0?", + expected: model.Vector{ + &model.Sample{ + Metric: model.Metric{ + model.MetricNameLabel: "request_count", + }, + Value: 1, + Timestamp: testTime, + }, + }, + }, + } + + for i, scenario := range scenarios { + dec := &SampleDecoder{ + Dec: &protoDecoder{r: strings.NewReader(scenario.in)}, + Opts: &DecodeOptions{ + Timestamp: testTime, + }, + } + + var all model.Vector + for { + var smpls model.Vector + err := dec.Decode(&smpls) + if err == io.EOF { + break + } + if err != nil { + t.Fatal(err) + } + all = append(all, smpls...) + } + sort.Sort(all) + sort.Sort(scenario.expected) + if !reflect.DeepEqual(all, scenario.expected) { + t.Fatalf("%d. output does not match, want: %#v, got %#v", i, scenario.expected, all) + } + } +} + +func testDiscriminatorHTTPHeader(t testing.TB) { + var scenarios = []struct { + input map[string]string + output Format + err error + }{ + { + input: map[string]string{"Content-Type": `application/vnd.google.protobuf; proto="io.prometheus.client.MetricFamily"; encoding="delimited"`}, + output: FmtProtoDelim, + }, + { + input: map[string]string{"Content-Type": `application/vnd.google.protobuf; proto="illegal"; encoding="delimited"`}, + output: FmtUnknown, + }, + { + input: map[string]string{"Content-Type": `application/vnd.google.protobuf; proto="io.prometheus.client.MetricFamily"; encoding="illegal"`}, + output: FmtUnknown, + }, + { + input: map[string]string{"Content-Type": `text/plain; version=0.0.4`}, + output: FmtText, + }, + { + input: map[string]string{"Content-Type": `text/plain`}, + output: FmtText, + }, + { + input: map[string]string{"Content-Type": `text/plain; version=0.0.3`}, + output: FmtUnknown, + }, + } + + for i, scenario := range scenarios { + var header http.Header + + if len(scenario.input) > 0 { + header = http.Header{} + } + + for key, value := range scenario.input { + header.Add(key, value) + } + + actual := ResponseFormat(header) + + if scenario.output != actual { + t.Errorf("%d. expected %s, got %s", i, scenario.output, actual) + } + } +} + +func TestDiscriminatorHTTPHeader(t *testing.T) { + testDiscriminatorHTTPHeader(t) +} + +func BenchmarkDiscriminatorHTTPHeader(b *testing.B) { + for i := 0; i < b.N; i++ { + testDiscriminatorHTTPHeader(b) + } +} diff --git a/vendor/github.com/prometheus/common/expfmt/json_decode_test.go b/vendor/github.com/prometheus/common/expfmt/json_decode_test.go new file mode 100644 index 000000000000..c98ea29e1768 --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/json_decode_test.go @@ -0,0 +1,124 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package expfmt + +import ( + "os" + "reflect" + "testing" + + "github.com/golang/protobuf/proto" + dto "github.com/prometheus/client_model/go" +) + +func TestJSON2Decode(t *testing.T) { + f, err := os.Open("testdata/json2") + if err != nil { + t.Fatal(err) + } + defer f.Close() + + dec := newJSON2Decoder(f) + + var v1 dto.MetricFamily + if err := dec.Decode(&v1); err != nil { + t.Fatal(err) + } + + exp1 := dto.MetricFamily{ + Type: dto.MetricType_UNTYPED.Enum(), + Help: proto.String("RPC calls."), + Name: proto.String("rpc_calls_total"), + Metric: []*dto.Metric{ + { + Label: []*dto.LabelPair{ + { + Name: proto.String("job"), + Value: proto.String("batch_job"), + }, { + Name: proto.String("service"), + Value: proto.String("zed"), + }, + }, + Untyped: &dto.Untyped{ + Value: proto.Float64(25), + }, + }, + { + Label: []*dto.LabelPair{ + { + Name: proto.String("job"), + Value: proto.String("batch_job"), + }, { + Name: proto.String("service"), + Value: proto.String("bar"), + }, + }, + Untyped: &dto.Untyped{ + Value: proto.Float64(24), + }, + }, + }, + } + + if !reflect.DeepEqual(v1, exp1) { + t.Fatalf("Expected %v, got %v", exp1, v1) + } + + var v2 dto.MetricFamily + if err := dec.Decode(&v2); err != nil { + t.Fatal(err) + } + + exp2 := dto.MetricFamily{ + Type: dto.MetricType_UNTYPED.Enum(), + Help: proto.String("RPC latency."), + Name: proto.String("rpc_latency_microseconds"), + Metric: []*dto.Metric{ + { + Label: []*dto.LabelPair{ + { + Name: proto.String("percentile"), + Value: proto.String("0.010000"), + }, { + Name: proto.String("service"), + Value: proto.String("foo"), + }, + }, + Untyped: &dto.Untyped{ + Value: proto.Float64(15), + }, + }, + { + Label: []*dto.LabelPair{ + { + Name: proto.String("percentile"), + Value: proto.String("0.990000"), + }, { + Name: proto.String("service"), + Value: proto.String("foo"), + }, + }, + Untyped: &dto.Untyped{ + Value: proto.Float64(17), + }, + }, + }, + } + + if !reflect.DeepEqual(v2, exp2) { + t.Fatalf("Expected %v, got %v", exp2, v2) + } + +} diff --git a/vendor/github.com/prometheus/common/expfmt/text_create_test.go b/vendor/github.com/prometheus/common/expfmt/text_create_test.go new file mode 100644 index 000000000000..e4cc5d803bb8 --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/text_create_test.go @@ -0,0 +1,443 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package expfmt + +import ( + "bytes" + "math" + "strings" + "testing" + + "github.com/golang/protobuf/proto" + + dto "github.com/prometheus/client_model/go" +) + +func testCreate(t testing.TB) { + var scenarios = []struct { + in *dto.MetricFamily + out string + }{ + // 0: Counter, NaN as value, timestamp given. + { + in: &dto.MetricFamily{ + Name: proto.String("name"), + Help: proto.String("two-line\n doc str\\ing"), + Type: dto.MetricType_COUNTER.Enum(), + Metric: []*dto.Metric{ + &dto.Metric{ + Label: []*dto.LabelPair{ + &dto.LabelPair{ + Name: proto.String("labelname"), + Value: proto.String("val1"), + }, + &dto.LabelPair{ + Name: proto.String("basename"), + Value: proto.String("basevalue"), + }, + }, + Counter: &dto.Counter{ + Value: proto.Float64(math.NaN()), + }, + }, + &dto.Metric{ + Label: []*dto.LabelPair{ + &dto.LabelPair{ + Name: proto.String("labelname"), + Value: proto.String("val2"), + }, + &dto.LabelPair{ + Name: proto.String("basename"), + Value: proto.String("basevalue"), + }, + }, + Counter: &dto.Counter{ + Value: proto.Float64(.23), + }, + TimestampMs: proto.Int64(1234567890), + }, + }, + }, + out: `# HELP name two-line\n doc str\\ing +# TYPE name counter +name{labelname="val1",basename="basevalue"} NaN +name{labelname="val2",basename="basevalue"} 0.23 1234567890 +`, + }, + // 1: Gauge, some escaping required, +Inf as value, multi-byte characters in label values. + { + in: &dto.MetricFamily{ + Name: proto.String("gauge_name"), + Help: proto.String("gauge\ndoc\nstr\"ing"), + Type: dto.MetricType_GAUGE.Enum(), + Metric: []*dto.Metric{ + &dto.Metric{ + Label: []*dto.LabelPair{ + &dto.LabelPair{ + Name: proto.String("name_1"), + Value: proto.String("val with\nnew line"), + }, + &dto.LabelPair{ + Name: proto.String("name_2"), + Value: proto.String("val with \\backslash and \"quotes\""), + }, + }, + Gauge: &dto.Gauge{ + Value: proto.Float64(math.Inf(+1)), + }, + }, + &dto.Metric{ + Label: []*dto.LabelPair{ + &dto.LabelPair{ + Name: proto.String("name_1"), + Value: proto.String("Björn"), + }, + &dto.LabelPair{ + Name: proto.String("name_2"), + Value: proto.String("佖佥"), + }, + }, + Gauge: &dto.Gauge{ + Value: proto.Float64(3.14E42), + }, + }, + }, + }, + out: `# HELP gauge_name gauge\ndoc\nstr"ing +# TYPE gauge_name gauge +gauge_name{name_1="val with\nnew line",name_2="val with \\backslash and \"quotes\""} +Inf +gauge_name{name_1="Björn",name_2="佖佥"} 3.14e+42 +`, + }, + // 2: Untyped, no help, one sample with no labels and -Inf as value, another sample with one label. + { + in: &dto.MetricFamily{ + Name: proto.String("untyped_name"), + Type: dto.MetricType_UNTYPED.Enum(), + Metric: []*dto.Metric{ + &dto.Metric{ + Untyped: &dto.Untyped{ + Value: proto.Float64(math.Inf(-1)), + }, + }, + &dto.Metric{ + Label: []*dto.LabelPair{ + &dto.LabelPair{ + Name: proto.String("name_1"), + Value: proto.String("value 1"), + }, + }, + Untyped: &dto.Untyped{ + Value: proto.Float64(-1.23e-45), + }, + }, + }, + }, + out: `# TYPE untyped_name untyped +untyped_name -Inf +untyped_name{name_1="value 1"} -1.23e-45 +`, + }, + // 3: Summary. 
+ { + in: &dto.MetricFamily{ + Name: proto.String("summary_name"), + Help: proto.String("summary docstring"), + Type: dto.MetricType_SUMMARY.Enum(), + Metric: []*dto.Metric{ + &dto.Metric{ + Summary: &dto.Summary{ + SampleCount: proto.Uint64(42), + SampleSum: proto.Float64(-3.4567), + Quantile: []*dto.Quantile{ + &dto.Quantile{ + Quantile: proto.Float64(0.5), + Value: proto.Float64(-1.23), + }, + &dto.Quantile{ + Quantile: proto.Float64(0.9), + Value: proto.Float64(.2342354), + }, + &dto.Quantile{ + Quantile: proto.Float64(0.99), + Value: proto.Float64(0), + }, + }, + }, + }, + &dto.Metric{ + Label: []*dto.LabelPair{ + &dto.LabelPair{ + Name: proto.String("name_1"), + Value: proto.String("value 1"), + }, + &dto.LabelPair{ + Name: proto.String("name_2"), + Value: proto.String("value 2"), + }, + }, + Summary: &dto.Summary{ + SampleCount: proto.Uint64(4711), + SampleSum: proto.Float64(2010.1971), + Quantile: []*dto.Quantile{ + &dto.Quantile{ + Quantile: proto.Float64(0.5), + Value: proto.Float64(1), + }, + &dto.Quantile{ + Quantile: proto.Float64(0.9), + Value: proto.Float64(2), + }, + &dto.Quantile{ + Quantile: proto.Float64(0.99), + Value: proto.Float64(3), + }, + }, + }, + }, + }, + }, + out: `# HELP summary_name summary docstring +# TYPE summary_name summary +summary_name{quantile="0.5"} -1.23 +summary_name{quantile="0.9"} 0.2342354 +summary_name{quantile="0.99"} 0 +summary_name_sum -3.4567 +summary_name_count 42 +summary_name{name_1="value 1",name_2="value 2",quantile="0.5"} 1 +summary_name{name_1="value 1",name_2="value 2",quantile="0.9"} 2 +summary_name{name_1="value 1",name_2="value 2",quantile="0.99"} 3 +summary_name_sum{name_1="value 1",name_2="value 2"} 2010.1971 +summary_name_count{name_1="value 1",name_2="value 2"} 4711 +`, + }, + // 4: Histogram + { + in: &dto.MetricFamily{ + Name: proto.String("request_duration_microseconds"), + Help: proto.String("The response latency."), + Type: dto.MetricType_HISTOGRAM.Enum(), + Metric: []*dto.Metric{ + &dto.Metric{ + Histogram: &dto.Histogram{ + SampleCount: proto.Uint64(2693), + SampleSum: proto.Float64(1756047.3), + Bucket: []*dto.Bucket{ + &dto.Bucket{ + UpperBound: proto.Float64(100), + CumulativeCount: proto.Uint64(123), + }, + &dto.Bucket{ + UpperBound: proto.Float64(120), + CumulativeCount: proto.Uint64(412), + }, + &dto.Bucket{ + UpperBound: proto.Float64(144), + CumulativeCount: proto.Uint64(592), + }, + &dto.Bucket{ + UpperBound: proto.Float64(172.8), + CumulativeCount: proto.Uint64(1524), + }, + &dto.Bucket{ + UpperBound: proto.Float64(math.Inf(+1)), + CumulativeCount: proto.Uint64(2693), + }, + }, + }, + }, + }, + }, + out: `# HELP request_duration_microseconds The response latency. +# TYPE request_duration_microseconds histogram +request_duration_microseconds_bucket{le="100"} 123 +request_duration_microseconds_bucket{le="120"} 412 +request_duration_microseconds_bucket{le="144"} 592 +request_duration_microseconds_bucket{le="172.8"} 1524 +request_duration_microseconds_bucket{le="+Inf"} 2693 +request_duration_microseconds_sum 1.7560473e+06 +request_duration_microseconds_count 2693 +`, + }, + // 5: Histogram with missing +Inf bucket. 
+ { + in: &dto.MetricFamily{ + Name: proto.String("request_duration_microseconds"), + Help: proto.String("The response latency."), + Type: dto.MetricType_HISTOGRAM.Enum(), + Metric: []*dto.Metric{ + &dto.Metric{ + Histogram: &dto.Histogram{ + SampleCount: proto.Uint64(2693), + SampleSum: proto.Float64(1756047.3), + Bucket: []*dto.Bucket{ + &dto.Bucket{ + UpperBound: proto.Float64(100), + CumulativeCount: proto.Uint64(123), + }, + &dto.Bucket{ + UpperBound: proto.Float64(120), + CumulativeCount: proto.Uint64(412), + }, + &dto.Bucket{ + UpperBound: proto.Float64(144), + CumulativeCount: proto.Uint64(592), + }, + &dto.Bucket{ + UpperBound: proto.Float64(172.8), + CumulativeCount: proto.Uint64(1524), + }, + }, + }, + }, + }, + }, + out: `# HELP request_duration_microseconds The response latency. +# TYPE request_duration_microseconds histogram +request_duration_microseconds_bucket{le="100"} 123 +request_duration_microseconds_bucket{le="120"} 412 +request_duration_microseconds_bucket{le="144"} 592 +request_duration_microseconds_bucket{le="172.8"} 1524 +request_duration_microseconds_bucket{le="+Inf"} 2693 +request_duration_microseconds_sum 1.7560473e+06 +request_duration_microseconds_count 2693 +`, + }, + // 6: No metric type, should result in default type Counter. + { + in: &dto.MetricFamily{ + Name: proto.String("name"), + Help: proto.String("doc string"), + Metric: []*dto.Metric{ + &dto.Metric{ + Counter: &dto.Counter{ + Value: proto.Float64(math.Inf(-1)), + }, + }, + }, + }, + out: `# HELP name doc string +# TYPE name counter +name -Inf +`, + }, + } + + for i, scenario := range scenarios { + out := bytes.NewBuffer(make([]byte, 0, len(scenario.out))) + n, err := MetricFamilyToText(out, scenario.in) + if err != nil { + t.Errorf("%d. error: %s", i, err) + continue + } + if expected, got := len(scenario.out), n; expected != got { + t.Errorf( + "%d. expected %d bytes written, got %d", + i, expected, got, + ) + } + if expected, got := scenario.out, out.String(); expected != got { + t.Errorf( + "%d. expected out=%q, got %q", + i, expected, got, + ) + } + } + +} + +func TestCreate(t *testing.T) { + testCreate(t) +} + +func BenchmarkCreate(b *testing.B) { + for i := 0; i < b.N; i++ { + testCreate(b) + } +} + +func testCreateError(t testing.TB) { + var scenarios = []struct { + in *dto.MetricFamily + err string + }{ + // 0: No metric. + { + in: &dto.MetricFamily{ + Name: proto.String("name"), + Help: proto.String("doc string"), + Type: dto.MetricType_COUNTER.Enum(), + Metric: []*dto.Metric{}, + }, + err: "MetricFamily has no metrics", + }, + // 1: No metric name. + { + in: &dto.MetricFamily{ + Help: proto.String("doc string"), + Type: dto.MetricType_UNTYPED.Enum(), + Metric: []*dto.Metric{ + &dto.Metric{ + Untyped: &dto.Untyped{ + Value: proto.Float64(math.Inf(-1)), + }, + }, + }, + }, + err: "MetricFamily has no name", + }, + // 2: Wrong type. + { + in: &dto.MetricFamily{ + Name: proto.String("name"), + Help: proto.String("doc string"), + Type: dto.MetricType_COUNTER.Enum(), + Metric: []*dto.Metric{ + &dto.Metric{ + Untyped: &dto.Untyped{ + Value: proto.Float64(math.Inf(-1)), + }, + }, + }, + }, + err: "expected counter in metric", + }, + } + + for i, scenario := range scenarios { + var out bytes.Buffer + _, err := MetricFamilyToText(&out, scenario.in) + if err == nil { + t.Errorf("%d. expected error, got nil", i) + continue + } + if expected, got := scenario.err, err.Error(); strings.Index(got, expected) != 0 { + t.Errorf( + "%d. 
expected error starting with %q, got %q", + i, expected, got, + ) + } + } + +} + +func TestCreateError(t *testing.T) { + testCreateError(t) +} + +func BenchmarkCreateError(b *testing.B) { + for i := 0; i < b.N; i++ { + testCreateError(b) + } +} diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse_test.go b/vendor/github.com/prometheus/common/expfmt/text_parse_test.go new file mode 100644 index 000000000000..589c87a9d5fd --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/text_parse_test.go @@ -0,0 +1,586 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package expfmt + +import ( + "math" + "strings" + "testing" + + "github.com/golang/protobuf/proto" + dto "github.com/prometheus/client_model/go" +) + +func testTextParse(t testing.TB) { + var scenarios = []struct { + in string + out []*dto.MetricFamily + }{ + // 0: Empty lines as input. + { + in: ` + +`, + out: []*dto.MetricFamily{}, + }, + // 1: Minimal case. + { + in: ` +minimal_metric 1.234 +another_metric -3e3 103948 +# Even that: +no_labels{} 3 +# HELP line for non-existing metric will be ignored. +`, + out: []*dto.MetricFamily{ + &dto.MetricFamily{ + Name: proto.String("minimal_metric"), + Type: dto.MetricType_UNTYPED.Enum(), + Metric: []*dto.Metric{ + &dto.Metric{ + Untyped: &dto.Untyped{ + Value: proto.Float64(1.234), + }, + }, + }, + }, + &dto.MetricFamily{ + Name: proto.String("another_metric"), + Type: dto.MetricType_UNTYPED.Enum(), + Metric: []*dto.Metric{ + &dto.Metric{ + Untyped: &dto.Untyped{ + Value: proto.Float64(-3e3), + }, + TimestampMs: proto.Int64(103948), + }, + }, + }, + &dto.MetricFamily{ + Name: proto.String("no_labels"), + Type: dto.MetricType_UNTYPED.Enum(), + Metric: []*dto.Metric{ + &dto.Metric{ + Untyped: &dto.Untyped{ + Value: proto.Float64(3), + }, + }, + }, + }, + }, + }, + // 2: Counters & gauges, docstrings, various whitespace, escape sequences. + { + in: ` +# A normal comment. 
+# +# TYPE name counter +name{labelname="val1",basename="basevalue"} NaN +name {labelname="val2",basename="base\"v\\al\nue"} 0.23 1234567890 +# HELP name two-line\n doc str\\ing + + # HELP name2 doc str"ing 2 + # TYPE name2 gauge +name2{labelname="val2" ,basename = "basevalue2" } +Inf 54321 +name2{ labelname = "val1" , }-Inf +`, + out: []*dto.MetricFamily{ + &dto.MetricFamily{ + Name: proto.String("name"), + Help: proto.String("two-line\n doc str\\ing"), + Type: dto.MetricType_COUNTER.Enum(), + Metric: []*dto.Metric{ + &dto.Metric{ + Label: []*dto.LabelPair{ + &dto.LabelPair{ + Name: proto.String("labelname"), + Value: proto.String("val1"), + }, + &dto.LabelPair{ + Name: proto.String("basename"), + Value: proto.String("basevalue"), + }, + }, + Counter: &dto.Counter{ + Value: proto.Float64(math.NaN()), + }, + }, + &dto.Metric{ + Label: []*dto.LabelPair{ + &dto.LabelPair{ + Name: proto.String("labelname"), + Value: proto.String("val2"), + }, + &dto.LabelPair{ + Name: proto.String("basename"), + Value: proto.String("base\"v\\al\nue"), + }, + }, + Counter: &dto.Counter{ + Value: proto.Float64(.23), + }, + TimestampMs: proto.Int64(1234567890), + }, + }, + }, + &dto.MetricFamily{ + Name: proto.String("name2"), + Help: proto.String("doc str\"ing 2"), + Type: dto.MetricType_GAUGE.Enum(), + Metric: []*dto.Metric{ + &dto.Metric{ + Label: []*dto.LabelPair{ + &dto.LabelPair{ + Name: proto.String("labelname"), + Value: proto.String("val2"), + }, + &dto.LabelPair{ + Name: proto.String("basename"), + Value: proto.String("basevalue2"), + }, + }, + Gauge: &dto.Gauge{ + Value: proto.Float64(math.Inf(+1)), + }, + TimestampMs: proto.Int64(54321), + }, + &dto.Metric{ + Label: []*dto.LabelPair{ + &dto.LabelPair{ + Name: proto.String("labelname"), + Value: proto.String("val1"), + }, + }, + Gauge: &dto.Gauge{ + Value: proto.Float64(math.Inf(-1)), + }, + }, + }, + }, + }, + }, + // 3: The evil summary, mixed with other types and funny comments. + { + in: ` +# TYPE my_summary summary +my_summary{n1="val1",quantile="0.5"} 110 +decoy -1 -2 +my_summary{n1="val1",quantile="0.9"} 140 1 +my_summary_count{n1="val1"} 42 +# Latest timestamp wins in case of a summary. 
+my_summary_sum{n1="val1"} 4711 2 +fake_sum{n1="val1"} 2001 +# TYPE another_summary summary +another_summary_count{n2="val2",n1="val1"} 20 +my_summary_count{n2="val2",n1="val1"} 5 5 +another_summary{n1="val1",n2="val2",quantile=".3"} -1.2 +my_summary_sum{n1="val2"} 08 15 +my_summary{n1="val3", quantile="0.2"} 4711 + my_summary{n1="val1",n2="val2",quantile="-12.34",} NaN +# some +# funny comments +# HELP +# HELP +# HELP my_summary +# HELP my_summary +`, + out: []*dto.MetricFamily{ + &dto.MetricFamily{ + Name: proto.String("fake_sum"), + Type: dto.MetricType_UNTYPED.Enum(), + Metric: []*dto.Metric{ + &dto.Metric{ + Label: []*dto.LabelPair{ + &dto.LabelPair{ + Name: proto.String("n1"), + Value: proto.String("val1"), + }, + }, + Untyped: &dto.Untyped{ + Value: proto.Float64(2001), + }, + }, + }, + }, + &dto.MetricFamily{ + Name: proto.String("decoy"), + Type: dto.MetricType_UNTYPED.Enum(), + Metric: []*dto.Metric{ + &dto.Metric{ + Untyped: &dto.Untyped{ + Value: proto.Float64(-1), + }, + TimestampMs: proto.Int64(-2), + }, + }, + }, + &dto.MetricFamily{ + Name: proto.String("my_summary"), + Type: dto.MetricType_SUMMARY.Enum(), + Metric: []*dto.Metric{ + &dto.Metric{ + Label: []*dto.LabelPair{ + &dto.LabelPair{ + Name: proto.String("n1"), + Value: proto.String("val1"), + }, + }, + Summary: &dto.Summary{ + SampleCount: proto.Uint64(42), + SampleSum: proto.Float64(4711), + Quantile: []*dto.Quantile{ + &dto.Quantile{ + Quantile: proto.Float64(0.5), + Value: proto.Float64(110), + }, + &dto.Quantile{ + Quantile: proto.Float64(0.9), + Value: proto.Float64(140), + }, + }, + }, + TimestampMs: proto.Int64(2), + }, + &dto.Metric{ + Label: []*dto.LabelPair{ + &dto.LabelPair{ + Name: proto.String("n2"), + Value: proto.String("val2"), + }, + &dto.LabelPair{ + Name: proto.String("n1"), + Value: proto.String("val1"), + }, + }, + Summary: &dto.Summary{ + SampleCount: proto.Uint64(5), + Quantile: []*dto.Quantile{ + &dto.Quantile{ + Quantile: proto.Float64(-12.34), + Value: proto.Float64(math.NaN()), + }, + }, + }, + TimestampMs: proto.Int64(5), + }, + &dto.Metric{ + Label: []*dto.LabelPair{ + &dto.LabelPair{ + Name: proto.String("n1"), + Value: proto.String("val2"), + }, + }, + Summary: &dto.Summary{ + SampleSum: proto.Float64(8), + }, + TimestampMs: proto.Int64(15), + }, + &dto.Metric{ + Label: []*dto.LabelPair{ + &dto.LabelPair{ + Name: proto.String("n1"), + Value: proto.String("val3"), + }, + }, + Summary: &dto.Summary{ + Quantile: []*dto.Quantile{ + &dto.Quantile{ + Quantile: proto.Float64(0.2), + Value: proto.Float64(4711), + }, + }, + }, + }, + }, + }, + &dto.MetricFamily{ + Name: proto.String("another_summary"), + Type: dto.MetricType_SUMMARY.Enum(), + Metric: []*dto.Metric{ + &dto.Metric{ + Label: []*dto.LabelPair{ + &dto.LabelPair{ + Name: proto.String("n2"), + Value: proto.String("val2"), + }, + &dto.LabelPair{ + Name: proto.String("n1"), + Value: proto.String("val1"), + }, + }, + Summary: &dto.Summary{ + SampleCount: proto.Uint64(20), + Quantile: []*dto.Quantile{ + &dto.Quantile{ + Quantile: proto.Float64(0.3), + Value: proto.Float64(-1.2), + }, + }, + }, + }, + }, + }, + }, + }, + // 4: The histogram. + { + in: ` +# HELP request_duration_microseconds The response latency. 
+# TYPE request_duration_microseconds histogram +request_duration_microseconds_bucket{le="100"} 123 +request_duration_microseconds_bucket{le="120"} 412 +request_duration_microseconds_bucket{le="144"} 592 +request_duration_microseconds_bucket{le="172.8"} 1524 +request_duration_microseconds_bucket{le="+Inf"} 2693 +request_duration_microseconds_sum 1.7560473e+06 +request_duration_microseconds_count 2693 +`, + out: []*dto.MetricFamily{ + { + Name: proto.String("request_duration_microseconds"), + Help: proto.String("The response latency."), + Type: dto.MetricType_HISTOGRAM.Enum(), + Metric: []*dto.Metric{ + &dto.Metric{ + Histogram: &dto.Histogram{ + SampleCount: proto.Uint64(2693), + SampleSum: proto.Float64(1756047.3), + Bucket: []*dto.Bucket{ + &dto.Bucket{ + UpperBound: proto.Float64(100), + CumulativeCount: proto.Uint64(123), + }, + &dto.Bucket{ + UpperBound: proto.Float64(120), + CumulativeCount: proto.Uint64(412), + }, + &dto.Bucket{ + UpperBound: proto.Float64(144), + CumulativeCount: proto.Uint64(592), + }, + &dto.Bucket{ + UpperBound: proto.Float64(172.8), + CumulativeCount: proto.Uint64(1524), + }, + &dto.Bucket{ + UpperBound: proto.Float64(math.Inf(+1)), + CumulativeCount: proto.Uint64(2693), + }, + }, + }, + }, + }, + }, + }, + }, + } + + for i, scenario := range scenarios { + out, err := parser.TextToMetricFamilies(strings.NewReader(scenario.in)) + if err != nil { + t.Errorf("%d. error: %s", i, err) + continue + } + if expected, got := len(scenario.out), len(out); expected != got { + t.Errorf( + "%d. expected %d MetricFamilies, got %d", + i, expected, got, + ) + } + for _, expected := range scenario.out { + got, ok := out[expected.GetName()] + if !ok { + t.Errorf( + "%d. expected MetricFamily %q, found none", + i, expected.GetName(), + ) + continue + } + if expected.String() != got.String() { + t.Errorf( + "%d. expected MetricFamily %s, got %s", + i, expected, got, + ) + } + } + } +} + +func TestTextParse(t *testing.T) { + testTextParse(t) +} + +func BenchmarkTextParse(b *testing.B) { + for i := 0; i < b.N; i++ { + testTextParse(b) + } +} + +func testTextParseError(t testing.TB) { + var scenarios = []struct { + in string + err string + }{ + // 0: No new-line at end of input. + { + in: `bla 3.14`, + err: "EOF", + }, + // 1: Invalid escape sequence in label value. + { + in: `metric{label="\t"} 3.14`, + err: "text format parsing error in line 1: invalid escape sequence", + }, + // 2: Newline in label value. 
+ { + in: ` +metric{label="new +line"} 3.14 +`, + err: `text format parsing error in line 2: label value "new" contains unescaped new-line`, + }, + // 3: + { + in: `metric{@="bla"} 3.14`, + err: "text format parsing error in line 1: invalid label name for metric", + }, + // 4: + { + in: `metric{__name__="bla"} 3.14`, + err: `text format parsing error in line 1: label name "__name__" is reserved`, + }, + // 5: + { + in: `metric{label+="bla"} 3.14`, + err: "text format parsing error in line 1: expected '=' after label name", + }, + // 6: + { + in: `metric{label=bla} 3.14`, + err: "text format parsing error in line 1: expected '\"' at start of label value", + }, + // 7: + { + in: ` +# TYPE metric summary +metric{quantile="bla"} 3.14 +`, + err: "text format parsing error in line 3: expected float as value for 'quantile' label", + }, + // 8: + { + in: `metric{label="bla"+} 3.14`, + err: "text format parsing error in line 1: unexpected end of label value", + }, + // 9: + { + in: `metric{label="bla"} 3.14 2.72 +`, + err: "text format parsing error in line 1: expected integer as timestamp", + }, + // 10: + { + in: `metric{label="bla"} 3.14 2 3 +`, + err: "text format parsing error in line 1: spurious string after timestamp", + }, + // 11: + { + in: `metric{label="bla"} blubb +`, + err: "text format parsing error in line 1: expected float as value", + }, + // 12: + { + in: ` +# HELP metric one +# HELP metric two +`, + err: "text format parsing error in line 3: second HELP line for metric name", + }, + // 13: + { + in: ` +# TYPE metric counter +# TYPE metric untyped +`, + err: `text format parsing error in line 3: second TYPE line for metric name "metric", or TYPE reported after samples`, + }, + // 14: + { + in: ` +metric 4.12 +# TYPE metric counter +`, + err: `text format parsing error in line 3: second TYPE line for metric name "metric", or TYPE reported after samples`, + }, + // 15: + { + in: ` +# TYPE metric bla +`, + err: "text format parsing error in line 2: unknown metric type", + }, + // 16: + { + in: ` +# TYPE met-ric +`, + err: "text format parsing error in line 2: invalid metric name in comment", + }, + // 17: + { + in: `@invalidmetric{label="bla"} 3.14 2`, + err: "text format parsing error in line 1: invalid metric name", + }, + // 18: + { + in: `{label="bla"} 3.14 2`, + err: "text format parsing error in line 1: invalid metric name", + }, + // 19: + { + in: ` +# TYPE metric histogram +metric_bucket{le="bla"} 3.14 +`, + err: "text format parsing error in line 3: expected float as value for 'le' label", + }, + } + + for i, scenario := range scenarios { + _, err := parser.TextToMetricFamilies(strings.NewReader(scenario.in)) + if err == nil { + t.Errorf("%d. expected error, got nil", i) + continue + } + if expected, got := scenario.err, err.Error(); strings.Index(got, expected) != 0 { + t.Errorf( + "%d. 
expected error starting with %q, got %q", + i, expected, got, + ) + } + } + +} + +func TestTextParseError(t *testing.T) { + testTextParseError(t) +} + +func BenchmarkParseError(b *testing.B) { + for i := 0; i < b.N; i++ { + testTextParseError(b) + } +} diff --git a/vendor/github.com/prometheus/common/model/BUILD b/vendor/github.com/prometheus/common/model/BUILD index b271f8f2541e..e28a566b739b 100644 --- a/vendor/github.com/prometheus/common/model/BUILD +++ b/vendor/github.com/prometheus/common/model/BUILD @@ -1,9 +1,4 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "go_default_library", @@ -20,6 +15,20 @@ go_library( "value.go", ], importpath = "github.com/prometheus/common/model", + visibility = ["//visibility:public"], +) + +go_test( + name = "go_default_test", + srcs = [ + "labels_test.go", + "metric_test.go", + "signature_test.go", + "time_test.go", + "value_test.go", + ], + importpath = "github.com/prometheus/common/model", + library = ":go_default_library", ) filegroup( @@ -33,4 +42,5 @@ filegroup( name = "all-srcs", srcs = [":package-srcs"], tags = ["automanaged"], + visibility = ["//visibility:public"], ) diff --git a/vendor/github.com/prometheus/common/model/labels_test.go b/vendor/github.com/prometheus/common/model/labels_test.go new file mode 100644 index 000000000000..ab17025c76aa --- /dev/null +++ b/vendor/github.com/prometheus/common/model/labels_test.go @@ -0,0 +1,91 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package model + +import ( + "sort" + "testing" +) + +func testLabelNames(t testing.TB) { + var scenarios = []struct { + in LabelNames + out LabelNames + }{ + { + in: LabelNames{"ZZZ", "zzz"}, + out: LabelNames{"ZZZ", "zzz"}, + }, + { + in: LabelNames{"aaa", "AAA"}, + out: LabelNames{"AAA", "aaa"}, + }, + } + + for i, scenario := range scenarios { + sort.Sort(scenario.in) + + for j, expected := range scenario.out { + if expected != scenario.in[j] { + t.Errorf("%d.%d expected %s, got %s", i, j, expected, scenario.in[j]) + } + } + } +} + +func TestLabelNames(t *testing.T) { + testLabelNames(t) +} + +func BenchmarkLabelNames(b *testing.B) { + for i := 0; i < b.N; i++ { + testLabelNames(b) + } +} + +func testLabelValues(t testing.TB) { + var scenarios = []struct { + in LabelValues + out LabelValues + }{ + { + in: LabelValues{"ZZZ", "zzz"}, + out: LabelValues{"ZZZ", "zzz"}, + }, + { + in: LabelValues{"aaa", "AAA"}, + out: LabelValues{"AAA", "aaa"}, + }, + } + + for i, scenario := range scenarios { + sort.Sort(scenario.in) + + for j, expected := range scenario.out { + if expected != scenario.in[j] { + t.Errorf("%d.%d expected %s, got %s", i, j, expected, scenario.in[j]) + } + } + } +} + +func TestLabelValues(t *testing.T) { + testLabelValues(t) +} + +func BenchmarkLabelValues(b *testing.B) { + for i := 0; i < b.N; i++ { + testLabelValues(b) + } +} diff --git a/vendor/github.com/prometheus/common/model/metric_test.go b/vendor/github.com/prometheus/common/model/metric_test.go new file mode 100644 index 000000000000..5c7cfceafe96 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/metric_test.go @@ -0,0 +1,83 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import "testing" + +func testMetric(t testing.TB) { + var scenarios = []struct { + input LabelSet + fingerprint Fingerprint + fastFingerprint Fingerprint + }{ + { + input: LabelSet{}, + fingerprint: 14695981039346656037, + fastFingerprint: 14695981039346656037, + }, + { + input: LabelSet{ + "first_name": "electro", + "occupation": "robot", + "manufacturer": "westinghouse", + }, + fingerprint: 5911716720268894962, + fastFingerprint: 11310079640881077873, + }, + { + input: LabelSet{ + "x": "y", + }, + fingerprint: 8241431561484471700, + fastFingerprint: 13948396922932177635, + }, + { + input: LabelSet{ + "a": "bb", + "b": "c", + }, + fingerprint: 3016285359649981711, + fastFingerprint: 3198632812309449502, + }, + { + input: LabelSet{ + "a": "b", + "bb": "c", + }, + fingerprint: 7122421792099404749, + fastFingerprint: 5774953389407657638, + }, + } + + for i, scenario := range scenarios { + input := Metric(scenario.input) + + if scenario.fingerprint != input.Fingerprint() { + t.Errorf("%d. expected %d, got %d", i, scenario.fingerprint, input.Fingerprint()) + } + if scenario.fastFingerprint != input.FastFingerprint() { + t.Errorf("%d. 
expected %d, got %d", i, scenario.fastFingerprint, input.FastFingerprint()) + } + } +} + +func TestMetric(t *testing.T) { + testMetric(t) +} + +func BenchmarkMetric(b *testing.B) { + for i := 0; i < b.N; i++ { + testMetric(b) + } +} diff --git a/vendor/github.com/prometheus/common/model/signature_test.go b/vendor/github.com/prometheus/common/model/signature_test.go new file mode 100644 index 000000000000..d9c665f8c79e --- /dev/null +++ b/vendor/github.com/prometheus/common/model/signature_test.go @@ -0,0 +1,304 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "runtime" + "sync" + "testing" +) + +func TestLabelsToSignature(t *testing.T) { + var scenarios = []struct { + in map[string]string + out uint64 + }{ + { + in: map[string]string{}, + out: 14695981039346656037, + }, + { + in: map[string]string{"name": "garland, briggs", "fear": "love is not enough"}, + out: 5799056148416392346, + }, + } + + for i, scenario := range scenarios { + actual := LabelsToSignature(scenario.in) + + if actual != scenario.out { + t.Errorf("%d. expected %d, got %d", i, scenario.out, actual) + } + } +} + +func TestMetricToFingerprint(t *testing.T) { + var scenarios = []struct { + in LabelSet + out Fingerprint + }{ + { + in: LabelSet{}, + out: 14695981039346656037, + }, + { + in: LabelSet{"name": "garland, briggs", "fear": "love is not enough"}, + out: 5799056148416392346, + }, + } + + for i, scenario := range scenarios { + actual := labelSetToFingerprint(scenario.in) + + if actual != scenario.out { + t.Errorf("%d. expected %d, got %d", i, scenario.out, actual) + } + } +} + +func TestMetricToFastFingerprint(t *testing.T) { + var scenarios = []struct { + in LabelSet + out Fingerprint + }{ + { + in: LabelSet{}, + out: 14695981039346656037, + }, + { + in: LabelSet{"name": "garland, briggs", "fear": "love is not enough"}, + out: 12952432476264840823, + }, + } + + for i, scenario := range scenarios { + actual := labelSetToFastFingerprint(scenario.in) + + if actual != scenario.out { + t.Errorf("%d. expected %d, got %d", i, scenario.out, actual) + } + } +} + +func TestSignatureForLabels(t *testing.T) { + var scenarios = []struct { + in Metric + labels LabelNames + out uint64 + }{ + { + in: Metric{}, + labels: nil, + out: 14695981039346656037, + }, + { + in: Metric{"name": "garland, briggs", "fear": "love is not enough"}, + labels: LabelNames{"fear", "name"}, + out: 5799056148416392346, + }, + { + in: Metric{"name": "garland, briggs", "fear": "love is not enough", "foo": "bar"}, + labels: LabelNames{"fear", "name"}, + out: 5799056148416392346, + }, + { + in: Metric{"name": "garland, briggs", "fear": "love is not enough"}, + labels: LabelNames{}, + out: 14695981039346656037, + }, + { + in: Metric{"name": "garland, briggs", "fear": "love is not enough"}, + labels: nil, + out: 14695981039346656037, + }, + } + + for i, scenario := range scenarios { + actual := SignatureForLabels(scenario.in, scenario.labels...) 
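+ // The scenarios above pin down two properties of SignatureForLabels: labels outside the requested set never change the result, and an empty or nil selection falls back to the FNV-1a offset basis, 14695981039346656037.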
+ + if actual != scenario.out { + t.Errorf("%d. expected %d, got %d", i, scenario.out, actual) + } + } +} + +func TestSignatureWithoutLabels(t *testing.T) { + var scenarios = []struct { + in Metric + labels map[LabelName]struct{} + out uint64 + }{ + { + in: Metric{}, + labels: nil, + out: 14695981039346656037, + }, + { + in: Metric{"name": "garland, briggs", "fear": "love is not enough"}, + labels: map[LabelName]struct{}{"fear": struct{}{}, "name": struct{}{}}, + out: 14695981039346656037, + }, + { + in: Metric{"name": "garland, briggs", "fear": "love is not enough", "foo": "bar"}, + labels: map[LabelName]struct{}{"foo": struct{}{}}, + out: 5799056148416392346, + }, + { + in: Metric{"name": "garland, briggs", "fear": "love is not enough"}, + labels: map[LabelName]struct{}{}, + out: 5799056148416392346, + }, + { + in: Metric{"name": "garland, briggs", "fear": "love is not enough"}, + labels: nil, + out: 5799056148416392346, + }, + } + + for i, scenario := range scenarios { + actual := SignatureWithoutLabels(scenario.in, scenario.labels) + + if actual != scenario.out { + t.Errorf("%d. expected %d, got %d", i, scenario.out, actual) + } + } +} + +func benchmarkLabelToSignature(b *testing.B, l map[string]string, e uint64) { + for i := 0; i < b.N; i++ { + if a := LabelsToSignature(l); a != e { + b.Fatalf("expected signature of %d for %s, got %d", e, l, a) + } + } +} + +func BenchmarkLabelToSignatureScalar(b *testing.B) { + benchmarkLabelToSignature(b, nil, 14695981039346656037) +} + +func BenchmarkLabelToSignatureSingle(b *testing.B) { + benchmarkLabelToSignature(b, map[string]string{"first-label": "first-label-value"}, 5146282821936882169) +} + +func BenchmarkLabelToSignatureDouble(b *testing.B) { + benchmarkLabelToSignature(b, map[string]string{"first-label": "first-label-value", "second-label": "second-label-value"}, 3195800080984914717) +} + +func BenchmarkLabelToSignatureTriple(b *testing.B) { + benchmarkLabelToSignature(b, map[string]string{"first-label": "first-label-value", "second-label": "second-label-value", "third-label": "third-label-value"}, 13843036195897128121) +} + +func benchmarkMetricToFingerprint(b *testing.B, ls LabelSet, e Fingerprint) { + for i := 0; i < b.N; i++ { + if a := labelSetToFingerprint(ls); a != e { + b.Fatalf("expected signature of %d for %s, got %d", e, ls, a) + } + } +} + +func BenchmarkMetricToFingerprintScalar(b *testing.B) { + benchmarkMetricToFingerprint(b, nil, 14695981039346656037) +} + +func BenchmarkMetricToFingerprintSingle(b *testing.B) { + benchmarkMetricToFingerprint(b, LabelSet{"first-label": "first-label-value"}, 5146282821936882169) +} + +func BenchmarkMetricToFingerprintDouble(b *testing.B) { + benchmarkMetricToFingerprint(b, LabelSet{"first-label": "first-label-value", "second-label": "second-label-value"}, 3195800080984914717) +} + +func BenchmarkMetricToFingerprintTriple(b *testing.B) { + benchmarkMetricToFingerprint(b, LabelSet{"first-label": "first-label-value", "second-label": "second-label-value", "third-label": "third-label-value"}, 13843036195897128121) +} + +func benchmarkMetricToFastFingerprint(b *testing.B, ls LabelSet, e Fingerprint) { + for i := 0; i < b.N; i++ { + if a := labelSetToFastFingerprint(ls); a != e { + b.Fatalf("expected signature of %d for %s, got %d", e, ls, a) + } + } +} + +func BenchmarkMetricToFastFingerprintScalar(b *testing.B) { + benchmarkMetricToFastFingerprint(b, nil, 14695981039346656037) +} + +func BenchmarkMetricToFastFingerprintSingle(b *testing.B) { + benchmarkMetricToFastFingerprint(b, 
LabelSet{"first-label": "first-label-value"}, 5147259542624943964) +} + +func BenchmarkMetricToFastFingerprintDouble(b *testing.B) { + benchmarkMetricToFastFingerprint(b, LabelSet{"first-label": "first-label-value", "second-label": "second-label-value"}, 18269973311206963528) +} + +func BenchmarkMetricToFastFingerprintTriple(b *testing.B) { + benchmarkMetricToFastFingerprint(b, LabelSet{"first-label": "first-label-value", "second-label": "second-label-value", "third-label": "third-label-value"}, 15738406913934009676) +} + +func BenchmarkEmptyLabelSignature(b *testing.B) { + input := []map[string]string{nil, {}} + + var ms runtime.MemStats + runtime.ReadMemStats(&ms) + + alloc := ms.Alloc + + for _, labels := range input { + LabelsToSignature(labels) + } + + runtime.ReadMemStats(&ms) + + if got := ms.Alloc; alloc != got { + b.Fatal("expected LabelsToSignature with empty labels not to perform allocations") + } +} + +func benchmarkMetricToFastFingerprintConc(b *testing.B, ls LabelSet, e Fingerprint, concLevel int) { + var start, end sync.WaitGroup + start.Add(1) + end.Add(concLevel) + + for i := 0; i < concLevel; i++ { + go func() { + start.Wait() + for j := b.N / concLevel; j >= 0; j-- { + if a := labelSetToFastFingerprint(ls); a != e { + b.Fatalf("expected signature of %d for %s, got %d", e, ls, a) + } + } + end.Done() + }() + } + b.ResetTimer() + start.Done() + end.Wait() +} + +func BenchmarkMetricToFastFingerprintTripleConc1(b *testing.B) { + benchmarkMetricToFastFingerprintConc(b, LabelSet{"first-label": "first-label-value", "second-label": "second-label-value", "third-label": "third-label-value"}, 15738406913934009676, 1) +} + +func BenchmarkMetricToFastFingerprintTripleConc2(b *testing.B) { + benchmarkMetricToFastFingerprintConc(b, LabelSet{"first-label": "first-label-value", "second-label": "second-label-value", "third-label": "third-label-value"}, 15738406913934009676, 2) +} + +func BenchmarkMetricToFastFingerprintTripleConc4(b *testing.B) { + benchmarkMetricToFastFingerprintConc(b, LabelSet{"first-label": "first-label-value", "second-label": "second-label-value", "third-label": "third-label-value"}, 15738406913934009676, 4) +} + +func BenchmarkMetricToFastFingerprintTripleConc8(b *testing.B) { + benchmarkMetricToFastFingerprintConc(b, LabelSet{"first-label": "first-label-value", "second-label": "second-label-value", "third-label": "third-label-value"}, 15738406913934009676, 8) +} diff --git a/vendor/github.com/prometheus/common/model/time_test.go b/vendor/github.com/prometheus/common/model/time_test.go new file mode 100644 index 000000000000..9013a62775b1 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/time_test.go @@ -0,0 +1,86 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package model + +import ( + "testing" + "time" +) + +func TestComparators(t *testing.T) { + t1a := TimeFromUnix(0) + t1b := TimeFromUnix(0) + t2 := TimeFromUnix(2*second - 1) + + if !t1a.Equal(t1b) { + t.Fatalf("Expected %s to be equal to %s", t1a, t1b) + } + if t1a.Equal(t2) { + t.Fatalf("Expected %s to not be equal to %s", t1a, t2) + } + + if !t1a.Before(t2) { + t.Fatalf("Expected %s to be before %s", t1a, t2) + } + if t1a.Before(t1b) { + t.Fatalf("Expected %s to not be before %s", t1a, t1b) + } + + if !t2.After(t1a) { + t.Fatalf("Expected %s to be after %s", t2, t1a) + } + if t1b.After(t1a) { + t.Fatalf("Expected %s to not be after %s", t1b, t1a) + } +} + +func TestTimeConversions(t *testing.T) { + unixSecs := int64(1136239445) + unixNsecs := int64(123456789) + unixNano := unixSecs*1e9 + unixNsecs + + t1 := time.Unix(unixSecs, unixNsecs-unixNsecs%nanosPerTick) + t2 := time.Unix(unixSecs, unixNsecs) + + ts := TimeFromUnixNano(unixNano) + if !ts.Time().Equal(t1) { + t.Fatalf("Expected %s, got %s", t1, ts.Time()) + } + + // Test available precision. + ts = TimeFromUnixNano(t2.UnixNano()) + if !ts.Time().Equal(t1) { + t.Fatalf("Expected %s, got %s", t1, ts.Time()) + } + + if ts.UnixNano() != unixNano-unixNano%nanosPerTick { + t.Fatalf("Expected %d, got %d", unixNano, ts.UnixNano()) + } +} + +func TestDuration(t *testing.T) { + duration := time.Second + time.Minute + time.Hour + goTime := time.Unix(1136239445, 0) + + ts := TimeFromUnix(goTime.Unix()) + if !goTime.Add(duration).Equal(ts.Add(duration).Time()) { + t.Fatalf("Expected %s to be equal to %s", goTime.Add(duration), ts.Add(duration)) + } + + earlier := ts.Add(-duration) + delta := ts.Sub(earlier) + if delta != duration { + t.Fatalf("Expected %s to be equal to %s", delta, duration) + } +} diff --git a/vendor/github.com/prometheus/common/model/value_test.go b/vendor/github.com/prometheus/common/model/value_test.go new file mode 100644 index 000000000000..2e9c7eb09d1a --- /dev/null +++ b/vendor/github.com/prometheus/common/model/value_test.go @@ -0,0 +1,362 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package model + +import ( + "encoding/json" + "math" + "reflect" + "sort" + "testing" +) + +func TestSamplePairJSON(t *testing.T) { + input := []struct { + plain string + value SamplePair + }{ + { + plain: `[1234.567,"123.1"]`, + value: SamplePair{ + Value: 123.1, + Timestamp: 1234567, + }, + }, + } + + for _, test := range input { + b, err := json.Marshal(test.value) + if err != nil { + t.Error(err) + continue + } + + if string(b) != test.plain { + t.Errorf("encoding error: expected %q, got %q", test.plain, b) + continue + } + + var sp SamplePair + err = json.Unmarshal(b, &sp) + if err != nil { + t.Error(err) + continue + } + + if sp != test.value { + t.Errorf("decoding error: expected %v, got %v", test.value, sp) + } + } +} + +func TestSampleJSON(t *testing.T) { + input := []struct { + plain string + value Sample + }{ + { + plain: `{"metric":{"__name__":"test_metric"},"value":[1234.567,"123.1"]}`, + value: Sample{ + Metric: Metric{ + MetricNameLabel: "test_metric", + }, + Value: 123.1, + Timestamp: 1234567, + }, + }, + } + + for _, test := range input { + b, err := json.Marshal(test.value) + if err != nil { + t.Error(err) + continue + } + + if string(b) != test.plain { + t.Errorf("encoding error: expected %q, got %q", test.plain, b) + continue + } + + var sv Sample + err = json.Unmarshal(b, &sv) + if err != nil { + t.Error(err) + continue + } + + if !reflect.DeepEqual(sv, test.value) { + t.Errorf("decoding error: expected %v, got %v", test.value, sv) + } + } +} + +func TestVectorJSON(t *testing.T) { + input := []struct { + plain string + value Vector + }{ + { + plain: `[]`, + value: Vector{}, + }, + { + plain: `[{"metric":{"__name__":"test_metric"},"value":[1234.567,"123.1"]}]`, + value: Vector{&Sample{ + Metric: Metric{ + MetricNameLabel: "test_metric", + }, + Value: 123.1, + Timestamp: 1234567, + }}, + }, + { + plain: `[{"metric":{"__name__":"test_metric"},"value":[1234.567,"123.1"]},{"metric":{"foo":"bar"},"value":[1.234,"+Inf"]}]`, + value: Vector{ + &Sample{ + Metric: Metric{ + MetricNameLabel: "test_metric", + }, + Value: 123.1, + Timestamp: 1234567, + }, + &Sample{ + Metric: Metric{ + "foo": "bar", + }, + Value: SampleValue(math.Inf(1)), + Timestamp: 1234, + }, + }, + }, + } + + for _, test := range input { + b, err := json.Marshal(test.value) + if err != nil { + t.Error(err) + continue + } + + if string(b) != test.plain { + t.Errorf("encoding error: expected %q, got %q", test.plain, b) + continue + } + + var vec Vector + err = json.Unmarshal(b, &vec) + if err != nil { + t.Error(err) + continue + } + + if !reflect.DeepEqual(vec, test.value) { + t.Errorf("decoding error: expected %v, got %v", test.value, vec) + } + } +} + +func TestScalarJSON(t *testing.T) { + input := []struct { + plain string + value Scalar + }{ + { + plain: `[123.456,"456"]`, + value: Scalar{ + Timestamp: 123456, + Value: 456, + }, + }, + { + plain: `[123123.456,"+Inf"]`, + value: Scalar{ + Timestamp: 123123456, + Value: SampleValue(math.Inf(1)), + }, + }, + { + plain: `[123123.456,"-Inf"]`, + value: Scalar{ + Timestamp: 123123456, + Value: SampleValue(math.Inf(-1)), + }, + }, + } + + for _, test := range input { + b, err := json.Marshal(test.value) + if err != nil { + t.Error(err) + continue + } + + if string(b) != test.plain { + t.Errorf("encoding error: expected %q, got %q", test.plain, b) + continue + } + + var sv Scalar + err = json.Unmarshal(b, &sv) + if err != nil { + t.Error(err) + continue + } + + if sv != test.value { + t.Errorf("decoding error: expected %v, got %v", test.value, sv) + } + } +} + 
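+// All JSON tests in this file share one wire shape: a sample encodes as a
+// two-element array of [unix seconds with millisecond fraction, value
+// rendered as a string]; the fixtures above imply model.Time counts
+// milliseconds (1234.567 seconds <-> Timestamp 1234567). A minimal
+// round-trip sketch, reusing the fixture from TestSamplePairJSON:
+//
+//	var sp SamplePair
+//	if err := json.Unmarshal([]byte(`[1234.567,"123.1"]`), &sp); err == nil {
+//		// sp.Timestamp == 1234567 && sp.Value == 123.1
+//	}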
+func TestStringJSON(t *testing.T) { + input := []struct { + plain string + value String + }{ + { + plain: `[123.456,"test"]`, + value: String{ + Timestamp: 123456, + Value: "test", + }, + }, + { + plain: `[123123.456,"台北"]`, + value: String{ + Timestamp: 123123456, + Value: "台北", + }, + }, + } + + for _, test := range input { + b, err := json.Marshal(test.value) + if err != nil { + t.Error(err) + continue + } + + if string(b) != test.plain { + t.Errorf("encoding error: expected %q, got %q", test.plain, b) + continue + } + + var sv String + err = json.Unmarshal(b, &sv) + if err != nil { + t.Error(err) + continue + } + + if sv != test.value { + t.Errorf("decoding error: expected %v, got %v", test.value, sv) + } + } +} + +func TestVectorSort(t *testing.T) { + input := Vector{ + &Sample{ + Metric: Metric{ + MetricNameLabel: "A", + }, + Timestamp: 1, + }, + &Sample{ + Metric: Metric{ + MetricNameLabel: "A", + }, + Timestamp: 2, + }, + &Sample{ + Metric: Metric{ + MetricNameLabel: "C", + }, + Timestamp: 1, + }, + &Sample{ + Metric: Metric{ + MetricNameLabel: "C", + }, + Timestamp: 2, + }, + &Sample{ + Metric: Metric{ + MetricNameLabel: "B", + }, + Timestamp: 1, + }, + &Sample{ + Metric: Metric{ + MetricNameLabel: "B", + }, + Timestamp: 2, + }, + } + + expected := Vector{ + &Sample{ + Metric: Metric{ + MetricNameLabel: "A", + }, + Timestamp: 1, + }, + &Sample{ + Metric: Metric{ + MetricNameLabel: "A", + }, + Timestamp: 2, + }, + &Sample{ + Metric: Metric{ + MetricNameLabel: "B", + }, + Timestamp: 1, + }, + &Sample{ + Metric: Metric{ + MetricNameLabel: "B", + }, + Timestamp: 2, + }, + &Sample{ + Metric: Metric{ + MetricNameLabel: "C", + }, + Timestamp: 1, + }, + &Sample{ + Metric: Metric{ + MetricNameLabel: "C", + }, + Timestamp: 2, + }, + } + + sort.Sort(input) + + for i, actual := range input { + actualFp := actual.Metric.Fingerprint() + expectedFp := expected[i].Metric.Fingerprint() + + if actualFp != expectedFp { + t.Fatalf("%d. Incorrect fingerprint. Got %s; want %s", i, actualFp.String(), expectedFp.String()) + } + + if actual.Timestamp != expected[i].Timestamp { + t.Fatalf("%d. Incorrect timestamp. 
Got %s; want %s", i, actual.Timestamp, expected[i].Timestamp) + } + } +} diff --git a/vendor/github.com/prometheus/procfs/BUILD b/vendor/github.com/prometheus/procfs/BUILD index abf7d76f1289..dfe6ad5ba4cb 100644 --- a/vendor/github.com/prometheus/procfs/BUILD +++ b/vendor/github.com/prometheus/procfs/BUILD @@ -1,9 +1,4 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "go_default_library", @@ -19,6 +14,23 @@ go_library( "stat.go", ], importpath = "github.com/prometheus/procfs", + visibility = ["//visibility:public"], +) + +go_test( + name = "go_default_test", + srcs = [ + "fs_test.go", + "ipvs_test.go", + "mdstat_test.go", + "proc_io_test.go", + "proc_limits_test.go", + "proc_stat_test.go", + "proc_test.go", + "stat_test.go", + ], + importpath = "github.com/prometheus/procfs", + library = ":go_default_library", ) filegroup( @@ -32,4 +44,5 @@ filegroup( name = "all-srcs", srcs = [":package-srcs"], tags = ["automanaged"], + visibility = ["//visibility:public"], ) diff --git a/vendor/github.com/prometheus/procfs/fs_test.go b/vendor/github.com/prometheus/procfs/fs_test.go new file mode 100644 index 000000000000..91f1c6c97635 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/fs_test.go @@ -0,0 +1,13 @@ +package procfs + +import "testing" + +func TestNewFS(t *testing.T) { + if _, err := NewFS("foobar"); err == nil { + t.Error("want NewFS to fail for non-existing mount point") + } + + if _, err := NewFS("procfs.go"); err == nil { + t.Error("want NewFS to fail if mount point is not a directory") + } +} diff --git a/vendor/github.com/prometheus/procfs/ipvs_test.go b/vendor/github.com/prometheus/procfs/ipvs_test.go new file mode 100644 index 000000000000..6036cde84352 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/ipvs_test.go @@ -0,0 +1,196 @@ +package procfs + +import ( + "net" + "testing" +) + +var ( + expectedIPVSStats = IPVSStats{ + Connections: 23765872, + IncomingPackets: 3811989221, + OutgoingPackets: 0, + IncomingBytes: 89991519156915, + OutgoingBytes: 0, + } + expectedIPVSBackendStatuses = []IPVSBackendStatus{ + IPVSBackendStatus{ + LocalAddress: net.ParseIP("192.168.0.22"), + LocalPort: 3306, + RemoteAddress: net.ParseIP("192.168.82.22"), + RemotePort: 3306, + Proto: "TCP", + Weight: 100, + ActiveConn: 248, + InactConn: 2, + }, + IPVSBackendStatus{ + LocalAddress: net.ParseIP("192.168.0.22"), + LocalPort: 3306, + RemoteAddress: net.ParseIP("192.168.83.24"), + RemotePort: 3306, + Proto: "TCP", + Weight: 100, + ActiveConn: 248, + InactConn: 2, + }, + IPVSBackendStatus{ + LocalAddress: net.ParseIP("192.168.0.22"), + LocalPort: 3306, + RemoteAddress: net.ParseIP("192.168.83.21"), + RemotePort: 3306, + Proto: "TCP", + Weight: 100, + ActiveConn: 248, + InactConn: 1, + }, + IPVSBackendStatus{ + LocalAddress: net.ParseIP("192.168.0.57"), + LocalPort: 3306, + RemoteAddress: net.ParseIP("192.168.84.22"), + RemotePort: 3306, + Proto: "TCP", + Weight: 0, + ActiveConn: 0, + InactConn: 0, + }, + IPVSBackendStatus{ + LocalAddress: net.ParseIP("192.168.0.57"), + LocalPort: 3306, + RemoteAddress: net.ParseIP("192.168.82.21"), + RemotePort: 3306, + Proto: "TCP", + Weight: 100, + ActiveConn: 1499, + InactConn: 0, + }, + IPVSBackendStatus{ + LocalAddress: net.ParseIP("192.168.0.57"), + LocalPort: 3306, + RemoteAddress: net.ParseIP("192.168.50.21"), + RemotePort: 3306, + Proto: "TCP", + Weight: 100, + ActiveConn: 1498, + InactConn: 0, + }, + 
IPVSBackendStatus{ + LocalAddress: net.ParseIP("192.168.0.55"), + LocalPort: 3306, + RemoteAddress: net.ParseIP("192.168.50.26"), + RemotePort: 3306, + Proto: "TCP", + Weight: 0, + ActiveConn: 0, + InactConn: 0, + }, + IPVSBackendStatus{ + LocalAddress: net.ParseIP("192.168.0.55"), + LocalPort: 3306, + RemoteAddress: net.ParseIP("192.168.49.32"), + RemotePort: 3306, + Proto: "TCP", + Weight: 100, + ActiveConn: 0, + InactConn: 0, + }, + } +) + +func TestIPVSStats(t *testing.T) { + fs, err := NewFS("fixtures") + if err != nil { + t.Fatal(err) + } + stats, err := fs.NewIPVSStats() + if err != nil { + t.Fatal(err) + } + + if stats != expectedIPVSStats { + t.Errorf("want %+v, got %+v", expectedIPVSStats, stats) + } +} + +func TestParseIPPort(t *testing.T) { + ip := net.ParseIP("192.168.0.22") + port := uint16(3306) + + gotIP, gotPort, err := parseIPPort("C0A80016:0CEA") + if err != nil { + t.Fatal(err) + } + if !(gotIP.Equal(ip) && port == gotPort) { + t.Errorf("want %s:%d, got %s:%d", ip, port, gotIP, gotPort) + } +} + +func TestParseIPPortInvalid(t *testing.T) { + testcases := []string{ + "", + "C0A80016", + "C0A800:1234", + "FOOBARBA:1234", + "C0A80016:0CEA:1234", + } + + for _, s := range testcases { + ip, port, err := parseIPPort(s) + if ip != nil || port != uint16(0) || err == nil { + t.Errorf("Expected error for input %s, got ip = %s, port = %v, err = %v", s, ip, port, err) + } + } +} + +func TestParseIPPortIPv6(t *testing.T) { + ip := net.ParseIP("dead:beef::1") + port := uint16(8080) + + gotIP, gotPort, err := parseIPPort("DEADBEEF000000000000000000000001:1F90") + if err != nil { + t.Fatal(err) + } + if !(gotIP.Equal(ip) && port == gotPort) { + t.Errorf("want %s:%d, got %s:%d", ip, port, gotIP, gotPort) + } + +} + +func TestIPVSBackendStatus(t *testing.T) { + fs, err := NewFS("fixtures") + if err != nil { + t.Fatal(err) + } + + backendStats, err := fs.NewIPVSBackendStatus() + if err != nil { + t.Fatal(err) + } + + for idx, expect := range expectedIPVSBackendStatuses { + if !backendStats[idx].LocalAddress.Equal(expect.LocalAddress) { + t.Errorf("expected LocalAddress %s, got %s", expect.LocalAddress, backendStats[idx].LocalAddress) + } + if backendStats[idx].LocalPort != expect.LocalPort { + t.Errorf("expected LocalPort %d, got %d", expect.LocalPort, backendStats[idx].LocalPort) + } + if !backendStats[idx].RemoteAddress.Equal(expect.RemoteAddress) { + t.Errorf("expected RemoteAddress %s, got %s", expect.RemoteAddress, backendStats[idx].RemoteAddress) + } + if backendStats[idx].RemotePort != expect.RemotePort { + t.Errorf("expected RemotePort %d, got %d", expect.RemotePort, backendStats[idx].RemotePort) + } + if backendStats[idx].Proto != expect.Proto { + t.Errorf("expected Proto %s, got %s", expect.Proto, backendStats[idx].Proto) + } + if backendStats[idx].Weight != expect.Weight { + t.Errorf("expected Weight %d, got %d", expect.Weight, backendStats[idx].Weight) + } + if backendStats[idx].ActiveConn != expect.ActiveConn { + t.Errorf("expected ActiveConn %d, got %d", expect.ActiveConn, backendStats[idx].ActiveConn) + } + if backendStats[idx].InactConn != expect.InactConn { + t.Errorf("expected InactConn %d, got %d", expect.InactConn, backendStats[idx].InactConn) + } + } +} diff --git a/vendor/github.com/prometheus/procfs/mdstat_test.go b/vendor/github.com/prometheus/procfs/mdstat_test.go new file mode 100644 index 000000000000..7b9a3a5f5fee --- /dev/null +++ b/vendor/github.com/prometheus/procfs/mdstat_test.go @@ -0,0 +1,33 @@ +package procfs + +import ( + "testing" +) + +func 
TestMDStat(t *testing.T) { + fs := FS("fixtures") + mdStates, err := fs.ParseMDStat() + if err != nil { + t.Fatalf("parsing of reference-file failed entirely: %s", err) + } + + refs := map[string]MDStat{ + "md3": MDStat{"md3", "active", 8, 8, 5853468288, 5853468288}, + "md127": MDStat{"md127", "active", 2, 2, 312319552, 312319552}, + "md0": MDStat{"md0", "active", 2, 2, 248896, 248896}, + "md4": MDStat{"md4", "inactive", 2, 2, 4883648, 4883648}, + "md6": MDStat{"md6", "active", 1, 2, 195310144, 16775552}, + "md8": MDStat{"md8", "active", 2, 2, 195310144, 16775552}, + "md7": MDStat{"md7", "active", 3, 4, 7813735424, 7813735424}, + } + + for _, md := range mdStates { + if md != refs[md.Name] { + t.Errorf("failed parsing md-device %s correctly: want %v, got %v", md.Name, refs[md.Name], md) + } + } + + if want, have := len(refs), len(mdStates); want != have { + t.Errorf("want %d parsed md-devices, have %d", want, have) + } +} diff --git a/vendor/github.com/prometheus/procfs/proc_io_test.go b/vendor/github.com/prometheus/procfs/proc_io_test.go new file mode 100644 index 000000000000..5ef524d8e0f8 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_io_test.go @@ -0,0 +1,49 @@ +package procfs + +import "testing" + +func TestProcIO(t *testing.T) { + fs, err := NewFS("fixtures") + if err != nil { + t.Fatal(err) + } + + p, err := fs.NewProc(26231) + if err != nil { + t.Fatal(err) + } + + s, err := p.NewIO() + if err != nil { + t.Fatal(err) + } + + for _, test := range []struct { + name string + want uint64 + got uint64 + }{ + {name: "RChar", want: 750339, got: s.RChar}, + {name: "WChar", want: 818609, got: s.WChar}, + {name: "SyscR", want: 7405, got: s.SyscR}, + {name: "SyscW", want: 5245, got: s.SyscW}, + {name: "ReadBytes", want: 1024, got: s.ReadBytes}, + {name: "WriteBytes", want: 2048, got: s.WriteBytes}, + } { + if test.want != test.got { + t.Errorf("want %s %d, got %d", test.name, test.want, test.got) + } + } + + for _, test := range []struct { + name string + want int64 + got int64 + }{ + {name: "CancelledWriteBytes", want: -1024, got: s.CancelledWriteBytes}, + } { + if test.want != test.got { + t.Errorf("want %s %d, got %d", test.name, test.want, test.got) + } + } +} diff --git a/vendor/github.com/prometheus/procfs/proc_limits_test.go b/vendor/github.com/prometheus/procfs/proc_limits_test.go new file mode 100644 index 000000000000..ca7a254da758 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_limits_test.go @@ -0,0 +1,36 @@ +package procfs + +import "testing" + +func TestNewLimits(t *testing.T) { + fs, err := NewFS("fixtures") + if err != nil { + t.Fatal(err) + } + + p, err := fs.NewProc(26231) + if err != nil { + t.Fatal(err) + } + + l, err := p.NewLimits() + if err != nil { + t.Fatal(err) + } + + for _, test := range []struct { + name string + want int + got int + }{ + {name: "cpu time", want: -1, got: l.CPUTime}, + {name: "open files", want: 2048, got: l.OpenFiles}, + {name: "msgqueue size", want: 819200, got: l.MsqqueueSize}, + {name: "nice priority", want: 0, got: l.NicePriority}, + {name: "address space", want: -1, got: l.AddressSpace}, + } { + if test.want != test.got { + t.Errorf("want %s %d, got %d", test.name, test.want, test.got) + } + } +} diff --git a/vendor/github.com/prometheus/procfs/proc_stat_test.go b/vendor/github.com/prometheus/procfs/proc_stat_test.go new file mode 100644 index 000000000000..e4d5cacfa4bf --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_stat_test.go @@ -0,0 +1,112 @@ +package procfs + +import "testing" + +func 
TestProcStat(t *testing.T) { + fs, err := NewFS("fixtures") + if err != nil { + t.Fatal(err) + } + + p, err := fs.NewProc(26231) + if err != nil { + t.Fatal(err) + } + + s, err := p.NewStat() + if err != nil { + t.Fatal(err) + } + + for _, test := range []struct { + name string + want int + got int + }{ + {name: "pid", want: 26231, got: s.PID}, + {name: "user time", want: 1677, got: int(s.UTime)}, + {name: "system time", want: 44, got: int(s.STime)}, + {name: "start time", want: 82375, got: int(s.Starttime)}, + {name: "virtual memory size", want: 56274944, got: s.VSize}, + {name: "resident set size", want: 1981, got: s.RSS}, + } { + if test.want != test.got { + t.Errorf("want %s %d, got %d", test.name, test.want, test.got) + } + } +} + +func TestProcStatComm(t *testing.T) { + s1, err := testProcStat(26231) + if err != nil { + t.Fatal(err) + } + if want, got := "vim", s1.Comm; want != got { + t.Errorf("want comm %s, got %s", want, got) + } + + s2, err := testProcStat(584) + if err != nil { + t.Fatal(err) + } + if want, got := "(a b ) ( c d) ", s2.Comm; want != got { + t.Errorf("want comm %s, got %s", want, got) + } +} + +func TestProcStatVirtualMemory(t *testing.T) { + s, err := testProcStat(26231) + if err != nil { + t.Fatal(err) + } + + if want, got := 56274944, s.VirtualMemory(); want != got { + t.Errorf("want virtual memory %d, got %d", want, got) + } +} + +func TestProcStatResidentMemory(t *testing.T) { + s, err := testProcStat(26231) + if err != nil { + t.Fatal(err) + } + + if want, got := 1981*4096, s.ResidentMemory(); want != got { + t.Errorf("want resident memory %d, got %d", want, got) + } +} + +func TestProcStatStartTime(t *testing.T) { + s, err := testProcStat(26231) + if err != nil { + t.Fatal(err) + } + + time, err := s.StartTime() + if err != nil { + t.Fatal(err) + } + if want, got := 1418184099.75, time; want != got { + t.Errorf("want start time %f, got %f", want, got) + } +} + +func TestProcStatCPUTime(t *testing.T) { + s, err := testProcStat(26231) + if err != nil { + t.Fatal(err) + } + + if want, got := 17.21, s.CPUTime(); want != got { + t.Errorf("want cpu time %f, got %f", want, got) + } +} + +func testProcStat(pid int) (ProcStat, error) { + p, err := testProcess(pid) + if err != nil { + return ProcStat{}, err + } + + return p.NewStat() +} diff --git a/vendor/github.com/prometheus/procfs/proc_test.go b/vendor/github.com/prometheus/procfs/proc_test.go new file mode 100644 index 000000000000..4197282ca84d --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_test.go @@ -0,0 +1,146 @@ +package procfs + +import ( + "reflect" + "sort" + "testing" +) + +func TestSelf(t *testing.T) { + fs := FS("fixtures") + + p1, err := fs.NewProc(26231) + if err != nil { + t.Fatal(err) + } + p2, err := fs.Self() + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(p1, p2) { + t.Errorf("want process %v to equal %v", p1, p2) + } +} + +func TestAllProcs(t *testing.T) { + procs, err := FS("fixtures").AllProcs() + if err != nil { + t.Fatal(err) + } + sort.Sort(procs) + for i, p := range []*Proc{{PID: 584}, {PID: 26231}} { + if want, got := p.PID, procs[i].PID; want != got { + t.Errorf("want processes %d, got %d", want, got) + } + } +} + +func TestCmdLine(t *testing.T) { + for _, tt := range []struct { + process int + want []string + }{ + {process: 26231, want: []string{"vim", "test.go", "+10"}}, + {process: 26232, want: []string{}}, + } { + p1, err := testProcess(tt.process) + if err != nil { + t.Fatal(err) + } + c1, err := p1.CmdLine() + if err != nil { + t.Fatal(err) + } + if 
!reflect.DeepEqual(tt.want, c1) { + t.Errorf("want cmdline %v, got %v", tt.want, c1) + } + } +} + +func TestExecutable(t *testing.T) { + for _, tt := range []struct { + process int + want string + }{ + {process: 26231, want: "/usr/bin/vim"}, + {process: 26232, want: ""}, + } { + p, err := testProcess(tt.process) + if err != nil { + t.Fatal(err) + } + exe, err := p.Executable() + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(tt.want, exe) { + t.Errorf("want absolute path to cmdline %v, got %v", tt.want, exe) + } + } +} + +func TestFileDescriptors(t *testing.T) { + p1, err := testProcess(26231) + if err != nil { + t.Fatal(err) + } + fds, err := p1.FileDescriptors() + if err != nil { + t.Fatal(err) + } + sort.Sort(byUintptr(fds)) + if want := []uintptr{0, 1, 2, 3, 10}; !reflect.DeepEqual(want, fds) { + t.Errorf("want fds %v, got %v", want, fds) + } +} + +func TestFileDescriptorTargets(t *testing.T) { + p1, err := testProcess(26231) + if err != nil { + t.Fatal(err) + } + fds, err := p1.FileDescriptorTargets() + if err != nil { + t.Fatal(err) + } + sort.Strings(fds) + var want = []string{ + "../../symlinktargets/abc", + "../../symlinktargets/def", + "../../symlinktargets/ghi", + "../../symlinktargets/uvw", + "../../symlinktargets/xyz", + } + if !reflect.DeepEqual(want, fds) { + t.Errorf("want fds %v, got %v", want, fds) + } +} + +func TestFileDescriptorsLen(t *testing.T) { + p1, err := testProcess(26231) + if err != nil { + t.Fatal(err) + } + l, err := p1.FileDescriptorsLen() + if err != nil { + t.Fatal(err) + } + if want, got := 5, l; want != got { + t.Errorf("want fds %d, got %d", want, got) + } +} + +func testProcess(pid int) (Proc, error) { + fs, err := NewFS("fixtures") + if err != nil { + return Proc{}, err + } + return fs.NewProc(pid) +} + +type byUintptr []uintptr + +func (a byUintptr) Len() int { return len(a) } +func (a byUintptr) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byUintptr) Less(i, j int) bool { return a[i] < a[j] } diff --git a/vendor/github.com/prometheus/procfs/stat_test.go b/vendor/github.com/prometheus/procfs/stat_test.go new file mode 100644 index 000000000000..24b5d61f8e91 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/stat_test.go @@ -0,0 +1,19 @@ +package procfs + +import "testing" + +func TestStat(t *testing.T) { + fs, err := NewFS("fixtures") + if err != nil { + t.Fatal(err) + } + + s, err := fs.NewStat() + if err != nil { + t.Fatal(err) + } + + if want, got := int64(1418183276), s.BootTime; want != got { + t.Errorf("want boot time %d, got %d", want, got) + } +} diff --git a/vendor/github.com/satori/go.uuid/BUILD b/vendor/github.com/satori/go.uuid/BUILD index 293b42b9e191..d38ad2c76d34 100644 --- a/vendor/github.com/satori/go.uuid/BUILD +++ b/vendor/github.com/satori/go.uuid/BUILD @@ -1,14 +1,20 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "go_default_library", srcs = ["uuid.go"], importpath = "github.com/satori/go.uuid", + visibility = ["//visibility:public"], +) + +go_test( + name = "go_default_test", + srcs = [ + "benchmarks_test.go", + "uuid_test.go", + ], + importpath = "github.com/satori/go.uuid", + library = ":go_default_library", ) filegroup( @@ -22,4 +28,5 @@ filegroup( name = "all-srcs", srcs = [":package-srcs"], tags = ["automanaged"], + visibility = ["//visibility:public"], ) diff --git a/vendor/github.com/satori/go.uuid/benchmarks_test.go 
b/vendor/github.com/satori/go.uuid/benchmarks_test.go new file mode 100644 index 000000000000..b4e567fc64f8 --- /dev/null +++ b/vendor/github.com/satori/go.uuid/benchmarks_test.go @@ -0,0 +1,121 @@ +// Copyright (C) 2013-2015 by Maxim Bublis +// +// Permission is hereby granted, free of charge, to any person obtaining +// a copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to +// permit persons to whom the Software is furnished to do so, subject to +// the following conditions: +// +// The above copyright notice and this permission notice shall be +// included in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +package uuid + +import ( + "testing" +) + +func BenchmarkFromBytes(b *testing.B) { + bytes := []byte{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} + for i := 0; i < b.N; i++ { + FromBytes(bytes) + } +} + +func BenchmarkFromString(b *testing.B) { + s := "6ba7b810-9dad-11d1-80b4-00c04fd430c8" + for i := 0; i < b.N; i++ { + FromString(s) + } +} + +func BenchmarkFromStringUrn(b *testing.B) { + s := "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8" + for i := 0; i < b.N; i++ { + FromString(s) + } +} + +func BenchmarkFromStringWithBrackets(b *testing.B) { + s := "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}" + for i := 0; i < b.N; i++ { + FromString(s) + } +} + +func BenchmarkNewV1(b *testing.B) { + for i := 0; i < b.N; i++ { + NewV1() + } +} + +func BenchmarkNewV2(b *testing.B) { + for i := 0; i < b.N; i++ { + NewV2(DomainPerson) + } +} + +func BenchmarkNewV3(b *testing.B) { + for i := 0; i < b.N; i++ { + NewV3(NamespaceDNS, "www.example.com") + } +} + +func BenchmarkNewV4(b *testing.B) { + for i := 0; i < b.N; i++ { + NewV4() + } +} + +func BenchmarkNewV5(b *testing.B) { + for i := 0; i < b.N; i++ { + NewV5(NamespaceDNS, "www.example.com") + } +} + +func BenchmarkMarshalBinary(b *testing.B) { + u := NewV4() + for i := 0; i < b.N; i++ { + u.MarshalBinary() + } +} + +func BenchmarkMarshalText(b *testing.B) { + u := NewV4() + for i := 0; i < b.N; i++ { + u.MarshalText() + } +} + +func BenchmarkUnmarshalBinary(b *testing.B) { + bytes := []byte{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} + u := UUID{} + for i := 0; i < b.N; i++ { + u.UnmarshalBinary(bytes) + } +} + +func BenchmarkUnmarshalText(b *testing.B) { + bytes := []byte("6ba7b810-9dad-11d1-80b4-00c04fd430c8") + u := UUID{} + for i := 0; i < b.N; i++ { + u.UnmarshalText(bytes) + } +} + +func BenchmarkMarshalToString(b *testing.B) { + u := NewV4() + for i := 0; i < b.N; i++ { + u.String() + } +} diff --git a/vendor/github.com/satori/go.uuid/uuid_test.go b/vendor/github.com/satori/go.uuid/uuid_test.go new file mode 100644 index 000000000000..aa68ac94f59c --- /dev/null +++ b/vendor/github.com/satori/go.uuid/uuid_test.go @@ -0,0 +1,633 @@ 
+// Copyright (C) 2013, 2015 by Maxim Bublis +// +// Permission is hereby granted, free of charge, to any person obtaining +// a copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to +// permit persons to whom the Software is furnished to do so, subject to +// the following conditions: +// +// The above copyright notice and this permission notice shall be +// included in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +package uuid + +import ( + "bytes" + "testing" +) + +func TestBytes(t *testing.T) { + u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} + + bytes1 := []byte{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} + + if !bytes.Equal(u.Bytes(), bytes1) { + t.Errorf("Incorrect bytes representation for UUID: %s", u) + } +} + +func TestString(t *testing.T) { + if NamespaceDNS.String() != "6ba7b810-9dad-11d1-80b4-00c04fd430c8" { + t.Errorf("Incorrect string representation for UUID: %s", NamespaceDNS.String()) + } +} + +func TestEqual(t *testing.T) { + if !Equal(NamespaceDNS, NamespaceDNS) { + t.Errorf("Incorrect comparison of %s and %s", NamespaceDNS, NamespaceDNS) + } + + if Equal(NamespaceDNS, NamespaceURL) { + t.Errorf("Incorrect comparison of %s and %s", NamespaceDNS, NamespaceURL) + } +} + +func TestOr(t *testing.T) { + u1 := UUID{0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff} + u2 := UUID{0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00} + + u := UUID{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff} + + if !Equal(u, Or(u1, u2)) { + t.Errorf("Incorrect bitwise OR result %s", Or(u1, u2)) + } +} + +func TestAnd(t *testing.T) { + u1 := UUID{0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff} + u2 := UUID{0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00} + + u := UUID{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + + if !Equal(u, And(u1, u2)) { + t.Errorf("Incorrect bitwise AND result %s", And(u1, u2)) + } +} + +func TestVersion(t *testing.T) { + u := UUID{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + + if u.Version() != 1 { + t.Errorf("Incorrect version for UUID: %d", u.Version()) + } +} + +func TestSetVersion(t *testing.T) { + u := UUID{} + u.SetVersion(4) + + if u.Version() != 4 { + t.Errorf("Incorrect version for UUID after u.setVersion(4): %d", u.Version()) + } +} + +func TestVariant(t *testing.T) { + u1 := UUID{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + + if u1.Variant() != VariantNCS { + 
t.Errorf("Incorrect variant for UUID variant %d: %d", VariantNCS, u1.Variant()) + } + + u2 := UUID{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + + if u2.Variant() != VariantRFC4122 { + t.Errorf("Incorrect variant for UUID variant %d: %d", VariantRFC4122, u2.Variant()) + } + + u3 := UUID{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + + if u3.Variant() != VariantMicrosoft { + t.Errorf("Incorrect variant for UUID variant %d: %d", VariantMicrosoft, u3.Variant()) + } + + u4 := UUID{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + + if u4.Variant() != VariantFuture { + t.Errorf("Incorrect variant for UUID variant %d: %d", VariantFuture, u4.Variant()) + } +} + +func TestSetVariant(t *testing.T) { + u := new(UUID) + u.SetVariant() + + if u.Variant() != VariantRFC4122 { + t.Errorf("Incorrect variant for UUID after u.setVariant(): %d", u.Variant()) + } +} + +func TestFromBytes(t *testing.T) { + u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} + b1 := []byte{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} + + u1, err := FromBytes(b1) + if err != nil { + t.Errorf("Error parsing UUID from bytes: %s", err) + } + + if !Equal(u, u1) { + t.Errorf("UUIDs should be equal: %s and %s", u, u1) + } + + b2 := []byte{} + + _, err = FromBytes(b2) + if err == nil { + t.Errorf("Should return error parsing from empty byte slice, got %s", err) + } +} + +func TestMarshalBinary(t *testing.T) { + u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} + b1 := []byte{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} + + b2, err := u.MarshalBinary() + if err != nil { + t.Errorf("Error marshaling UUID: %s", err) + } + + if !bytes.Equal(b1, b2) { + t.Errorf("Marshaled UUID should be %s, got %s", b1, b2) + } +} + +func TestUnmarshalBinary(t *testing.T) { + u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} + b1 := []byte{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} + + u1 := UUID{} + err := u1.UnmarshalBinary(b1) + if err != nil { + t.Errorf("Error unmarshaling UUID: %s", err) + } + + if !Equal(u, u1) { + t.Errorf("UUIDs should be equal: %s and %s", u, u1) + } + + b2 := []byte{} + u2 := UUID{} + + err = u2.UnmarshalBinary(b2) + if err == nil { + t.Errorf("Should return error unmarshalling from empty byte slice, got %s", err) + } +} + +func TestFromString(t *testing.T) { + u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} + + s1 := "6ba7b810-9dad-11d1-80b4-00c04fd430c8" + s2 := "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}" + s3 := "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8" + + _, err := FromString("") + if err == nil { + t.Errorf("Should return error trying to parse empty string, got %s", err) + } + + u1, err := FromString(s1) + if err != nil { + t.Errorf("Error parsing UUID from string: %s", err) + } + + if !Equal(u, u1) { + t.Errorf("UUIDs should be equal: %s and %s", u, u1) + } + + u2, err := FromString(s2) + if err != nil { + t.Errorf("Error parsing UUID from string: %s", err) + } + + if !Equal(u, u2) { + t.Errorf("UUIDs should be equal: %s and %s", u, u2) + } + + u3, err := FromString(s3) + if err != nil { + 
t.Errorf("Error parsing UUID from string: %s", err) + } + + if !Equal(u, u3) { + t.Errorf("UUIDs should be equal: %s and %s", u, u3) + } +} + +func TestFromStringShort(t *testing.T) { + // Invalid 35-character UUID string + s1 := "6ba7b810-9dad-11d1-80b4-00c04fd430c" + + for i := len(s1); i >= 0; i-- { + _, err := FromString(s1[:i]) + if err == nil { + t.Errorf("Should return error trying to parse too short string, got %s", err) + } + } +} + +func TestFromStringLong(t *testing.T) { + // Invalid 37+ character UUID string + s := []string{ + "6ba7b810-9dad-11d1-80b4-00c04fd430c8=", + "6ba7b810-9dad-11d1-80b4-00c04fd430c8}", + "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}f", + "6ba7b810-9dad-11d1-80b4-00c04fd430c800c04fd430c8", + } + + for _, str := range s { + _, err := FromString(str) + if err == nil { + t.Errorf("Should return error trying to parse too long string, passed %s", str) + } + } +} + +func TestFromStringInvalid(t *testing.T) { + // Invalid UUID string formats + s := []string{ + "6ba7b8109dad11d180b400c04fd430c8", + "6ba7b8109dad11d180b400c04fd430c86ba7b8109dad11d180b400c04fd430c8", + "urn:uuid:{6ba7b810-9dad-11d1-80b4-00c04fd430c8}", + "6ba7b8109-dad-11d1-80b4-00c04fd430c8", + "6ba7b810-9dad1-1d1-80b4-00c04fd430c8", + "6ba7b810-9dad-11d18-0b4-00c04fd430c8", + "6ba7b810-9dad-11d1-80b40-0c04fd430c8", + "6ba7b810+9dad+11d1+80b4+00c04fd430c8", + "6ba7b810-9dad11d180b400c04fd430c8", + "6ba7b8109dad-11d180b400c04fd430c8", + "6ba7b8109dad11d1-80b400c04fd430c8", + "6ba7b8109dad11d180b4-00c04fd430c8", + } + + for _, str := range s { + _, err := FromString(str) + if err == nil { + t.Errorf("Should return error trying to parse invalid string, passed %s", str) + } + } +} + +func TestFromStringOrNil(t *testing.T) { + u := FromStringOrNil("") + if u != Nil { + t.Errorf("Should return Nil UUID on parse failure, got %s", u) + } +} + +func TestFromBytesOrNil(t *testing.T) { + b := []byte{} + u := FromBytesOrNil(b) + if u != Nil { + t.Errorf("Should return Nil UUID on parse failure, got %s", u) + } +} + +func TestMarshalText(t *testing.T) { + u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} + b1 := []byte("6ba7b810-9dad-11d1-80b4-00c04fd430c8") + + b2, err := u.MarshalText() + if err != nil { + t.Errorf("Error marshaling UUID: %s", err) + } + + if !bytes.Equal(b1, b2) { + t.Errorf("Marshaled UUID should be %s, got %s", b1, b2) + } +} + +func TestUnmarshalText(t *testing.T) { + u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} + b1 := []byte("6ba7b810-9dad-11d1-80b4-00c04fd430c8") + + u1 := UUID{} + err := u1.UnmarshalText(b1) + if err != nil { + t.Errorf("Error unmarshaling UUID: %s", err) + } + + if !Equal(u, u1) { + t.Errorf("UUIDs should be equal: %s and %s", u, u1) + } + + b2 := []byte("") + u2 := UUID{} + + err = u2.UnmarshalText(b2) + if err == nil { + t.Errorf("Should return error trying to unmarshal from empty string") + } +} + +func TestValue(t *testing.T) { + u, err := FromString("6ba7b810-9dad-11d1-80b4-00c04fd430c8") + if err != nil { + t.Errorf("Error parsing UUID from string: %s", err) + } + + val, err := u.Value() + if err != nil { + t.Errorf("Error getting UUID value: %s", err) + } + + if val != u.String() { + t.Errorf("Wrong value returned, should be equal: %s and %s", val, u) + } +} + +func TestValueNil(t *testing.T) { + u := UUID{} + + val, err := u.Value() + if err != nil { + t.Errorf("Error getting UUID value: %s", err) + } + + if val != Nil.String() { + t.Errorf("Wrong 
value returned, should be equal to UUID.Nil: %s", val) + } +} + +func TestNullUUIDValueNil(t *testing.T) { + u := NullUUID{} + + val, err := u.Value() + if err != nil { + t.Errorf("Error getting UUID value: %s", err) + } + + if val != nil { + t.Errorf("Wrong value returned, should be nil: %s", val) + } +} + +func TestScanBinary(t *testing.T) { + u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} + b1 := []byte{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} + + u1 := UUID{} + err := u1.Scan(b1) + if err != nil { + t.Errorf("Error unmarshaling UUID: %s", err) + } + + if !Equal(u, u1) { + t.Errorf("UUIDs should be equal: %s and %s", u, u1) + } + + b2 := []byte{} + u2 := UUID{} + + err = u2.Scan(b2) + if err == nil { + t.Errorf("Should return error unmarshalling from empty byte slice, got %s", err) + } +} + +func TestScanString(t *testing.T) { + u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} + s1 := "6ba7b810-9dad-11d1-80b4-00c04fd430c8" + + u1 := UUID{} + err := u1.Scan(s1) + if err != nil { + t.Errorf("Error unmarshaling UUID: %s", err) + } + + if !Equal(u, u1) { + t.Errorf("UUIDs should be equal: %s and %s", u, u1) + } + + s2 := "" + u2 := UUID{} + + err = u2.Scan(s2) + if err == nil { + t.Errorf("Should return error trying to unmarshal from empty string") + } +} + +func TestScanText(t *testing.T) { + u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} + b1 := []byte("6ba7b810-9dad-11d1-80b4-00c04fd430c8") + + u1 := UUID{} + err := u1.Scan(b1) + if err != nil { + t.Errorf("Error unmarshaling UUID: %s", err) + } + + if !Equal(u, u1) { + t.Errorf("UUIDs should be equal: %s and %s", u, u1) + } + + b2 := []byte("") + u2 := UUID{} + + err = u2.Scan(b2) + if err == nil { + t.Errorf("Should return error trying to unmarshal from empty string") + } +} + +func TestScanUnsupported(t *testing.T) { + u := UUID{} + + err := u.Scan(true) + if err == nil { + t.Errorf("Should return error trying to unmarshal from bool") + } +} + +func TestScanNil(t *testing.T) { + u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} + + err := u.Scan(nil) + if err == nil { + t.Errorf("Error UUID shouldn't allow unmarshalling from nil") + } +} + +func TestNullUUIDScanValid(t *testing.T) { + u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} + s1 := "6ba7b810-9dad-11d1-80b4-00c04fd430c8" + + u1 := NullUUID{} + err := u1.Scan(s1) + if err != nil { + t.Errorf("Error unmarshaling NullUUID: %s", err) + } + + if !u1.Valid { + t.Errorf("NullUUID should be valid") + } + + if !Equal(u, u1.UUID) { + t.Errorf("UUIDs should be equal: %s and %s", u, u1.UUID) + } +} + +func TestNullUUIDScanNil(t *testing.T) { + u := NullUUID{UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}, true} + + err := u.Scan(nil) + if err != nil { + t.Errorf("Error unmarshaling NullUUID: %s", err) + } + + if u.Valid { + t.Errorf("NullUUID should not be valid") + } + + if !Equal(u.UUID, Nil) { + t.Errorf("NullUUID value should be equal to Nil: %s", u) + } +} + +func TestNewV1(t *testing.T) { + u := NewV1() + + if u.Version() != 1 { + t.Errorf("UUIDv1 generated with incorrect version: %d", u.Version()) + } + + if u.Variant() != VariantRFC4122 { + t.Errorf("UUIDv1 generated with incorrect variant: 
%d", u.Variant()) + } + + u1 := NewV1() + u2 := NewV1() + + if Equal(u1, u2) { + t.Errorf("UUIDv1 generated two equal UUIDs: %s and %s", u1, u2) + } + + oldFunc := epochFunc + epochFunc = func() uint64 { return 0 } + + u3 := NewV1() + u4 := NewV1() + + if Equal(u3, u4) { + t.Errorf("UUIDv1 generated two equal UUIDs: %s and %s", u3, u4) + } + + epochFunc = oldFunc +} + +func TestNewV2(t *testing.T) { + u1 := NewV2(DomainPerson) + + if u1.Version() != 2 { + t.Errorf("UUIDv2 generated with incorrect version: %d", u1.Version()) + } + + if u1.Variant() != VariantRFC4122 { + t.Errorf("UUIDv2 generated with incorrect variant: %d", u1.Variant()) + } + + u2 := NewV2(DomainGroup) + + if u2.Version() != 2 { + t.Errorf("UUIDv2 generated with incorrect version: %d", u2.Version()) + } + + if u2.Variant() != VariantRFC4122 { + t.Errorf("UUIDv2 generated with incorrect variant: %d", u2.Variant()) + } +} + +func TestNewV3(t *testing.T) { + u := NewV3(NamespaceDNS, "www.example.com") + + if u.Version() != 3 { + t.Errorf("UUIDv3 generated with incorrect version: %d", u.Version()) + } + + if u.Variant() != VariantRFC4122 { + t.Errorf("UUIDv3 generated with incorrect variant: %d", u.Variant()) + } + + if u.String() != "5df41881-3aed-3515-88a7-2f4a814cf09e" { + t.Errorf("UUIDv3 generated incorrectly: %s", u.String()) + } + + u = NewV3(NamespaceDNS, "python.org") + + if u.String() != "6fa459ea-ee8a-3ca4-894e-db77e160355e" { + t.Errorf("UUIDv3 generated incorrectly: %s", u.String()) + } + + u1 := NewV3(NamespaceDNS, "golang.org") + u2 := NewV3(NamespaceDNS, "golang.org") + if !Equal(u1, u2) { + t.Errorf("UUIDv3 generated different UUIDs for same namespace and name: %s and %s", u1, u2) + } + + u3 := NewV3(NamespaceDNS, "example.com") + if Equal(u1, u3) { + t.Errorf("UUIDv3 generated same UUIDs for different names in same namespace: %s and %s", u1, u2) + } + + u4 := NewV3(NamespaceURL, "golang.org") + if Equal(u1, u4) { + t.Errorf("UUIDv3 generated same UUIDs for sane names in different namespaces: %s and %s", u1, u4) + } +} + +func TestNewV4(t *testing.T) { + u := NewV4() + + if u.Version() != 4 { + t.Errorf("UUIDv4 generated with incorrect version: %d", u.Version()) + } + + if u.Variant() != VariantRFC4122 { + t.Errorf("UUIDv4 generated with incorrect variant: %d", u.Variant()) + } +} + +func TestNewV5(t *testing.T) { + u := NewV5(NamespaceDNS, "www.example.com") + + if u.Version() != 5 { + t.Errorf("UUIDv5 generated with incorrect version: %d", u.Version()) + } + + if u.Variant() != VariantRFC4122 { + t.Errorf("UUIDv5 generated with incorrect variant: %d", u.Variant()) + } + + u = NewV5(NamespaceDNS, "python.org") + + if u.String() != "886313e1-3b8a-5372-9b90-0c9aee199e5d" { + t.Errorf("UUIDv5 generated incorrectly: %s", u.String()) + } + + u1 := NewV5(NamespaceDNS, "golang.org") + u2 := NewV5(NamespaceDNS, "golang.org") + if !Equal(u1, u2) { + t.Errorf("UUIDv5 generated different UUIDs for same namespace and name: %s and %s", u1, u2) + } + + u3 := NewV5(NamespaceDNS, "example.com") + if Equal(u1, u3) { + t.Errorf("UUIDv5 generated same UUIDs for different names in same namespace: %s and %s", u1, u2) + } + + u4 := NewV5(NamespaceURL, "golang.org") + if Equal(u1, u4) { + t.Errorf("UUIDv3 generated same UUIDs for sane names in different namespaces: %s and %s", u1, u4) + } +} diff --git a/vendor/github.com/shurcooL/githubql/BUILD b/vendor/github.com/shurcooL/githubql/BUILD index da8b90055ee4..7f27649303f0 100644 --- a/vendor/github.com/shurcooL/githubql/BUILD +++ b/vendor/github.com/shurcooL/githubql/BUILD @@ -1,4 
+1,4 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "go_default_library", @@ -14,6 +14,16 @@ go_library( deps = ["//vendor/github.com/shurcooL/graphql:go_default_library"], ) +go_test( + name = "go_default_xtest", + srcs = [ + "githubql_test.go", + "scalar_test.go", + ], + importpath = "github.com/shurcooL/githubql_test", + deps = [":go_default_library"], +) + filegroup( name = "package-srcs", srcs = glob(["**"]), diff --git a/vendor/github.com/shurcooL/githubql/doc.go b/vendor/github.com/shurcooL/githubql/doc.go index bba961b99c5e..5d4ae1942d93 100644 --- a/vendor/github.com/shurcooL/githubql/doc.go +++ b/vendor/github.com/shurcooL/githubql/doc.go @@ -8,6 +8,6 @@ // opportunities for improvement are discovered; it is not yet frozen. // // For now, see README for more details. -package githubql +package githubql // import "github.com/shurcooL/githubql" //go:generate go run gen.go diff --git a/vendor/github.com/shurcooL/githubql/githubql_test.go b/vendor/github.com/shurcooL/githubql/githubql_test.go new file mode 100644 index 000000000000..c0d624b2bf69 --- /dev/null +++ b/vendor/github.com/shurcooL/githubql/githubql_test.go @@ -0,0 +1,289 @@ +package githubql_test + +import ( + "context" + "io" + "io/ioutil" + "net/http" + "net/http/httptest" + "reflect" + "testing" + "time" + + "github.com/shurcooL/githubql" +) + +func TestNewClient_nil(t *testing.T) { + // Shouldn't panic with nil parameter. + client := githubql.NewClient(nil) + _ = client +} + +func TestClient_Query(t *testing.T) { + mux := http.NewServeMux() + mux.HandleFunc("/graphql", func(w http.ResponseWriter, req *http.Request) { + if got, want := req.Method, http.MethodPost; got != want { + t.Errorf("got request method: %v, want: %v", got, want) + } + body := mustRead(req.Body) + if got, want := body, `{"query":"{viewer{login,bio}}"}`+"\n"; got != want { + t.Errorf("got body: %v, want %v", got, want) + } + mustWrite(w, `{"data": {"viewer": {"login": "gopher", "bio": "The Go gopher."}}}`) + }) + client := githubql.NewClient(&http.Client{Transport: localRoundTripper{mux: mux}}) + + type query struct { + Viewer struct { + Login githubql.String + Biography githubql.String `graphql:"bio"` // GraphQL alias. + } + } + + var q query + err := client.Query(context.Background(), &q, nil) + if err != nil { + t.Fatal(err) + } + got := q + + var want query + want.Viewer.Login = "gopher" + want.Viewer.Biography = "The Go gopher." 
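+ // Note: want.Viewer.Biography is filled from the response's "bio" field
+ // through the graphql struct tag above, so this comparison also
+ // exercises the alias handling.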
+ if !reflect.DeepEqual(got, want) { + t.Errorf("client.Query got: %v, want: %v", got, want) + } +} + +func TestClient_Query_errorResponse(t *testing.T) { + mux := http.NewServeMux() + mux.HandleFunc("/graphql", func(w http.ResponseWriter, req *http.Request) { + mustWrite(w, `{ + "data": null, + "errors": [ + { + "message": "Field 'bad' doesn't exist on type 'Query'", + "locations": [ + { + "line": 7, + "column": 3 + } + ] + } + ] + }`) + }) + client := githubql.NewClient(&http.Client{Transport: localRoundTripper{mux: mux}}) + + var q struct { + Bad githubql.String + } + err := client.Query(context.Background(), &q, nil) + if err == nil { + t.Fatal("got error: nil, want: non-nil") + } + if got, want := err.Error(), "Field 'bad' doesn't exist on type 'Query'"; got != want { + t.Errorf("got error: %v, want: %v", got, want) + } +} + +func TestClient_Query_errorStatusCode(t *testing.T) { + mux := http.NewServeMux() + mux.HandleFunc("/graphql", func(w http.ResponseWriter, req *http.Request) { + http.Error(w, "404 Not Found", http.StatusNotFound) + }) + client := githubql.NewClient(&http.Client{Transport: localRoundTripper{mux: mux}}) + + var q struct { + Viewer struct { + Login githubql.String + } + } + err := client.Query(context.Background(), &q, nil) + if err == nil { + t.Fatal("got error: nil, want: non-nil") + } + if got, want := err.Error(), "unexpected status: 404 Not Found"; got != want { + t.Errorf("got error: %v, want: %v", got, want) + } +} + +func TestClient_Query_union(t *testing.T) { + mux := http.NewServeMux() + mux.HandleFunc("/graphql", func(w http.ResponseWriter, req *http.Request) { + if got, want := req.Method, http.MethodPost; got != want { + t.Errorf("got request method: %v, want: %v", got, want) + } + body := mustRead(req.Body) + if got, want := body, `{"query":"query($issueNumber:Int!$repositoryName:String!$repositoryOwner:String!){repository(owner: $repositoryOwner, name: $repositoryName){issue(number: $issueNumber){timeline(first: 10){nodes{__typename,...on ClosedEvent{actor{login},createdAt},...on ReopenedEvent{actor{login},createdAt},...on RenamedTitleEvent{actor{login},createdAt,currentTitle,previousTitle}}}}}}","variables":{"issueNumber":1,"repositoryName":"go","repositoryOwner":"golang"}}`+"\n"; got != want { + t.Errorf("got body: %v, want %v", got, want) + } + mustWrite(w, `{"data": { + "repository": { + "issue": { + "timeline": { + "nodes": [ + { + "__typename": "RenamedTitleEvent", + "createdAt": "2017-06-29T04:12:01Z", + "actor": { + "login": "gopher" + }, + "currentTitle": "new", + "previousTitle": "old" + } + ] + } + } + } + }}`) + }) + client := githubql.NewClient(&http.Client{Transport: localRoundTripper{mux: mux}}) + + type event struct { // Common fields for all events. 
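+ // These fields are shared: embedding event in each "...on X" inline
+ // fragment below reuses Actor and CreatedAt without repeating them
+ // per event type.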
+ Actor struct{ Login githubql.String } + CreatedAt githubql.DateTime + } + type issueTimelineItem struct { + Typename string `graphql:"__typename"` + ClosedEvent struct { + event + } `graphql:"...on ClosedEvent"` + ReopenedEvent struct { + event + } `graphql:"...on ReopenedEvent"` + RenamedTitleEvent struct { + event + CurrentTitle string + PreviousTitle string + } `graphql:"...on RenamedTitleEvent"` + } + type query struct { + Repository struct { + Issue struct { + Timeline struct { + Nodes []issueTimelineItem + } `graphql:"timeline(first: 10)"` + } `graphql:"issue(number: $issueNumber)"` + } `graphql:"repository(owner: $repositoryOwner, name: $repositoryName)"` + } + + var q query + variables := map[string]interface{}{ + "repositoryOwner": githubql.String("golang"), + "repositoryName": githubql.String("go"), + "issueNumber": githubql.Int(1), + } + err := client.Query(context.Background(), &q, variables) + if err != nil { + t.Fatal(err) + } + got := q + + var want query + want.Repository.Issue.Timeline.Nodes = make([]issueTimelineItem, 1) + want.Repository.Issue.Timeline.Nodes[0].Typename = "RenamedTitleEvent" + want.Repository.Issue.Timeline.Nodes[0].RenamedTitleEvent.Actor.Login = "gopher" + want.Repository.Issue.Timeline.Nodes[0].RenamedTitleEvent.CreatedAt.Time = time.Unix(1498709521, 0).UTC() + want.Repository.Issue.Timeline.Nodes[0].RenamedTitleEvent.CurrentTitle = "new" + want.Repository.Issue.Timeline.Nodes[0].RenamedTitleEvent.PreviousTitle = "old" + want.Repository.Issue.Timeline.Nodes[0].ClosedEvent.event = want.Repository.Issue.Timeline.Nodes[0].RenamedTitleEvent.event + want.Repository.Issue.Timeline.Nodes[0].ReopenedEvent.event = want.Repository.Issue.Timeline.Nodes[0].RenamedTitleEvent.event + if !reflect.DeepEqual(got, want) { + t.Errorf("client.Query:\ngot: %+v\nwant: %+v", got, want) + } +} + +func TestClient_Mutate(t *testing.T) { + mux := http.NewServeMux() + mux.HandleFunc("/graphql", func(w http.ResponseWriter, req *http.Request) { + if got, want := req.Method, http.MethodPost; got != want { + t.Errorf("got request method: %v, want: %v", got, want) + } + body := mustRead(req.Body) + if got, want := body, `{"query":"mutation($input:AddReactionInput!){addReaction(input:$input){reaction{content},subject{id,reactionGroups{users{totalCount}}}}}","variables":{"input":{"subjectId":"MDU6SXNzdWUyMTc5NTQ0OTc=","content":"HOORAY"}}}`+"\n"; got != want { + t.Errorf("got body: %v, want %v", got, want) + } + mustWrite(w, `{"data": { + "addReaction": { + "reaction": { + "content": "HOORAY" + }, + "subject": { + "id": "MDU6SXNzdWUyMTc5NTQ0OTc=", + "reactionGroups": [ + { + "users": {"totalCount": 3} + } + ] + } + } + }}`) + }) + client := githubql.NewClient(&http.Client{Transport: localRoundTripper{mux: mux}}) + + type reactionGroup struct { + Users struct { + TotalCount githubql.Int + } + } + type mutation struct { + AddReaction struct { + Reaction struct { + Content githubql.ReactionContent + } + Subject struct { + ID githubql.ID + ReactionGroups []reactionGroup + } + } `graphql:"addReaction(input:$input)"` + } + + var m mutation + input := githubql.AddReactionInput{ + SubjectID: "MDU6SXNzdWUyMTc5NTQ0OTc=", + Content: githubql.ReactionContentHooray, + } + err := client.Mutate(context.Background(), &m, input, nil) + if err != nil { + t.Fatal(err) + } + got := m + + var want mutation + want.AddReaction.Reaction.Content = githubql.ReactionContentHooray + want.AddReaction.Subject.ID = "MDU6SXNzdWUyMTc5NTQ0OTc=" + var rg reactionGroup + rg.Users.TotalCount = 3 + 
want.AddReaction.Subject.ReactionGroups = []reactionGroup{rg}
+ if !reflect.DeepEqual(got, want) {
+ t.Errorf("client.Mutate got: %v, want: %v", got, want)
+ }
+}
+
+// localRoundTripper is an http.RoundTripper that executes HTTP transactions
+// by using mux directly, instead of going over an HTTP connection.
+type localRoundTripper struct {
+ mux *http.ServeMux
+}
+
+func (l localRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
+ w := httptest.NewRecorder()
+ l.mux.ServeHTTP(w, req)
+ return w.Result(), nil
+}
+
+func mustRead(r io.Reader) string {
+ b, err := ioutil.ReadAll(r)
+ if err != nil {
+ panic(err)
+ }
+ return string(b)
+}
+
+func mustWrite(w io.Writer, s string) {
+ _, err := io.WriteString(w, s)
+ if err != nil {
+ panic(err)
+ }
+}
diff --git a/vendor/github.com/shurcooL/githubql/scalar_test.go b/vendor/github.com/shurcooL/githubql/scalar_test.go
new file mode 100644
index 000000000000..12251a583563
--- /dev/null
+++ b/vendor/github.com/shurcooL/githubql/scalar_test.go
@@ -0,0 +1,117 @@
+package githubql_test
+
+import (
+ "encoding/json"
+ "errors"
+ "net/url"
+ "reflect"
+ "testing"
+
+ "github.com/shurcooL/githubql"
+)
+
+func TestURI_MarshalJSON(t *testing.T) {
+ tests := []struct {
+ name string
+ in githubql.URI
+ want string
+ }{
+ {
+ in: githubql.URI{URL: &url.URL{Scheme: "https", Host: "example.org", Path: "/foo/bar"}},
+ want: `"https://example.org/foo/bar"`,
+ },
+ }
+ for _, tc := range tests {
+ got, err := json.Marshal(tc.in)
+ if err != nil {
+ t.Fatalf("%s: got error: %v", tc.name, err)
+ }
+ if string(got) != tc.want {
+ t.Errorf("%s: got: %q, want: %q", tc.name, string(got), tc.want)
+ }
+ }
+}
+
+func TestURI_UnmarshalJSON(t *testing.T) {
+ tests := []struct {
+ name string
+ in string
+ want githubql.URI
+ wantError error
+ }{
+ {
+ in: `"https://example.org/foo/bar"`,
+ want: githubql.URI{URL: &url.URL{Scheme: "https", Host: "example.org", Path: "/foo/bar"}},
+ },
+ {
+ name: "null",
+ in: `null`,
+ want: githubql.URI{},
+ },
+ {
+ name: "error JSON unmarshaling into string",
+ in: `86`,
+ wantError: errors.New("json: cannot unmarshal number into Go value of type string"),
+ },
+ }
+ for _, tc := range tests {
+ var got githubql.URI
+ err := json.Unmarshal([]byte(tc.in), &got)
+ if got, want := err, tc.wantError; !equalError(got, want) {
+ t.Fatalf("%s: got error: %v, want: %v", tc.name, got, want)
+ }
+ if tc.wantError != nil {
+ continue
+ }
+ if !reflect.DeepEqual(got, tc.want) {
+ t.Errorf("%s: got: %v, want: %v", tc.name, got, tc.want)
+ }
+ }
+}
+
+// equalError reports whether errors a and b are considered equal.
+// They're equal if both are nil, or both are not nil and a.Error() == b.Error().
+func equalError(a, b error) bool {
+ return a == nil && b == nil || a != nil && b != nil && a.Error() == b.Error()
+}
+
+func TestNewScalars(t *testing.T) {
+ if got := githubql.NewBoolean(false); got == nil {
+ t.Error("NewBoolean returned nil")
+ }
+ if got := githubql.NewDateTime(githubql.DateTime{}); got == nil {
+ t.Error("NewDateTime returned nil")
+ }
+ if got := githubql.NewFloat(0.0); got == nil {
+ t.Error("NewFloat returned nil")
+ }
+ if got := githubql.NewGitObjectID(""); got == nil {
+ t.Error("NewGitObjectID returned nil")
+ }
+ if got := githubql.NewGitTimestamp(githubql.GitTimestamp{}); got == nil {
+ t.Error("NewGitTimestamp returned nil")
+ }
+ if got := githubql.NewHTML(""); got == nil {
+ t.Error("NewHTML returned nil")
+ }
+ // ID with underlying type string.
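+ // The New* helpers return pointers, so optional input fields can be
+ // left nil when unset; the checks here only confirm non-nil results.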
+ if got := githubql.NewID(""); got == nil { + t.Error("NewID returned nil") + } + // ID with underlying type int. + if got := githubql.NewID(0); got == nil { + t.Error("NewID returned nil") + } + if got := githubql.NewInt(0); got == nil { + t.Error("NewInt returned nil") + } + if got := githubql.NewString(""); got == nil { + t.Error("NewString returned nil") + } + if got := githubql.NewURI(githubql.URI{}); got == nil { + t.Error("NewURI returned nil") + } + if got := githubql.NewX509Certificate(githubql.X509Certificate{}); got == nil { + t.Error("NewX509Certificate returned nil") + } +} diff --git a/vendor/github.com/shurcooL/go/.travis.yml b/vendor/github.com/shurcooL/go/.travis.yml new file mode 100644 index 000000000000..93b1fcdb31a2 --- /dev/null +++ b/vendor/github.com/shurcooL/go/.travis.yml @@ -0,0 +1,16 @@ +sudo: false +language: go +go: + - 1.x + - master +matrix: + allow_failures: + - go: master + fast_finish: true +install: + - # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step). +script: + - go get -t -v ./... + - diff -u <(echo -n) <(gofmt -d -s .) + - go tool vet . + - go test -v -race ./... diff --git a/vendor/github.com/shurcooL/go/README.md b/vendor/github.com/shurcooL/go/README.md new file mode 100644 index 000000000000..36d1adf4fddd --- /dev/null +++ b/vendor/github.com/shurcooL/go/README.md @@ -0,0 +1,52 @@ +go +== + +[![Build Status](https://travis-ci.org/shurcooL/go.svg?branch=master)](https://travis-ci.org/shurcooL/go) [![GoDoc](https://godoc.org/github.com/shurcooL/go?status.svg)](https://godoc.org/github.com/shurcooL/go) + +Common Go code. + +Installation +------------ + +```bash +go get -u github.com/shurcooL/go/... +``` + +Directories +----------- + +| Path | Synopsis | +|-------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| [analysis](https://godoc.org/github.com/shurcooL/go/analysis) | Package analysis provides a routine that determines if a file is generated or handcrafted. | +| [browser](https://godoc.org/github.com/shurcooL/go/browser) | Package browser provides utilities for interacting with users' browsers. | +| [ctxhttp](https://godoc.org/github.com/shurcooL/go/ctxhttp) | Package ctxhttp provides helper functions for performing context-aware HTTP requests. | +| [gddo](https://godoc.org/github.com/shurcooL/go/gddo) | Package gddo is a simple client library for accessing the godoc.org API. | +| [generated](https://godoc.org/github.com/shurcooL/go/generated) | Package generated provides a function that parses a Go file and reports whether it contains a "// Code generated ... | +| [gfmutil](https://godoc.org/github.com/shurcooL/go/gfmutil) | Package gfmutil offers functionality to render GitHub Flavored Markdown to io.Writer. | +| [gopathutil](https://godoc.org/github.com/shurcooL/go/gopathutil) | Package gopathutil provides tools to operate on GOPATH workspace. | +| [gopherjs_http](https://godoc.org/github.com/shurcooL/go/gopherjs_http) | Package gopherjs_http provides helpers for compiling Go using GopherJS and serving it over HTTP. | +| [gopherjs_http/jsutil](https://godoc.org/github.com/shurcooL/go/gopherjs_http/jsutil) | Package jsutil provides utility functions for interacting with native JavaScript APIs. 
| +| [importgraphutil](https://godoc.org/github.com/shurcooL/go/importgraphutil) | Package importgraphutil augments "golang.org/x/tools/refactor/importgraph" with a way to build graphs ignoring tests. | +| [indentwriter](https://godoc.org/github.com/shurcooL/go/indentwriter) | Package indentwriter implements an io.Writer wrapper that indents every non-empty line with specified number of tabs. | +| [ioutil](https://godoc.org/github.com/shurcooL/go/ioutil) | Package ioutil provides a WriteFile func with an io.Reader as input. | +| [open](https://godoc.org/github.com/shurcooL/go/open) | Package open offers ability to open files or URLs as if user double-clicked it in their OS. | +| [openutil](https://godoc.org/github.com/shurcooL/go/openutil) | Package openutil displays Markdown or HTML in a new browser tab. | +| [ospath](https://godoc.org/github.com/shurcooL/go/ospath) | Package ospath provides utilities to get OS-specific directories. | +| [osutil](https://godoc.org/github.com/shurcooL/go/osutil) | Package osutil offers a utility for manipulating a set of environment variables. | +| [parserutil](https://godoc.org/github.com/shurcooL/go/parserutil) | Package parserutil offers convenience functions for parsing Go code to AST. | +| [pipeutil](https://godoc.org/github.com/shurcooL/go/pipeutil) | Package pipeutil provides additional functionality for gopkg.in/pipe.v2 package. | +| [printerutil](https://godoc.org/github.com/shurcooL/go/printerutil) | Package printerutil provides formatted printing of AST nodes. | +| [reflectfind](https://godoc.org/github.com/shurcooL/go/reflectfind) | Package reflectfind offers funcs to perform deep-search via reflect to find instances that satisfy given query. | +| [reflectsource](https://godoc.org/github.com/shurcooL/go/reflectsource) | Package sourcereflect implements run-time source reflection, allowing a program to look up string representation of objects from the underlying .go source files. | +| [timeutil](https://godoc.org/github.com/shurcooL/go/timeutil) | Package timeutil provides a func for getting start of week of given time. | +| [trash](https://godoc.org/github.com/shurcooL/go/trash) | Package trash implements functionality to move files into trash. | +| [trim](https://godoc.org/github.com/shurcooL/go/trim) | Package trim contains helpers for trimming strings. | +| [vfs/godocfs/godocfs](https://godoc.org/github.com/shurcooL/go/vfs/godocfs/godocfs) | Package godocfs implements vfs.FileSystem using a http.FileSystem. | +| [vfs/godocfs/html/vfstemplate](https://godoc.org/github.com/shurcooL/go/vfs/godocfs/html/vfstemplate) | Package vfstemplate offers html/template helpers that use vfs.FileSystem. | +| [vfs/godocfs/path/vfspath](https://godoc.org/github.com/shurcooL/go/vfs/godocfs/path/vfspath) | Package vfspath implements utility routines for manipulating virtual file system paths. | +| [vfs/godocfs/vfsutil](https://godoc.org/github.com/shurcooL/go/vfs/godocfs/vfsutil) | Package vfsutil implements some I/O utility functions for vfs.FileSystem. 
|
+
+License
+-------
+
+- [MIT License](https://opensource.org/licenses/mit-license.php)
diff --git a/vendor/github.com/shurcooL/go/ctxhttp/BUILD b/vendor/github.com/shurcooL/go/ctxhttp/BUILD
index da1cd8201a85..d197732f3cda 100644
--- a/vendor/github.com/shurcooL/go/ctxhttp/BUILD
+++ b/vendor/github.com/shurcooL/go/ctxhttp/BUILD
@@ -1,4 +1,4 @@
-load("@io_bazel_rules_go//go:def.bzl", "go_library")
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
 
 go_library(
 name = "go_default_library",
@@ -7,6 +7,13 @@ go_library(
 visibility = ["//visibility:public"],
 )
 
+go_test(
+ name = "go_default_test",
+ srcs = ["ctxhttp_test.go"],
+ importpath = "github.com/shurcooL/go/ctxhttp",
+ library = ":go_default_library",
+)
+
 filegroup(
 name = "package-srcs",
 srcs = glob(["**"]),
diff --git a/vendor/github.com/shurcooL/go/ctxhttp/ctxhttp_test.go b/vendor/github.com/shurcooL/go/ctxhttp/ctxhttp_test.go
new file mode 100644
index 000000000000..812efeca8f1d
--- /dev/null
+++ b/vendor/github.com/shurcooL/go/ctxhttp/ctxhttp_test.go
@@ -0,0 +1,117 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.7
+// +build !plan9
+
+package ctxhttp
+
+import (
+ "context"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/http/httptest"
+ "testing"
+ "time"
+)
+
+func TestGo17Context(t *testing.T) {
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ io.WriteString(w, "ok")
+ }))
+ ctx := context.Background()
+ resp, err := Get(ctx, http.DefaultClient, ts.URL)
+ if resp == nil || err != nil {
+ t.Fatalf("error received from client: %v %v", err, resp)
+ }
+ resp.Body.Close()
+}
+
+const (
+ requestDuration = 100 * time.Millisecond
+ requestBody = "ok"
+)
+
+func okHandler(w http.ResponseWriter, r *http.Request) {
+ time.Sleep(requestDuration)
+ io.WriteString(w, requestBody)
+}
+
+func TestNoTimeout(t *testing.T) {
+ ts := httptest.NewServer(http.HandlerFunc(okHandler))
+ defer ts.Close()
+
+ ctx := context.Background()
+ res, err := Get(ctx, nil, ts.URL)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer res.Body.Close()
+ slurp, err := ioutil.ReadAll(res.Body)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if string(slurp) != requestBody {
+ t.Errorf("body = %q; want %q", slurp, requestBody)
+ }
+}
+
+func TestCancelBeforeHeaders(t *testing.T) {
+ ctx, cancel := context.WithCancel(context.Background())
+
+ blockServer := make(chan struct{})
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ cancel()
+ <-blockServer
+ io.WriteString(w, requestBody)
+ }))
+ defer ts.Close()
+ defer close(blockServer)
+
+ res, err := Get(ctx, nil, ts.URL)
+ if err == nil {
+ res.Body.Close()
+ t.Fatal("Get returned unexpected nil error")
+ }
+ if err != context.Canceled {
+ t.Errorf("err = %v; want %v", err, context.Canceled)
+ }
+}
+
+func TestCancelAfterHangingRequest(t *testing.T) {
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ w.WriteHeader(http.StatusOK)
+ w.(http.Flusher).Flush()
+ <-w.(http.CloseNotifier).CloseNotify()
+ }))
+ defer ts.Close()
+
+ ctx, cancel := context.WithCancel(context.Background())
+ resp, err := Get(ctx, nil, ts.URL)
+ if err != nil {
+ t.Fatalf("unexpected error in Get: %v", err)
+ }
+
+ // Cancel before reading the body.
+ // Reading Request.Body should fail, since the request was
+ // canceled before anything was written.
+ cancel() + + done := make(chan struct{}) + + go func() { + b, err := ioutil.ReadAll(resp.Body) + if len(b) != 0 || err == nil { + t.Errorf(`Read got (%q, %v); want ("", error)`, b, err) + } + close(done) + }() + + select { + case <-time.After(1 * time.Second): + t.Errorf("Test timed out") + case <-done: + } +} diff --git a/vendor/github.com/shurcooL/graphql/BUILD b/vendor/github.com/shurcooL/graphql/BUILD index e1ea36079f06..399f64251388 100644 --- a/vendor/github.com/shurcooL/graphql/BUILD +++ b/vendor/github.com/shurcooL/graphql/BUILD @@ -1,4 +1,4 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "go_default_library", @@ -17,6 +17,20 @@ go_library( ], ) +go_test( + name = "go_default_test", + srcs = ["query_test.go"], + importpath = "github.com/shurcooL/graphql", + library = ":go_default_library", +) + +go_test( + name = "go_default_xtest", + srcs = ["scalar_test.go"], + importpath = "github.com/shurcooL/graphql_test", + deps = [":go_default_library"], +) + filegroup( name = "package-srcs", srcs = glob(["**"]), diff --git a/vendor/github.com/shurcooL/graphql/doc.go b/vendor/github.com/shurcooL/graphql/doc.go index f2605e18f874..614d20d11d00 100644 --- a/vendor/github.com/shurcooL/graphql/doc.go +++ b/vendor/github.com/shurcooL/graphql/doc.go @@ -8,4 +8,4 @@ // opportunities for improvement are discovered; it is not yet frozen. // // For now, see README for more details. -package graphql +package graphql // import "github.com/shurcooL/graphql" diff --git a/vendor/github.com/shurcooL/graphql/ident/BUILD b/vendor/github.com/shurcooL/graphql/ident/BUILD index 2fa75e209fde..f5c3fdeeb3d8 100644 --- a/vendor/github.com/shurcooL/graphql/ident/BUILD +++ b/vendor/github.com/shurcooL/graphql/ident/BUILD @@ -1,4 +1,4 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "go_default_library", @@ -7,6 +7,13 @@ go_library( visibility = ["//visibility:public"], ) +go_test( + name = "go_default_xtest", + srcs = ["ident_test.go"], + importpath = "github.com/shurcooL/graphql/ident_test", + deps = [":go_default_library"], +) + filegroup( name = "package-srcs", srcs = glob(["**"]), diff --git a/vendor/github.com/shurcooL/graphql/ident/ident_test.go b/vendor/github.com/shurcooL/graphql/ident/ident_test.go new file mode 100644 index 000000000000..9a1562b737ef --- /dev/null +++ b/vendor/github.com/shurcooL/graphql/ident/ident_test.go @@ -0,0 +1,164 @@ +package ident_test + +import ( + "fmt" + "reflect" + "testing" + + "github.com/shurcooL/graphql/ident" +) + +func Example_lowerCamelCaseToMixedCaps() { + fmt.Println(ident.ParseLowerCamelCase("clientMutationId").ToMixedCaps()) + + // Output: ClientMutationID +} + +func Example_screamingSnakeCaseToMixedCaps() { + fmt.Println(ident.ParseScreamingSnakeCase("CLIENT_MUTATION_ID").ToMixedCaps()) + + // Output: ClientMutationID +} + +func Example_mixedCapsToLowerCamelCase() { + fmt.Println(ident.ParseMixedCaps("ClientMutationID").ToLowerCamelCase()) + + // Output: clientMutationId +} + +func TestParseMixedCaps(t *testing.T) { + tests := []struct { + in string + want ident.Name + }{ + {in: "ClientMutationID", want: ident.Name{"Client", "Mutation", "ID"}}, + {in: "StringURLAppend", want: ident.Name{"String", "URL", "Append"}}, + {in: "URLFrom", want: ident.Name{"URL", "From"}}, + {in: "SetURL", want: ident.Name{"Set", "URL"}}, + {in: "UIIP", want: ident.Name{"UI", "IP"}}, + {in: "URLHTMLFrom", 
want: ident.Name{"URL", "HTML", "From"}}, + {in: "SetURLHTML", want: ident.Name{"Set", "URL", "HTML"}}, + {in: "HTTPSQL", want: ident.Name{"HTTP", "SQL"}}, + {in: "HTTPSSQL", want: ident.Name{"HTTPS", "SQL"}}, + {in: "UserIDs", want: ident.Name{"User", "IDs"}}, + {in: "TeamIDsSorted", want: ident.Name{"Team", "IDs", "Sorted"}}, + } + for _, tc := range tests { + got := ident.ParseMixedCaps(tc.in) + if !reflect.DeepEqual(got, tc.want) { + t.Errorf("got: %q, want: %q", got, tc.want) + } + } +} + +func TestParseLowerCamelCase(t *testing.T) { + tests := []struct { + in string + want ident.Name + }{ + {in: "clientMutationId", want: ident.Name{"client", "Mutation", "Id"}}, + } + for _, tc := range tests { + got := ident.ParseLowerCamelCase(tc.in) + if !reflect.DeepEqual(got, tc.want) { + t.Errorf("got: %q, want: %q", got, tc.want) + } + } +} + +func TestParseScreamingSnakeCase(t *testing.T) { + tests := []struct { + in string + want ident.Name + }{ + {in: "CLIENT_MUTATION_ID", want: ident.Name{"CLIENT", "MUTATION", "ID"}}, + } + for _, tc := range tests { + got := ident.ParseScreamingSnakeCase(tc.in) + if !reflect.DeepEqual(got, tc.want) { + t.Errorf("got: %q, want: %q", got, tc.want) + } + } +} + +func TestWords_ToMixedCaps(t *testing.T) { + tests := []struct { + in ident.Name + want string + }{ + {in: ident.Name{"client", "Mutation", "Id"}, want: "ClientMutationID"}, + {in: ident.Name{"CLIENT", "MUTATION", "ID"}, want: "ClientMutationID"}, + } + for _, tc := range tests { + got := tc.in.ToMixedCaps() + if got != tc.want { + t.Errorf("got: %q, want: %q", got, tc.want) + } + } +} + +func TestWords_ToLowerCamelCase(t *testing.T) { + tests := []struct { + in ident.Name + want string + }{ + {in: ident.Name{"client", "Mutation", "Id"}, want: "clientMutationId"}, + {in: ident.Name{"CLIENT", "MUTATION", "ID"}, want: "clientMutationId"}, + } + for _, tc := range tests { + got := tc.in.ToLowerCamelCase() + if got != tc.want { + t.Errorf("got: %q, want: %q", got, tc.want) + } + } +} + +func TestMixedCapsToLowerCamelCase(t *testing.T) { + tests := []struct { + in string + want string + }{ + {in: "DatabaseID", want: "databaseId"}, + {in: "URL", want: "url"}, + {in: "ID", want: "id"}, + {in: "CreatedAt", want: "createdAt"}, + {in: "Login", want: "login"}, + {in: "ResetAt", want: "resetAt"}, + {in: "ID", want: "id"}, + {in: "IDs", want: "ids"}, + {in: "IDsAndNames", want: "idsAndNames"}, + {in: "UserIDs", want: "userIds"}, + {in: "TeamIDsSorted", want: "teamIdsSorted"}, + } + for _, tc := range tests { + got := ident.ParseMixedCaps(tc.in).ToLowerCamelCase() + if got != tc.want { + t.Errorf("got: %q, want: %q", got, tc.want) + } + } +} + +func TestLowerCamelCaseToMixedCaps(t *testing.T) { + tests := []struct { + in string + want string + }{ + {in: "databaseId", want: "DatabaseID"}, + {in: "url", want: "URL"}, + {in: "id", want: "ID"}, + {in: "createdAt", want: "CreatedAt"}, + {in: "login", want: "Login"}, + {in: "resetAt", want: "ResetAt"}, + {in: "id", want: "ID"}, + {in: "ids", want: "IDs"}, + {in: "idsAndNames", want: "IDsAndNames"}, + {in: "userIds", want: "UserIDs"}, + {in: "teamIdsSorted", want: "TeamIDsSorted"}, + } + for _, tc := range tests { + got := ident.ParseLowerCamelCase(tc.in).ToMixedCaps() + if got != tc.want { + t.Errorf("got: %q, want: %q", got, tc.want) + } + } +} diff --git a/vendor/github.com/shurcooL/graphql/internal/jsonutil/BUILD b/vendor/github.com/shurcooL/graphql/internal/jsonutil/BUILD index fb8c83e2d3b6..cd29c18e03e2 100644 --- 
a/vendor/github.com/shurcooL/graphql/internal/jsonutil/BUILD +++ b/vendor/github.com/shurcooL/graphql/internal/jsonutil/BUILD @@ -1,4 +1,4 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "go_default_library", @@ -7,6 +7,19 @@ go_library( visibility = ["//vendor/github.com/shurcooL/graphql:__subpackages__"], ) +go_test( + name = "go_default_xtest", + srcs = [ + "benchmark_test.go", + "graphql_test.go", + ], + importpath = "github.com/shurcooL/graphql/internal/jsonutil_test", + deps = [ + ":go_default_library", + "//vendor/github.com/shurcooL/graphql:go_default_library", + ], +) + filegroup( name = "package-srcs", srcs = glob(["**"]), diff --git a/vendor/github.com/shurcooL/graphql/internal/jsonutil/benchmark_test.go b/vendor/github.com/shurcooL/graphql/internal/jsonutil/benchmark_test.go new file mode 100644 index 000000000000..f8788b169d98 --- /dev/null +++ b/vendor/github.com/shurcooL/graphql/internal/jsonutil/benchmark_test.go @@ -0,0 +1,127 @@ +package jsonutil_test + +import ( + "encoding/json" + "io" + "reflect" + "strings" + "testing" + "time" + + "github.com/shurcooL/graphql" + "github.com/shurcooL/graphql/internal/jsonutil" +) + +func TestUnmarshalGraphQL_benchmark(t *testing.T) { + /* + query { + viewer { + login + createdAt + } + } + */ + type query struct { + Viewer struct { + Login graphql.String + CreatedAt time.Time + } + } + var got query + err := jsonutil.UnmarshalGraphQL([]byte(`{ + "viewer": { + "login": "shurcooL-test", + "createdAt": "2017-06-29T04:12:01Z" + } + }`), &got) + if err != nil { + t.Fatal(err) + } + var want query + want.Viewer.Login = "shurcooL-test" + want.Viewer.CreatedAt = time.Unix(1498709521, 0).UTC() + if !reflect.DeepEqual(got, want) { + t.Error("not equal") + } +} + +func BenchmarkUnmarshalGraphQL(b *testing.B) { + type query struct { + Viewer struct { + Login graphql.String + CreatedAt time.Time + } + } + for i := 0; i < b.N; i++ { + now := time.Now().UTC() + var got query + err := jsonutil.UnmarshalGraphQL([]byte(`{ + "viewer": { + "login": "shurcooL-test", + "createdAt": "`+now.Format(time.RFC3339Nano)+`" + } + }`), &got) + if err != nil { + b.Fatal(err) + } + var want query + want.Viewer.Login = "shurcooL-test" + want.Viewer.CreatedAt = now + if !reflect.DeepEqual(got, want) { + b.Error("not equal") + } + } +} + +func BenchmarkJSONUnmarshal(b *testing.B) { + type query struct { + Viewer struct { + Login graphql.String + CreatedAt time.Time + } + } + for i := 0; i < b.N; i++ { + now := time.Now().UTC() + var got query + err := json.Unmarshal([]byte(`{ + "viewer": { + "login": "shurcooL-test", + "createdAt": "`+now.Format(time.RFC3339Nano)+`" + } + }`), &got) + if err != nil { + b.Fatal(err) + } + var want query + want.Viewer.Login = "shurcooL-test" + want.Viewer.CreatedAt = now + if !reflect.DeepEqual(got, want) { + b.Error("not equal") + } + } +} + +func BenchmarkJSONTokenize(b *testing.B) { + for i := 0; i < b.N; i++ { + now := time.Now().UTC() + dec := json.NewDecoder(strings.NewReader(`{ + "viewer": { + "login": "shurcooL-test", + "createdAt": "` + now.Format(time.RFC3339Nano) + `" + } + }`)) + var tokens int + for { + _, err := dec.Token() + if err == io.EOF { + break + } else if err != nil { + b.Error(err) + } + tokens++ + } + if tokens != 9 { + b.Error("not 9 tokens") + } + } +} diff --git a/vendor/github.com/shurcooL/graphql/internal/jsonutil/graphql_test.go b/vendor/github.com/shurcooL/graphql/internal/jsonutil/graphql_test.go new file mode 
100644 index 000000000000..63b0650b3a91 --- /dev/null +++ b/vendor/github.com/shurcooL/graphql/internal/jsonutil/graphql_test.go @@ -0,0 +1,336 @@ +package jsonutil_test + +import ( + "reflect" + "testing" + "time" + + "github.com/shurcooL/graphql" + "github.com/shurcooL/graphql/internal/jsonutil" +) + +func TestUnmarshalGraphQL(t *testing.T) { + /* + query { + me { + name + height + } + } + */ + type query struct { + Me struct { + Name graphql.String + Height graphql.Float + } + } + var got query + err := jsonutil.UnmarshalGraphQL([]byte(`{ + "me": { + "name": "Luke Skywalker", + "height": 1.72 + } + }`), &got) + if err != nil { + t.Fatal(err) + } + var want query + want.Me.Name = "Luke Skywalker" + want.Me.Height = 1.72 + if !reflect.DeepEqual(got, want) { + t.Error("not equal") + } +} + +func TestUnmarshalGraphQL_graphqlTag(t *testing.T) { + type query struct { + Foo graphql.String `graphql:"baz"` + } + var got query + err := jsonutil.UnmarshalGraphQL([]byte(`{ + "baz": "bar" + }`), &got) + if err != nil { + t.Fatal(err) + } + want := query{ + Foo: "bar", + } + if !reflect.DeepEqual(got, want) { + t.Error("not equal") + } +} + +func TestUnmarshalGraphQL_jsonTag(t *testing.T) { + type query struct { + Foo graphql.String `json:"baz"` + } + var got query + err := jsonutil.UnmarshalGraphQL([]byte(`{ + "foo": "bar" + }`), &got) + if err != nil { + t.Fatal(err) + } + want := query{ + Foo: "bar", + } + if !reflect.DeepEqual(got, want) { + t.Error("not equal") + } +} + +func TestUnmarshalGraphQL_array(t *testing.T) { + type query struct { + Foo []graphql.String + } + var got query + err := jsonutil.UnmarshalGraphQL([]byte(`{ + "foo": [ + "bar", + "baz" + ] + }`), &got) + if err != nil { + t.Fatal(err) + } + want := query{ + Foo: []graphql.String{"bar", "baz"}, + } + if !reflect.DeepEqual(got, want) { + t.Error("not equal") + } +} + +// When unmarshaling into an array, its initial value should be overwritten +// (rather than appended to). +func TestUnmarshalGraphQL_arrayReset(t *testing.T) { + var got = []string{"initial"} + err := jsonutil.UnmarshalGraphQL([]byte(`["bar", "baz"]`), &got) + if err != nil { + t.Fatal(err) + } + want := []string{"bar", "baz"} + if !reflect.DeepEqual(got, want) { + t.Error("not equal") + } +} + +func TestUnmarshalGraphQL_objectArray(t *testing.T) { + type query struct { + Foo []struct { + Name graphql.String + } + } + var got query + err := jsonutil.UnmarshalGraphQL([]byte(`{ + "foo": [ + {"name": "bar"}, + {"name": "baz"} + ] + }`), &got) + if err != nil { + t.Fatal(err) + } + want := query{ + Foo: []struct{ Name graphql.String }{ + {"bar"}, + {"baz"}, + }, + } + if !reflect.DeepEqual(got, want) { + t.Error("not equal") + } +} + +func TestUnmarshalGraphQL_pointer(t *testing.T) { + type query struct { + Foo *graphql.String + Bar *graphql.String + } + var got query + got.Bar = new(graphql.String) // Test that got.Bar gets set to nil. 
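+ // Unmarshaling the JSON below should allocate Foo and reset the preset
+ // Bar pointer back to nil, since "bar" is null in the input.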
+ err := jsonutil.UnmarshalGraphQL([]byte(`{ + "foo": "foo", + "bar": null + }`), &got) + if err != nil { + t.Fatal(err) + } + want := query{ + Foo: graphql.NewString("foo"), + Bar: nil, + } + if !reflect.DeepEqual(got, want) { + t.Error("not equal") + } +} + +func TestUnmarshalGraphQL_objectPointerArray(t *testing.T) { + type query struct { + Foo []*struct { + Name graphql.String + } + } + var got query + err := jsonutil.UnmarshalGraphQL([]byte(`{ + "foo": [ + {"name": "bar"}, + null, + {"name": "baz"} + ] + }`), &got) + if err != nil { + t.Fatal(err) + } + want := query{ + Foo: []*struct{ Name graphql.String }{ + {"bar"}, + nil, + {"baz"}, + }, + } + if !reflect.DeepEqual(got, want) { + t.Error("not equal") + } +} + +func TestUnmarshalGraphQL_multipleValues(t *testing.T) { + type query struct { + Foo graphql.String + } + err := jsonutil.UnmarshalGraphQL([]byte(`{"foo": "bar"}{"foo": "baz"}`), new(query)) + if err == nil { + t.Fatal("got error: nil, want: non-nil") + } + if got, want := err.Error(), "invalid token '{' after top-level value"; got != want { + t.Errorf("got error: %v, want: %v", got, want) + } +} + +func TestUnmarshalGraphQL_union(t *testing.T) { + /* + { + __typename + ... on ClosedEvent { + createdAt + actor {login} + } + ... on ReopenedEvent { + createdAt + actor {login} + } + } + */ + type actor struct{ Login graphql.String } + type closedEvent struct { + Actor actor + CreatedAt time.Time + } + type reopenedEvent struct { + Actor actor + CreatedAt time.Time + } + type issueTimelineItem struct { + Typename string `graphql:"__typename"` + ClosedEvent closedEvent `graphql:"... on ClosedEvent"` + ReopenedEvent reopenedEvent `graphql:"... on ReopenedEvent"` + } + var got issueTimelineItem + err := jsonutil.UnmarshalGraphQL([]byte(`{ + "__typename": "ClosedEvent", + "createdAt": "2017-06-29T04:12:01Z", + "actor": { + "login": "shurcooL-test" + } + }`), &got) + if err != nil { + t.Fatal(err) + } + want := issueTimelineItem{ + Typename: "ClosedEvent", + ClosedEvent: closedEvent{ + Actor: actor{ + Login: "shurcooL-test", + }, + CreatedAt: time.Unix(1498709521, 0).UTC(), + }, + ReopenedEvent: reopenedEvent{ + Actor: actor{ + Login: "shurcooL-test", + }, + CreatedAt: time.Unix(1498709521, 0).UTC(), + }, + } + if !reflect.DeepEqual(got, want) { + t.Error("not equal") + } +} + +// Issue https://github.com/shurcooL/githubql/issues/18. +func TestUnmarshalGraphQL_arrayInsideInlineFragment(t *testing.T) { + /* + query { + search(type: ISSUE, first: 1, query: "type:pr repo:owner/name") { + nodes { + ... on PullRequest { + commits(last: 1) { + nodes { + url + } + } + } + } + } + } + */ + type query struct { + Search struct { + Nodes []struct { + PullRequest struct { + Commits struct { + Nodes []struct { + URL string `graphql:"url"` + } + } `graphql:"commits(last: 1)"` + } `graphql:"... on PullRequest"` + } + } `graphql:"search(type: ISSUE, first: 1, query: \"type:pr repo:owner/name\")"` + } + var got query + err := jsonutil.UnmarshalGraphQL([]byte(`{ + "search": { + "nodes": [ + { + "commits": { + "nodes": [ + { + "url": "https://example.org/commit/49e1" + } + ] + } + } + ] + } + }`), &got) + if err != nil { + t.Fatal(err) + } + var want query + want.Search.Nodes = make([]struct { + PullRequest struct { + Commits struct { + Nodes []struct { + URL string `graphql:"url"` + } + } `graphql:"commits(last: 1)"` + } `graphql:"... 
on PullRequest"` + }, 1) + want.Search.Nodes[0].PullRequest.Commits.Nodes = make([]struct { + URL string `graphql:"url"` + }, 1) + want.Search.Nodes[0].PullRequest.Commits.Nodes[0].URL = "https://example.org/commit/49e1" + if !reflect.DeepEqual(got, want) { + t.Error("not equal") + } +} diff --git a/vendor/github.com/shurcooL/graphql/query_test.go b/vendor/github.com/shurcooL/graphql/query_test.go new file mode 100644 index 000000000000..536b3008504e --- /dev/null +++ b/vendor/github.com/shurcooL/graphql/query_test.go @@ -0,0 +1,345 @@ +package graphql + +import ( + "net/url" + "reflect" + "testing" + "time" +) + +func TestConstructQuery(t *testing.T) { + tests := []struct { + inV interface{} + inVariables map[string]interface{} + want string + }{ + { + inV: struct { + Viewer struct { + Login String + CreatedAt DateTime + ID ID + DatabaseID Int + } + RateLimit struct { + Cost Int + Limit Int + Remaining Int + ResetAt DateTime + } + }{}, + want: `{viewer{login,createdAt,id,databaseId},rateLimit{cost,limit,remaining,resetAt}}`, + }, + { + inV: struct { + Repository struct { + DatabaseID Int + URL URI + + Issue struct { + Comments struct { + Edges []struct { + Node struct { + Body String + Author struct { + Login String + } + Editor struct { + Login String + } + } + Cursor String + } + } `graphql:"comments(first:1after:\"Y3Vyc29yOjE5NTE4NDI1Ng==\")"` + } `graphql:"issue(number:1)"` + } `graphql:"repository(owner:\"shurcooL-test\"name:\"test-repo\")"` + }{}, + want: `{repository(owner:"shurcooL-test"name:"test-repo"){databaseId,url,issue(number:1){comments(first:1after:"Y3Vyc29yOjE5NTE4NDI1Ng=="){edges{node{body,author{login},editor{login}},cursor}}}}}`, + }, + { + inV: func() interface{} { + type actor struct { + Login String + AvatarURL URI + URL URI + } + + return struct { + Repository struct { + DatabaseID Int + URL URI + + Issue struct { + Comments struct { + Edges []struct { + Node struct { + DatabaseID Int + Author actor + PublishedAt DateTime + LastEditedAt *DateTime + Editor *actor + Body String + ViewerCanUpdate Boolean + } + Cursor String + } + } `graphql:"comments(first:1)"` + } `graphql:"issue(number:1)"` + } `graphql:"repository(owner:\"shurcooL-test\"name:\"test-repo\")"` + }{} + }(), + want: `{repository(owner:"shurcooL-test"name:"test-repo"){databaseId,url,issue(number:1){comments(first:1){edges{node{databaseId,author{login,avatarUrl,url},publishedAt,lastEditedAt,editor{login,avatarUrl,url},body,viewerCanUpdate},cursor}}}}}`, + }, + { + inV: func() interface{} { + type actor struct { + Login String + AvatarURL URI `graphql:"avatarUrl(size:72)"` + URL URI + } + + return struct { + Repository struct { + Issue struct { + Author actor + PublishedAt DateTime + LastEditedAt *DateTime + Editor *actor + Body String + ReactionGroups []struct { + Content ReactionContent + Users struct { + TotalCount Int + } + ViewerHasReacted Boolean + } + ViewerCanUpdate Boolean + + Comments struct { + Nodes []struct { + DatabaseID Int + Author actor + PublishedAt DateTime + LastEditedAt *DateTime + Editor *actor + Body String + ReactionGroups []struct { + Content ReactionContent + Users struct { + TotalCount Int + } + ViewerHasReacted Boolean + } + ViewerCanUpdate Boolean + } + PageInfo struct { + EndCursor String + HasNextPage Boolean + } + } `graphql:"comments(first:1)"` + } `graphql:"issue(number:1)"` + } `graphql:"repository(owner:\"shurcooL-test\"name:\"test-repo\")"` + }{} + }(), + want: 
`{repository(owner:"shurcooL-test"name:"test-repo"){issue(number:1){author{login,avatarUrl(size:72),url},publishedAt,lastEditedAt,editor{login,avatarUrl(size:72),url},body,reactionGroups{content,users{totalCount},viewerHasReacted},viewerCanUpdate,comments(first:1){nodes{databaseId,author{login,avatarUrl(size:72),url},publishedAt,lastEditedAt,editor{login,avatarUrl(size:72),url},body,reactionGroups{content,users{totalCount},viewerHasReacted},viewerCanUpdate},pageInfo{endCursor,hasNextPage}}}}}`, + }, + { + inV: struct { + Repository struct { + Issue struct { + Body String + } `graphql:"issue(number: 1)"` + } `graphql:"repository(owner:\"shurcooL-test\"name:\"test-repo\")"` + }{}, + want: `{repository(owner:"shurcooL-test"name:"test-repo"){issue(number: 1){body}}}`, + }, + { + inV: struct { + Repository struct { + Issue struct { + Body String + } `graphql:"issue(number: $issueNumber)"` + } `graphql:"repository(owner: $repositoryOwner, name: $repositoryName)"` + }{}, + inVariables: map[string]interface{}{ + "repositoryOwner": String("shurcooL-test"), + "repositoryName": String("test-repo"), + "issueNumber": Int(1), + }, + want: `query($issueNumber:Int!$repositoryName:String!$repositoryOwner:String!){repository(owner: $repositoryOwner, name: $repositoryName){issue(number: $issueNumber){body}}}`, + }, + { + inV: struct { + Repository struct { + Issue struct { + ReactionGroups []struct { + Users struct { + Nodes []struct { + Login String + } + } `graphql:"users(first:10)"` + } + } `graphql:"issue(number: $issueNumber)"` + } `graphql:"repository(owner: $repositoryOwner, name: $repositoryName)"` + }{}, + inVariables: map[string]interface{}{ + "repositoryOwner": String("shurcooL-test"), + "repositoryName": String("test-repo"), + "issueNumber": Int(1), + }, + want: `query($issueNumber:Int!$repositoryName:String!$repositoryOwner:String!){repository(owner: $repositoryOwner, name: $repositoryName){issue(number: $issueNumber){reactionGroups{users(first:10){nodes{login}}}}}}`, + }, + // Embedded structs without graphql tag should be inlined in query. + { + inV: func() interface{} { + type actor struct { + Login String + AvatarURL URI + URL URI + } + type event struct { // Common fields for all events. + Actor actor + CreatedAt DateTime + } + type IssueComment struct { + Body String + } + return struct { + event // Should be inlined. + IssueComment `graphql:"... on IssueComment"` // Should not be, because of graphql tag. + CurrentTitle String + PreviousTitle String + Label struct { + Name String + Color String + } + }{} + }(), + want: `{actor{login,avatarUrl,url},createdAt,... 
on IssueComment{body},currentTitle,previousTitle,label{name,color}}`, + }, + } + for _, tc := range tests { + qctx := &queryContext{ + Scalars: []reflect.Type{ + reflect.TypeOf(DateTime{}), + reflect.TypeOf(URI{}), + }, + } + got := constructQuery(qctx, tc.inV, tc.inVariables) + if got != tc.want { + t.Errorf("\ngot: %q\nwant: %q\n", got, tc.want) + } + } +} + +func TestConstructMutation(t *testing.T) { + tests := []struct { + inV interface{} + inVariables map[string]interface{} + want string + }{ + { + inV: struct { + AddReaction struct { + Subject struct { + ReactionGroups []struct { + Users struct { + TotalCount Int + } + } + } + } `graphql:"addReaction(input:$input)"` + }{}, + inVariables: map[string]interface{}{ + "input": AddReactionInput{ + SubjectID: "MDU6SXNzdWUyMzE1MjcyNzk=", + Content: ReactionContentThumbsUp, + }, + }, + want: `mutation($input:AddReactionInput!){addReaction(input:$input){subject{reactionGroups{users{totalCount}}}}}`, + }, + } + for _, tc := range tests { + got := constructMutation(&queryContext{}, tc.inV, tc.inVariables) + if got != tc.want { + t.Errorf("\ngot: %q\nwant: %q\n", got, tc.want) + } + } +} + +func TestQueryArguments(t *testing.T) { + tests := []struct { + name string + in map[string]interface{} + want string + }{ + { + in: map[string]interface{}{"a": Int(123), "b": NewBoolean(true)}, + want: "$a:Int!$b:Boolean", + }, + { + in: map[string]interface{}{"states": []IssueState{IssueStateOpen, IssueStateClosed}}, + want: "$states:[IssueState!]", + }, + { + in: map[string]interface{}{"states": []IssueState(nil)}, + want: "$states:[IssueState!]", + }, + { + in: map[string]interface{}{"states": [...]IssueState{IssueStateOpen, IssueStateClosed}}, + want: "$states:[IssueState!]", + }, + { + in: map[string]interface{}{"id": ID("someid")}, + want: "$id:ID!", + }, + } + for _, tc := range tests { + got := queryArguments(tc.in) + if got != tc.want { + t.Errorf("%s: got: %q, want: %q", tc.name, got, tc.want) + } + } +} + +// Custom GraphQL types for testing. +type ( + // DateTime is an ISO-8601 encoded UTC date. + DateTime struct{ time.Time } + + // URI is an RFC 3986, RFC 3987, and RFC 6570 (level 4) compliant URI. + URI struct{ *url.URL } +) + +// IssueState represents the possible states of an issue. +type IssueState string + +// The possible states of an issue. +const ( + IssueStateOpen IssueState = "OPEN" // An issue that is still open. + IssueStateClosed IssueState = "CLOSED" // An issue that has been closed. +) + +// ReactionContent represents emojis that can be attached to Issues, Pull Requests and Comments. +type ReactionContent string + +// Emojis that can be attached to Issues, Pull Requests and Comments. +const ( + ReactionContentThumbsUp ReactionContent = "THUMBS_UP" // Represents the 👍 emoji. + ReactionContentThumbsDown ReactionContent = "THUMBS_DOWN" // Represents the 👎 emoji. + ReactionContentLaugh ReactionContent = "LAUGH" // Represents the 😄 emoji. + ReactionContentHooray ReactionContent = "HOORAY" // Represents the 🎉 emoji. + ReactionContentConfused ReactionContent = "CONFUSED" // Represents the 😕 emoji. + ReactionContentHeart ReactionContent = "HEART" // Represents the ❤️ emoji. +) + +// AddReactionInput is an autogenerated input type of AddReaction. +type AddReactionInput struct { + // The Node ID of the subject to modify. (Required.) + SubjectID ID `json:"subjectId"` + // The name of the emoji to react with. (Required.) + Content ReactionContent `json:"content"` + + // A unique identifier for the client performing the mutation. (Optional.) 
+ ClientMutationID *String `json:"clientMutationId,omitempty"` +} diff --git a/vendor/github.com/shurcooL/graphql/scalar_test.go b/vendor/github.com/shurcooL/graphql/scalar_test.go new file mode 100644 index 000000000000..8334b960167f --- /dev/null +++ b/vendor/github.com/shurcooL/graphql/scalar_test.go @@ -0,0 +1,30 @@ +package graphql_test + +import ( + "testing" + + "github.com/shurcooL/graphql" +) + +func TestNewScalars(t *testing.T) { + if got := graphql.NewBoolean(false); got == nil { + t.Error("NewBoolean returned nil") + } + if got := graphql.NewFloat(0.0); got == nil { + t.Error("NewFloat returned nil") + } + // ID with underlying type string. + if got := graphql.NewID(""); got == nil { + t.Error("NewID returned nil") + } + // ID with underlying type int. + if got := graphql.NewID(0); got == nil { + t.Error("NewID returned nil") + } + if got := graphql.NewInt(0); got == nil { + t.Error("NewInt returned nil") + } + if got := graphql.NewString(""); got == nil { + t.Error("NewString returned nil") + } +} diff --git a/vendor/github.com/sirupsen/logrus/BUILD b/vendor/github.com/sirupsen/logrus/BUILD index e8a7a25cb617..83e0e4e53fab 100644 --- a/vendor/github.com/sirupsen/logrus/BUILD +++ b/vendor/github.com/sirupsen/logrus/BUILD @@ -1,9 +1,4 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "go_default_library", @@ -34,6 +29,23 @@ go_library( "//conditions:default": [], }), importpath = "github.com/sirupsen/logrus", + visibility = ["//visibility:public"], +) + +go_test( + name = "go_default_test", + srcs = [ + "alt_exit_test.go", + "entry_test.go", + "formatter_bench_test.go", + "hook_test.go", + "json_formatter_test.go", + "logrus_test.go", + "text_formatter_test.go", + ], + importpath = "github.com/sirupsen/logrus", + library = ":go_default_library", + deps = ["//vendor/github.com/stretchr/testify/assert:go_default_library"], ) filegroup( @@ -47,4 +59,5 @@ filegroup( name = "all-srcs", srcs = [":package-srcs"], tags = ["automanaged"], + visibility = ["//visibility:public"], ) diff --git a/vendor/github.com/sirupsen/logrus/alt_exit_test.go b/vendor/github.com/sirupsen/logrus/alt_exit_test.go new file mode 100644 index 000000000000..022b778303b7 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/alt_exit_test.go @@ -0,0 +1,74 @@ +package logrus + +import ( + "io/ioutil" + "os/exec" + "testing" + "time" +) + +func TestRegister(t *testing.T) { + current := len(handlers) + RegisterExitHandler(func() {}) + if len(handlers) != current+1 { + t.Fatalf("can't add handler") + } +} + +func TestHandler(t *testing.T) { + gofile := "/tmp/testprog.go" + if err := ioutil.WriteFile(gofile, testprog, 0666); err != nil { + t.Fatalf("can't create go file") + } + + outfile := "/tmp/testprog.out" + arg := time.Now().UTC().String() + err := exec.Command("go", "run", gofile, outfile, arg).Run() + if err == nil { + t.Fatalf("completed normally, should have failed") + } + + data, err := ioutil.ReadFile(outfile) + if err != nil { + t.Fatalf("can't read output file %s", outfile) + } + + if string(data) != arg { + t.Fatalf("bad data") + } +} + +var testprog = []byte(` +// Test program for atexit, gets output file and data as arguments and writes +// data to output file in atexit handler. 
+package main + +import ( + "github.com/Sirupsen/logrus" + "flag" + "fmt" + "io/ioutil" +) + +var outfile = "" +var data = "" + +func handler() { + ioutil.WriteFile(outfile, []byte(data), 0666) +} + +func badHandler() { + n := 0 + fmt.Println(1/n) +} + +func main() { + flag.Parse() + outfile = flag.Arg(0) + data = flag.Arg(1) + + logrus.RegisterExitHandler(handler) + logrus.RegisterExitHandler(badHandler) + logrus.Fatal("Bye bye") +} +`) diff --git a/vendor/github.com/sirupsen/logrus/entry_test.go b/vendor/github.com/sirupsen/logrus/entry_test.go new file mode 100644 index 000000000000..99c3b41d5f57 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/entry_test.go @@ -0,0 +1,77 @@ +package logrus + +import ( + "bytes" + "fmt" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestEntryWithError(t *testing.T) { + + assert := assert.New(t) + + defer func() { + ErrorKey = "error" + }() + + err := fmt.Errorf("kaboom at layer %d", 4711) + + assert.Equal(err, WithError(err).Data["error"]) + + logger := New() + logger.Out = &bytes.Buffer{} + entry := NewEntry(logger) + + assert.Equal(err, entry.WithError(err).Data["error"]) + + ErrorKey = "err" + + assert.Equal(err, entry.WithError(err).Data["err"]) + +} + +func TestEntryPanicln(t *testing.T) { + errBoom := fmt.Errorf("boom time") + + defer func() { + p := recover() + assert.NotNil(t, p) + + switch pVal := p.(type) { + case *Entry: + assert.Equal(t, "kaboom", pVal.Message) + assert.Equal(t, errBoom, pVal.Data["err"]) + default: + t.Fatalf("want type *Entry, got %T: %#v", pVal, pVal) + } + }() + + logger := New() + logger.Out = &bytes.Buffer{} + entry := NewEntry(logger) + entry.WithField("err", errBoom).Panicln("kaboom") +} + +func TestEntryPanicf(t *testing.T) { + errBoom := fmt.Errorf("boom again") + + defer func() { + p := recover() + assert.NotNil(t, p) + + switch pVal := p.(type) { + case *Entry: + assert.Equal(t, "kaboom true", pVal.Message) + assert.Equal(t, errBoom, pVal.Data["err"]) + default: + t.Fatalf("want type *Entry, got %T: %#v", pVal, pVal) + } + }() + + logger := New() + logger.Out = &bytes.Buffer{} + entry := NewEntry(logger) + entry.WithField("err", errBoom).Panicf("kaboom %v", true) +} diff --git a/vendor/github.com/sirupsen/logrus/formatter_bench_test.go b/vendor/github.com/sirupsen/logrus/formatter_bench_test.go new file mode 100644 index 000000000000..c6d290c77f09 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/formatter_bench_test.go @@ -0,0 +1,98 @@ +package logrus + +import ( + "fmt" + "testing" + "time" +) + +// smallFields is a small size data set for benchmarking +var smallFields = Fields{ + "foo": "bar", + "baz": "qux", + "one": "two", + "three": "four", +} + +// largeFields is a large size data set for benchmarking +var largeFields = Fields{ + "foo": "bar", + "baz": "qux", + "one": "two", + "three": "four", + "five": "six", + "seven": "eight", + "nine": "ten", + "eleven": "twelve", + "thirteen": "fourteen", + "fifteen": "sixteen", + "seventeen": "eighteen", + "nineteen": "twenty", + "a": "b", + "c": "d", + "e": "f", + "g": "h", + "i": "j", + "k": "l", + "m": "n", + "o": "p", + "q": "r", + "s": "t", + "u": "v", + "w": "x", + "y": "z", + "this": "will", + "make": "thirty", + "entries": "yeah", +} + +var errorFields = Fields{ + "foo": fmt.Errorf("bar"), + "baz": fmt.Errorf("qux"), +} + +func BenchmarkErrorTextFormatter(b *testing.B) { + doBenchmark(b, &TextFormatter{DisableColors: true}, errorFields) +} + +func BenchmarkSmallTextFormatter(b *testing.B) { + doBenchmark(b, 
&TextFormatter{DisableColors: true}, smallFields) +} + +func BenchmarkLargeTextFormatter(b *testing.B) { + doBenchmark(b, &TextFormatter{DisableColors: true}, largeFields) +} + +func BenchmarkSmallColoredTextFormatter(b *testing.B) { + doBenchmark(b, &TextFormatter{ForceColors: true}, smallFields) +} + +func BenchmarkLargeColoredTextFormatter(b *testing.B) { + doBenchmark(b, &TextFormatter{ForceColors: true}, largeFields) +} + +func BenchmarkSmallJSONFormatter(b *testing.B) { + doBenchmark(b, &JSONFormatter{}, smallFields) +} + +func BenchmarkLargeJSONFormatter(b *testing.B) { + doBenchmark(b, &JSONFormatter{}, largeFields) +} + +func doBenchmark(b *testing.B, formatter Formatter, fields Fields) { + entry := &Entry{ + Time: time.Time{}, + Level: InfoLevel, + Message: "message", + Data: fields, + } + var d []byte + var err error + for i := 0; i < b.N; i++ { + d, err = formatter.Format(entry) + if err != nil { + b.Fatal(err) + } + b.SetBytes(int64(len(d))) + } +} diff --git a/vendor/github.com/sirupsen/logrus/hook_test.go b/vendor/github.com/sirupsen/logrus/hook_test.go new file mode 100644 index 000000000000..13f34cb6f815 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/hook_test.go @@ -0,0 +1,122 @@ +package logrus + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +type TestHook struct { + Fired bool +} + +func (hook *TestHook) Fire(entry *Entry) error { + hook.Fired = true + return nil +} + +func (hook *TestHook) Levels() []Level { + return []Level{ + DebugLevel, + InfoLevel, + WarnLevel, + ErrorLevel, + FatalLevel, + PanicLevel, + } +} + +func TestHookFires(t *testing.T) { + hook := new(TestHook) + + LogAndAssertJSON(t, func(log *Logger) { + log.Hooks.Add(hook) + assert.Equal(t, hook.Fired, false) + + log.Print("test") + }, func(fields Fields) { + assert.Equal(t, hook.Fired, true) + }) +} + +type ModifyHook struct { +} + +func (hook *ModifyHook) Fire(entry *Entry) error { + entry.Data["wow"] = "whale" + return nil +} + +func (hook *ModifyHook) Levels() []Level { + return []Level{ + DebugLevel, + InfoLevel, + WarnLevel, + ErrorLevel, + FatalLevel, + PanicLevel, + } +} + +func TestHookCanModifyEntry(t *testing.T) { + hook := new(ModifyHook) + + LogAndAssertJSON(t, func(log *Logger) { + log.Hooks.Add(hook) + log.WithField("wow", "elephant").Print("test") + }, func(fields Fields) { + assert.Equal(t, fields["wow"], "whale") + }) +} + +func TestCanFireMultipleHooks(t *testing.T) { + hook1 := new(ModifyHook) + hook2 := new(TestHook) + + LogAndAssertJSON(t, func(log *Logger) { + log.Hooks.Add(hook1) + log.Hooks.Add(hook2) + + log.WithField("wow", "elephant").Print("test") + }, func(fields Fields) { + assert.Equal(t, fields["wow"], "whale") + assert.Equal(t, hook2.Fired, true) + }) +} + +type ErrorHook struct { + Fired bool +} + +func (hook *ErrorHook) Fire(entry *Entry) error { + hook.Fired = true + return nil +} + +func (hook *ErrorHook) Levels() []Level { + return []Level{ + ErrorLevel, + } +} + +func TestErrorHookShouldntFireOnInfo(t *testing.T) { + hook := new(ErrorHook) + + LogAndAssertJSON(t, func(log *Logger) { + log.Hooks.Add(hook) + log.Info("test") + }, func(fields Fields) { + assert.Equal(t, hook.Fired, false) + }) +} + +func TestErrorHookShouldFireOnError(t *testing.T) { + hook := new(ErrorHook) + + LogAndAssertJSON(t, func(log *Logger) { + log.Hooks.Add(hook) + log.Error("test") + }, func(fields Fields) { + assert.Equal(t, hook.Fired, true) + }) +} diff --git a/vendor/github.com/sirupsen/logrus/json_formatter_test.go 
b/vendor/github.com/sirupsen/logrus/json_formatter_test.go new file mode 100644 index 000000000000..1d70873254d7 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/json_formatter_test.go @@ -0,0 +1,120 @@ +package logrus + +import ( + "encoding/json" + "errors" + + "testing" +) + +func TestErrorNotLost(t *testing.T) { + formatter := &JSONFormatter{} + + b, err := formatter.Format(WithField("error", errors.New("wild walrus"))) + if err != nil { + t.Fatal("Unable to format entry: ", err) + } + + entry := make(map[string]interface{}) + err = json.Unmarshal(b, &entry) + if err != nil { + t.Fatal("Unable to unmarshal formatted entry: ", err) + } + + if entry["error"] != "wild walrus" { + t.Fatal("Error field not set") + } +} + +func TestErrorNotLostOnFieldNotNamedError(t *testing.T) { + formatter := &JSONFormatter{} + + b, err := formatter.Format(WithField("omg", errors.New("wild walrus"))) + if err != nil { + t.Fatal("Unable to format entry: ", err) + } + + entry := make(map[string]interface{}) + err = json.Unmarshal(b, &entry) + if err != nil { + t.Fatal("Unable to unmarshal formatted entry: ", err) + } + + if entry["omg"] != "wild walrus" { + t.Fatal("Error field not set") + } +} + +func TestFieldClashWithTime(t *testing.T) { + formatter := &JSONFormatter{} + + b, err := formatter.Format(WithField("time", "right now!")) + if err != nil { + t.Fatal("Unable to format entry: ", err) + } + + entry := make(map[string]interface{}) + err = json.Unmarshal(b, &entry) + if err != nil { + t.Fatal("Unable to unmarshal formatted entry: ", err) + } + + if entry["fields.time"] != "right now!" { + t.Fatal("fields.time not set to original time field") + } + + if entry["time"] != "0001-01-01T00:00:00Z" { + t.Fatal("time field not set to current time, was: ", entry["time"]) + } +} + +func TestFieldClashWithMsg(t *testing.T) { + formatter := &JSONFormatter{} + + b, err := formatter.Format(WithField("msg", "something")) + if err != nil { + t.Fatal("Unable to format entry: ", err) + } + + entry := make(map[string]interface{}) + err = json.Unmarshal(b, &entry) + if err != nil { + t.Fatal("Unable to unmarshal formatted entry: ", err) + } + + if entry["fields.msg"] != "something" { + t.Fatal("fields.msg not set to original msg field") + } +} + +func TestFieldClashWithLevel(t *testing.T) { + formatter := &JSONFormatter{} + + b, err := formatter.Format(WithField("level", "something")) + if err != nil { + t.Fatal("Unable to format entry: ", err) + } + + entry := make(map[string]interface{}) + err = json.Unmarshal(b, &entry) + if err != nil { + t.Fatal("Unable to unmarshal formatted entry: ", err) + } + + if entry["fields.level"] != "something" { + t.Fatal("fields.level not set to original level field") + } +} + +func TestJSONEntryEndsWithNewline(t *testing.T) { + formatter := &JSONFormatter{} + + b, err := formatter.Format(WithField("level", "something")) + if err != nil { + t.Fatal("Unable to format entry: ", err) + } + + if b[len(b)-1] != '\n' { + t.Fatal("Expected JSON log entry to end with a newline") + } +} diff --git a/vendor/github.com/sirupsen/logrus/logrus_test.go b/vendor/github.com/sirupsen/logrus/logrus_test.go new file mode 100644 index 000000000000..bfc478055ea7 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/logrus_test.go @@ -0,0 +1,361 @@ +package logrus + +import ( + "bytes" + "encoding/json" + "strconv" + "strings" + "sync" + "testing" + + "github.com/stretchr/testify/assert" +) + +func LogAndAssertJSON(t *testing.T, log func(*Logger), assertions func(fields Fields)) { + var buffer 
bytes.Buffer + var fields Fields + + logger := New() + logger.Out = &buffer + logger.Formatter = new(JSONFormatter) + + log(logger) + + err := json.Unmarshal(buffer.Bytes(), &fields) + assert.Nil(t, err) + + assertions(fields) +} + +func LogAndAssertText(t *testing.T, log func(*Logger), assertions func(fields map[string]string)) { + var buffer bytes.Buffer + + logger := New() + logger.Out = &buffer + logger.Formatter = &TextFormatter{ + DisableColors: true, + } + + log(logger) + + fields := make(map[string]string) + for _, kv := range strings.Split(buffer.String(), " ") { + if !strings.Contains(kv, "=") { + continue + } + kvArr := strings.Split(kv, "=") + key := strings.TrimSpace(kvArr[0]) + val := kvArr[1] + if kvArr[1][0] == '"' { + var err error + val, err = strconv.Unquote(val) + assert.NoError(t, err) + } + fields[key] = val + } + assertions(fields) +} + +func TestPrint(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.Print("test") + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "test") + assert.Equal(t, fields["level"], "info") + }) +} + +func TestInfo(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.Info("test") + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "test") + assert.Equal(t, fields["level"], "info") + }) +} + +func TestWarn(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.Warn("test") + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "test") + assert.Equal(t, fields["level"], "warning") + }) +} + +func TestInfolnShouldAddSpacesBetweenStrings(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.Infoln("test", "test") + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "test test") + }) +} + +func TestInfolnShouldAddSpacesBetweenStringAndNonstring(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.Infoln("test", 10) + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "test 10") + }) +} + +func TestInfolnShouldAddSpacesBetweenTwoNonStrings(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.Infoln(10, 10) + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "10 10") + }) +} + +func TestInfoShouldAddSpacesBetweenTwoNonStrings(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.Infoln(10, 10) + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "10 10") + }) +} + +func TestInfoShouldNotAddSpacesBetweenStringAndNonstring(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.Info("test", 10) + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "test10") + }) +} + +func TestInfoShouldNotAddSpacesBetweenStrings(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.Info("test", "test") + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "testtest") + }) +} + +func TestWithFieldsShouldAllowAssignments(t *testing.T) { + var buffer bytes.Buffer + var fields Fields + + logger := New() + logger.Out = &buffer + logger.Formatter = new(JSONFormatter) + + localLog := logger.WithFields(Fields{ + "key1": "value1", + }) + + localLog.WithField("key2", "value2").Info("test") + err := json.Unmarshal(buffer.Bytes(), &fields) + assert.Nil(t, err) + + assert.Equal(t, "value2", fields["key2"]) + assert.Equal(t, "value1", fields["key1"]) + + buffer = bytes.Buffer{} + fields = Fields{} + localLog.Info("test") + err = json.Unmarshal(buffer.Bytes(), &fields) + assert.Nil(t, err) + + _, ok := fields["key2"] + assert.Equal(t, false, ok) + assert.Equal(t, "value1", fields["key1"]) +} + +func 
TestUserSuppliedFieldDoesNotOverwriteDefaults(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.WithField("msg", "hello").Info("test") + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "test") + }) +} + +func TestUserSuppliedMsgFieldHasPrefix(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.WithField("msg", "hello").Info("test") + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "test") + assert.Equal(t, fields["fields.msg"], "hello") + }) +} + +func TestUserSuppliedTimeFieldHasPrefix(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.WithField("time", "hello").Info("test") + }, func(fields Fields) { + assert.Equal(t, fields["fields.time"], "hello") + }) +} + +func TestUserSuppliedLevelFieldHasPrefix(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.WithField("level", 1).Info("test") + }, func(fields Fields) { + assert.Equal(t, fields["level"], "info") + assert.Equal(t, fields["fields.level"], 1.0) // JSON has floats only + }) +} + +func TestDefaultFieldsAreNotPrefixed(t *testing.T) { + LogAndAssertText(t, func(log *Logger) { + ll := log.WithField("herp", "derp") + ll.Info("hello") + ll.Info("bye") + }, func(fields map[string]string) { + for _, fieldName := range []string{"fields.level", "fields.time", "fields.msg"} { + if _, ok := fields[fieldName]; ok { + t.Fatalf("should not have prefixed %q: %v", fieldName, fields) + } + } + }) +} + +func TestDoubleLoggingDoesntPrefixPreviousFields(t *testing.T) { + + var buffer bytes.Buffer + var fields Fields + + logger := New() + logger.Out = &buffer + logger.Formatter = new(JSONFormatter) + + llog := logger.WithField("context", "eating raw fish") + + llog.Info("looks delicious") + + err := json.Unmarshal(buffer.Bytes(), &fields) + assert.NoError(t, err, "should have decoded first message") + assert.Equal(t, len(fields), 4, "should only have msg/time/level/context fields") + assert.Equal(t, fields["msg"], "looks delicious") + assert.Equal(t, fields["context"], "eating raw fish") + + buffer.Reset() + + llog.Warn("omg it is!") + + err = json.Unmarshal(buffer.Bytes(), &fields) + assert.NoError(t, err, "should have decoded second message") + assert.Equal(t, len(fields), 4, "should only have msg/time/level/context fields") + assert.Equal(t, fields["msg"], "omg it is!") + assert.Equal(t, fields["context"], "eating raw fish") + assert.Nil(t, fields["fields.msg"], "should not have prefixed previous `msg` entry") + +} + +func TestConvertLevelToString(t *testing.T) { + assert.Equal(t, "debug", DebugLevel.String()) + assert.Equal(t, "info", InfoLevel.String()) + assert.Equal(t, "warning", WarnLevel.String()) + assert.Equal(t, "error", ErrorLevel.String()) + assert.Equal(t, "fatal", FatalLevel.String()) + assert.Equal(t, "panic", PanicLevel.String()) +} + +func TestParseLevel(t *testing.T) { + l, err := ParseLevel("panic") + assert.Nil(t, err) + assert.Equal(t, PanicLevel, l) + + l, err = ParseLevel("PANIC") + assert.Nil(t, err) + assert.Equal(t, PanicLevel, l) + + l, err = ParseLevel("fatal") + assert.Nil(t, err) + assert.Equal(t, FatalLevel, l) + + l, err = ParseLevel("FATAL") + assert.Nil(t, err) + assert.Equal(t, FatalLevel, l) + + l, err = ParseLevel("error") + assert.Nil(t, err) + assert.Equal(t, ErrorLevel, l) + + l, err = ParseLevel("ERROR") + assert.Nil(t, err) + assert.Equal(t, ErrorLevel, l) + + l, err = ParseLevel("warn") + assert.Nil(t, err) + assert.Equal(t, WarnLevel, l) + + l, err = ParseLevel("WARN") + assert.Nil(t, err) + assert.Equal(t, WarnLevel, l) + + l, err = 
ParseLevel("warning") + assert.Nil(t, err) + assert.Equal(t, WarnLevel, l) + + l, err = ParseLevel("WARNING") + assert.Nil(t, err) + assert.Equal(t, WarnLevel, l) + + l, err = ParseLevel("info") + assert.Nil(t, err) + assert.Equal(t, InfoLevel, l) + + l, err = ParseLevel("INFO") + assert.Nil(t, err) + assert.Equal(t, InfoLevel, l) + + l, err = ParseLevel("debug") + assert.Nil(t, err) + assert.Equal(t, DebugLevel, l) + + l, err = ParseLevel("DEBUG") + assert.Nil(t, err) + assert.Equal(t, DebugLevel, l) + + l, err = ParseLevel("invalid") + assert.Equal(t, "not a valid logrus Level: \"invalid\"", err.Error()) +} + +func TestGetSetLevelRace(t *testing.T) { + wg := sync.WaitGroup{} + for i := 0; i < 100; i++ { + wg.Add(1) + go func(i int) { + defer wg.Done() + if i%2 == 0 { + SetLevel(InfoLevel) + } else { + GetLevel() + } + }(i) + + } + wg.Wait() +} + +func TestLoggingRace(t *testing.T) { + logger := New() + + var wg sync.WaitGroup + wg.Add(100) + + for i := 0; i < 100; i++ { + go func() { + logger.Info("info") + wg.Done() + }() + } + wg.Wait() +} + +// Compile test +func TestLogrusInterface(t *testing.T) { + var buffer bytes.Buffer + fn := func(l FieldLogger) { + b := l.WithField("key", "value") + b.Debug("Test") + } + // test logger + logger := New() + logger.Out = &buffer + fn(logger) + + // test Entry + e := logger.WithField("another", "value") + fn(e) +} diff --git a/vendor/github.com/sirupsen/logrus/text_formatter_test.go b/vendor/github.com/sirupsen/logrus/text_formatter_test.go new file mode 100644 index 000000000000..e25a44f67bfe --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/text_formatter_test.go @@ -0,0 +1,61 @@ +package logrus + +import ( + "bytes" + "errors" + "testing" + "time" +) + +func TestQuoting(t *testing.T) { + tf := &TextFormatter{DisableColors: true} + + checkQuoting := func(q bool, value interface{}) { + b, _ := tf.Format(WithField("test", value)) + idx := bytes.Index(b, ([]byte)("test=")) + cont := bytes.Contains(b[idx+5:], []byte{'"'}) + if cont != q { + if q { + t.Errorf("quoting expected for: %#v", value) + } else { + t.Errorf("quoting not expected for: %#v", value) + } + } + } + + checkQuoting(false, "abcd") + checkQuoting(false, "v1.0") + checkQuoting(false, "1234567890") + checkQuoting(true, "/foobar") + checkQuoting(true, "x y") + checkQuoting(true, "x,y") + checkQuoting(false, errors.New("invalid")) + checkQuoting(true, errors.New("invalid argument")) +} + +func TestTimestampFormat(t *testing.T) { + checkTimeStr := func(format string) { + customFormatter := &TextFormatter{DisableColors: true, TimestampFormat: format} + customStr, _ := customFormatter.Format(WithField("test", "test")) + timeStart := bytes.Index(customStr, ([]byte)("time=")) + timeEnd := bytes.Index(customStr, ([]byte)("level=")) + timeStr := customStr[timeStart+5 : timeEnd-1] + if timeStr[0] == '"' && timeStr[len(timeStr)-1] == '"' { + timeStr = timeStr[1 : len(timeStr)-1] + } + if format == "" { + format = time.RFC3339 + } + _, e := time.Parse(format, (string)(timeStr)) + if e != nil { + t.Errorf("time string \"%s\" did not match provided time format \"%s\": %s", timeStr, format, e) + } + } + + checkTimeStr("2006-01-02T15:04:05.000000000Z07:00") + checkTimeStr("Mon Jan _2 15:04:05 2006") + checkTimeStr("") +} + +// TODO add tests for sorting etc., this requires a parser for the text +// formatter output. 
diff --git a/vendor/github.com/spf13/cobra/BUILD b/vendor/github.com/spf13/cobra/BUILD index fe984556dc71..bbd3bb8e294b 100644 --- a/vendor/github.com/spf13/cobra/BUILD +++ b/vendor/github.com/spf13/cobra/BUILD @@ -1,9 +1,4 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "go_default_library", @@ -19,6 +14,7 @@ go_library( "//conditions:default": [], }), importpath = "github.com/spf13/cobra", + visibility = ["//visibility:public"], deps = [ "//vendor/github.com/spf13/pflag:go_default_library", ] + select({ @@ -29,6 +25,18 @@ go_library( }), ) +go_test( + name = "go_default_test", + srcs = [ + "bash_completions_test.go", + "cobra_test.go", + "command_test.go", + ], + importpath = "github.com/spf13/cobra", + library = ":go_default_library", + deps = ["//vendor/github.com/spf13/pflag:go_default_library"], +) + filegroup( name = "package-srcs", srcs = glob(["**"]), @@ -40,4 +48,5 @@ filegroup( name = "all-srcs", srcs = [":package-srcs"], tags = ["automanaged"], + visibility = ["//visibility:public"], ) diff --git a/vendor/github.com/spf13/cobra/bash_completions_test.go b/vendor/github.com/spf13/cobra/bash_completions_test.go new file mode 100644 index 000000000000..6957f8bd8be3 --- /dev/null +++ b/vendor/github.com/spf13/cobra/bash_completions_test.go @@ -0,0 +1,140 @@ +package cobra + +import ( + "bytes" + "fmt" + "os" + "os/exec" + "strings" + "testing" +) + +var _ = fmt.Println +var _ = os.Stderr + +func checkOmit(t *testing.T, found, unexpected string) { + if strings.Contains(found, unexpected) { + t.Errorf("Unexpected response.\nGot: %q\nBut should not have!\n", unexpected) + } +} + +func check(t *testing.T, found, expected string) { + if !strings.Contains(found, expected) { + t.Errorf("Unexpected response.\nExpecting to contain: \n %q\nGot:\n %q\n", expected, found) + } +} + +func runShellCheck(s string) error { + excluded := []string{ + "SC2034", // PREFIX appears unused. Verify it or export it. + } + cmd := exec.Command("shellcheck", "-s", "bash", "-", "-e", strings.Join(excluded, ",")) + cmd.Stderr = os.Stderr + cmd.Stdout = os.Stdout + + stdin, err := cmd.StdinPipe() + if err != nil { + return err + } + go func() { + defer stdin.Close() + stdin.Write([]byte(s)) + }() + + return cmd.Run() +} + +// World worst custom function, just keep telling you to enter hello! 
+const ( + bashCompletionFunc = `__custom_func() { +COMPREPLY=( "hello" ) +} +` +) + +func TestBashCompletions(t *testing.T) { + c := initializeWithRootCmd() + cmdEcho.AddCommand(cmdTimes) + c.AddCommand(cmdEcho, cmdPrint, cmdDeprecated, cmdColon) + + // custom completion function + c.BashCompletionFunction = bashCompletionFunc + + // required flag + c.MarkFlagRequired("introot") + + // valid nouns + validArgs := []string{"pod", "node", "service", "replicationcontroller"} + c.ValidArgs = validArgs + + // noun aliases + argAliases := []string{"pods", "nodes", "services", "replicationcontrollers", "po", "no", "svc", "rc"} + c.ArgAliases = argAliases + + // filename + var flagval string + c.Flags().StringVar(&flagval, "filename", "", "Enter a filename") + c.MarkFlagFilename("filename", "json", "yaml", "yml") + + // persistent filename + var flagvalPersistent string + c.PersistentFlags().StringVar(&flagvalPersistent, "persistent-filename", "", "Enter a filename") + c.MarkPersistentFlagFilename("persistent-filename") + c.MarkPersistentFlagRequired("persistent-filename") + + // filename extensions + var flagvalExt string + c.Flags().StringVar(&flagvalExt, "filename-ext", "", "Enter a filename (extension limited)") + c.MarkFlagFilename("filename-ext") + + // filename extensions + var flagvalCustom string + c.Flags().StringVar(&flagvalCustom, "custom", "", "Enter a filename (extension limited)") + c.MarkFlagCustom("custom", "__complete_custom") + + // subdirectories in a given directory + var flagvalTheme string + c.Flags().StringVar(&flagvalTheme, "theme", "", "theme to use (located in /themes/THEMENAME/)") + c.Flags().SetAnnotation("theme", BashCompSubdirsInDir, []string{"themes"}) + + out := new(bytes.Buffer) + c.GenBashCompletion(out) + str := out.String() + + check(t, str, "_cobra-test") + check(t, str, "_cobra-test_echo") + check(t, str, "_cobra-test_echo_times") + check(t, str, "_cobra-test_print") + check(t, str, "_cobra-test_cmd__colon") + + // check for required flags + check(t, str, `must_have_one_flag+=("--introot=")`) + check(t, str, `must_have_one_flag+=("--persistent-filename=")`) + // check for custom completion function + check(t, str, `COMPREPLY=( "hello" )`) + // check for required nouns + check(t, str, `must_have_one_noun+=("pod")`) + // check for noun aliases + check(t, str, `noun_aliases+=("pods")`) + check(t, str, `noun_aliases+=("rc")`) + checkOmit(t, str, `must_have_one_noun+=("pods")`) + // check for filename extension flags + check(t, str, `flags_completion+=("_filedir")`) + // check for filename extension flags + check(t, str, `flags_completion+=("__handle_filename_extension_flag json|yaml|yml")`) + // check for custom flags + check(t, str, `flags_completion+=("__complete_custom")`) + // check for subdirs_in_dir flags + check(t, str, `flags_completion+=("__handle_subdirs_in_dir_flag themes")`) + + checkOmit(t, str, cmdDeprecated.Name()) + + // if available, run shellcheck against the script + if err := exec.Command("which", "shellcheck").Run(); err != nil { + return + } + err := runShellCheck(str) + if err != nil { + t.Fatalf("shellcheck failed: %v", err) + } +} diff --git a/vendor/github.com/spf13/cobra/cobra_test.go b/vendor/github.com/spf13/cobra/cobra_test.go new file mode 100644 index 000000000000..97090b107c13 --- /dev/null +++ b/vendor/github.com/spf13/cobra/cobra_test.go @@ -0,0 +1,1188 @@ +package cobra + +import ( + "bytes" + "fmt" + "os" + "reflect" + "runtime" + "strings" + "testing" + "text/template" + + "github.com/spf13/pflag" +) + +var _ = fmt.Println 
+var _ = os.Stderr + +var tp, te, tt, t1, tr []string +var rootPersPre, echoPre, echoPersPre, timesPersPre []string +var flagb1, flagb2, flagb3, flagbr, flagbp bool +var flags1, flags2a, flags2b, flags3, outs string +var flagi1, flagi2, flagi3, flagi4, flagir int +var globalFlag1 bool +var flagEcho, rootcalled bool +var versionUsed int + +const strtwoParentHelp = "help message for parent flag strtwo" +const strtwoChildHelp = "help message for child flag strtwo" + +var cmdHidden = &Command{ + Use: "hide [secret string to print]", + Short: "Print anything to screen (if command is known)", + Long: `an absolutely utterly useless command for testing.`, + Run: func(cmd *Command, args []string) { + outs = "hidden" + }, + Hidden: true, +} + +var cmdPrint = &Command{ + Use: "print [string to print]", + Short: "Print anything to the screen", + Long: `an absolutely utterly useless command for testing.`, + Run: func(cmd *Command, args []string) { + tp = args + }, +} + +var cmdEcho = &Command{ + Use: "echo [string to echo]", + Aliases: []string{"say"}, + Short: "Echo anything to the screen", + Long: `an utterly useless command for testing.`, + Example: "Just run cobra-test echo", + PersistentPreRun: func(cmd *Command, args []string) { + echoPersPre = args + }, + PreRun: func(cmd *Command, args []string) { + echoPre = args + }, + Run: func(cmd *Command, args []string) { + te = args + }, +} + +var cmdEchoSub = &Command{ + Use: "echosub [string to print]", + Short: "second sub command for echo", + Long: `an absolutely utterly useless command for testing gendocs!.`, + Run: func(cmd *Command, args []string) { + }, +} + +var cmdDeprecated = &Command{ + Use: "deprecated [can't do anything here]", + Short: "A command which is deprecated", + Long: `an absolutely utterly useless command for testing deprecation!.`, + Deprecated: "Please use echo instead", + Run: func(cmd *Command, args []string) { + }, +} + +var cmdTimes = &Command{ + Use: "times [# times] [string to echo]", + SuggestFor: []string{"counts"}, + Short: "Echo anything to the screen more times", + Long: `a slightly useless command for testing.`, + PersistentPreRun: func(cmd *Command, args []string) { + timesPersPre = args + }, + Run: func(cmd *Command, args []string) { + tt = args + }, +} + +var cmdRootNoRun = &Command{ + Use: "cobra-test", + Short: "The root can run its own function", + Long: "The root description for help", + PersistentPreRun: func(cmd *Command, args []string) { + rootPersPre = args + }, +} + +var cmdRootSameName = &Command{ + Use: "print", + Short: "Root with the same name as a subcommand", + Long: "The root description for help", +} + +var cmdRootWithRun = &Command{ + Use: "cobra-test", + Short: "The root can run its own function", + Long: "The root description for help", + Run: func(cmd *Command, args []string) { + tr = args + rootcalled = true + }, +} + +var cmdSubNoRun = &Command{ + Use: "subnorun", + Short: "A subcommand without a Run function", + Long: "A long output about a subcommand without a Run function", +} + +var cmdCustomFlags = &Command{ + Use: "customflags [flags] -- REMOTE_COMMAND", + Short: "A command that expects flags in a custom location", + Long: "A long output about a command that expects flags in a custom location", + Run: func(cmd *Command, args []string) { + }, +} + +var cmdVersion1 = &Command{ + Use: "version", + Short: "Print the version number", + Long: `First version of the version command`, + Run: func(cmd *Command, args []string) { + versionUsed = 1 + }, +} + +var cmdVersion2 = &Command{ + Use: 
"version", + Short: "Print the version number", + Long: `Second version of the version command`, + Run: func(cmd *Command, args []string) { + versionUsed = 2 + }, +} + +var cmdColon = &Command{ + Use: "cmd:colon", + Run: func(cmd *Command, args []string) { + }, +} + +func flagInit() { + cmdEcho.ResetFlags() + cmdPrint.ResetFlags() + cmdTimes.ResetFlags() + cmdRootNoRun.ResetFlags() + cmdRootSameName.ResetFlags() + cmdRootWithRun.ResetFlags() + cmdSubNoRun.ResetFlags() + cmdCustomFlags.ResetFlags() + cmdRootNoRun.PersistentFlags().StringVarP(&flags2a, "strtwo", "t", "two", strtwoParentHelp) + cmdEcho.Flags().IntVarP(&flagi1, "intone", "i", 123, "help message for flag intone") + cmdTimes.Flags().IntVarP(&flagi2, "inttwo", "j", 234, "help message for flag inttwo") + cmdPrint.Flags().IntVarP(&flagi3, "intthree", "i", 345, "help message for flag intthree") + cmdCustomFlags.Flags().IntVar(&flagi4, "intfour", 456, "help message for flag intfour") + cmdEcho.PersistentFlags().StringVarP(&flags1, "strone", "s", "one", "help message for flag strone") + cmdEcho.PersistentFlags().BoolVarP(&flagbp, "persistentbool", "p", false, "help message for flag persistentbool") + cmdTimes.PersistentFlags().StringVarP(&flags2b, "strtwo", "t", "2", strtwoChildHelp) + cmdPrint.PersistentFlags().StringVarP(&flags3, "strthree", "s", "three", "help message for flag strthree") + cmdEcho.Flags().BoolVarP(&flagb1, "boolone", "b", true, "help message for flag boolone") + cmdTimes.Flags().BoolVarP(&flagb2, "booltwo", "c", false, "help message for flag booltwo") + cmdPrint.Flags().BoolVarP(&flagb3, "boolthree", "b", true, "help message for flag boolthree") + cmdVersion1.ResetFlags() + cmdVersion2.ResetFlags() +} + +func commandInit() { + cmdEcho.ResetCommands() + cmdPrint.ResetCommands() + cmdTimes.ResetCommands() + cmdRootNoRun.ResetCommands() + cmdRootSameName.ResetCommands() + cmdRootWithRun.ResetCommands() + cmdSubNoRun.ResetCommands() + cmdCustomFlags.ResetCommands() +} + +func initialize() *Command { + tt, tp, te = nil, nil, nil + rootPersPre, echoPre, echoPersPre, timesPersPre = nil, nil, nil, nil + + var c = cmdRootNoRun + flagInit() + commandInit() + return c +} + +func initializeWithSameName() *Command { + tt, tp, te = nil, nil, nil + rootPersPre, echoPre, echoPersPre, timesPersPre = nil, nil, nil, nil + var c = cmdRootSameName + flagInit() + commandInit() + return c +} + +func initializeWithRootCmd() *Command { + cmdRootWithRun.ResetCommands() + tt, tp, te, tr, rootcalled = nil, nil, nil, nil, false + flagInit() + cmdRootWithRun.Flags().BoolVarP(&flagbr, "boolroot", "b", false, "help message for flag boolroot") + cmdRootWithRun.Flags().IntVarP(&flagir, "introot", "i", 321, "help message for flag introot") + commandInit() + return cmdRootWithRun +} + +type resulter struct { + Error error + Output string + Command *Command +} + +func fullSetupTest(input string) resulter { + c := initializeWithRootCmd() + + return fullTester(c, input) +} + +func noRRSetupTestSilenced(input string) resulter { + c := initialize() + c.SilenceErrors = true + c.SilenceUsage = true + return fullTester(c, input) +} + +func noRRSetupTest(input string) resulter { + c := initialize() + + return fullTester(c, input) +} + +func rootOnlySetupTest(input string) resulter { + c := initializeWithRootCmd() + + return simpleTester(c, input) +} + +func simpleTester(c *Command, input string) resulter { + buf := new(bytes.Buffer) + // Testing flag with invalid input + c.SetOutput(buf) + c.SetArgs(strings.Split(input, " ")) + + err := c.Execute() + output 
:= buf.String()
+
+ return resulter{err, output, c}
+}
+
+func simpleTesterC(c *Command, input string) resulter {
+ buf := new(bytes.Buffer)
+ // Testing flag with invalid input
+ c.SetOutput(buf)
+ c.SetArgs(strings.Split(input, " "))
+
+ cmd, err := c.ExecuteC()
+ output := buf.String()
+
+ return resulter{err, output, cmd}
+}
+
+func fullTester(c *Command, input string) resulter {
+ buf := new(bytes.Buffer)
+ // Testing flag with invalid input
+ c.SetOutput(buf)
+ cmdEcho.AddCommand(cmdTimes)
+ c.AddCommand(cmdPrint, cmdEcho, cmdSubNoRun, cmdCustomFlags, cmdDeprecated)
+ c.SetArgs(strings.Split(input, " "))
+
+ err := c.Execute()
+ output := buf.String()
+
+ return resulter{err, output, c}
+}
+
+func logErr(t *testing.T, found, expected string) {
+ out := new(bytes.Buffer)
+
+ _, _, line, ok := runtime.Caller(2)
+ if ok {
+ fmt.Fprintf(out, "Line: %d ", line)
+ }
+ fmt.Fprintf(out, "Unexpected response.\nExpecting to contain: \n %q\nGot:\n %q\n", expected, found)
+ t.Error(out.String())
+}
+
+func checkStringContains(t *testing.T, found, expected string) {
+ if !strings.Contains(found, expected) {
+ logErr(t, found, expected)
+ }
+}
+
+func checkResultContains(t *testing.T, x resulter, check string) {
+ checkStringContains(t, x.Output, check)
+}
+
+func checkStringOmits(t *testing.T, found, expected string) {
+ if strings.Contains(found, expected) {
+ logErr(t, found, expected)
+ }
+}
+
+func checkResultOmits(t *testing.T, x resulter, check string) {
+ checkStringOmits(t, x.Output, check)
+}
+
+func checkOutputContains(t *testing.T, c *Command, check string) {
+ buf := new(bytes.Buffer)
+ c.SetOutput(buf)
+ c.Execute()
+
+ if !strings.Contains(buf.String(), check) {
+ logErr(t, buf.String(), check)
+ }
+}
+
+func TestSingleCommand(t *testing.T) {
+ noRRSetupTest("print one two")
+
+ if te != nil || tt != nil {
+ t.Error("Wrong command called")
+ }
+ if tp == nil {
+ t.Error("Wrong command called")
+ }
+ if strings.Join(tp, " ") != "one two" {
+ t.Error("Command didn't parse correctly")
+ }
+}
+
+func TestChildCommand(t *testing.T) {
+ noRRSetupTest("echo times one two")
+
+ if te != nil || tp != nil {
+ t.Error("Wrong command called")
+ }
+ if tt == nil {
+ t.Error("Wrong command called")
+ }
+ if strings.Join(tt, " ") != "one two" {
+ t.Error("Command didn't parse correctly")
+ }
+}
+
+func TestCommandAlias(t *testing.T) {
+ noRRSetupTest("say times one two")
+
+ if te != nil || tp != nil {
+ t.Error("Wrong command called")
+ }
+ if tt == nil {
+ t.Error("Wrong command called")
+ }
+ if strings.Join(tt, " ") != "one two" {
+ t.Error("Command didn't parse correctly")
+ }
+}
+
+func TestPrefixMatching(t *testing.T) {
+ EnablePrefixMatching = true
+ noRRSetupTest("ech times one two")
+
+ if te != nil || tp != nil {
+ t.Error("Wrong command called")
+ }
+ if tt == nil {
+ t.Error("Wrong command called")
+ }
+ if strings.Join(tt, " ") != "one two" {
+ t.Error("Command didn't parse correctly")
+ }
+
+ EnablePrefixMatching = false
+}
+
+func TestNoPrefixMatching(t *testing.T) {
+ EnablePrefixMatching = false
+
+ noRRSetupTest("ech times one two")
+
+ if !(tt == nil && te == nil && tp == nil) {
+ t.Error("Wrong command called")
+ }
+}
+
+func TestAliasPrefixMatching(t *testing.T) {
+ EnablePrefixMatching = true
+ noRRSetupTest("sa times one two")
+
+ if te != nil || tp != nil {
+ t.Error("Wrong command called")
+ }
+ if tt == nil {
+ t.Error("Wrong command called")
+ }
+ if strings.Join(tt, " ") != "one two" {
+ t.Error("Command didn't parse correctly")
+ }
+ EnablePrefixMatching = false
+}
+
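+// A minimal, self-contained sketch of the pattern the helpers above wrap:
+// build a command tree, feed it arguments, and inspect the result. The
+// command names used here are illustrative, not part of the shared fixtures.
+func TestMinimalExecuteSketch(t *testing.T) {
+ var got []string
+ root := &Command{Use: "root", Run: func(cmd *Command, args []string) {}}
+ sub := &Command{Use: "sub", Run: func(cmd *Command, args []string) { got = args }}
+ root.AddCommand(sub)
+ root.SetArgs([]string{"sub", "one", "two"})
+ if err := root.Execute(); err != nil {
+ t.Errorf("Execute() failed with %v", err)
+ }
+ if strings.Join(got, " ") != "one two" {
+ t.Error("Command didn't parse correctly")
+ }
+}
+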
+func TestChildSameName(t *testing.T) {
+ c := initializeWithSameName()
+ c.AddCommand(cmdPrint, cmdEcho)
+ c.SetArgs(strings.Split("print one two", " "))
+ c.Execute()
+
+ if te != nil || tt != nil {
+ t.Error("Wrong command called")
+ }
+ if tp == nil {
+ t.Error("Wrong command called")
+ }
+ if strings.Join(tp, " ") != "one two" {
+ t.Error("Command didn't parse correctly")
+ }
+}
+
+func TestGrandChildSameName(t *testing.T) {
+ c := initializeWithSameName()
+ cmdTimes.AddCommand(cmdPrint)
+ c.AddCommand(cmdTimes)
+ c.SetArgs(strings.Split("times print one two", " "))
+ c.Execute()
+
+ if te != nil || tt != nil {
+ t.Error("Wrong command called")
+ }
+ if tp == nil {
+ t.Error("Wrong command called")
+ }
+ if strings.Join(tp, " ") != "one two" {
+ t.Error("Command didn't parse correctly")
+ }
+}
+
+func TestUsage(t *testing.T) {
+ x := fullSetupTest("help")
+ checkResultContains(t, x, cmdRootWithRun.Use+" [flags]")
+ x = fullSetupTest("help customflags")
+ checkResultContains(t, x, cmdCustomFlags.Use)
+ checkResultOmits(t, x, cmdCustomFlags.Use+" [flags]")
+}
+
+func TestFlagLong(t *testing.T) {
+ noRRSetupTest("echo --intone=13 something -- here")
+
+ if cmdEcho.ArgsLenAtDash() != 1 {
+ t.Errorf("expected argsLenAtDash: %d but got %d", 1, cmdEcho.ArgsLenAtDash())
+ }
+ if strings.Join(te, " ") != "something here" {
+ t.Errorf("flags didn't leave proper args remaining..%s given", te)
+ }
+ if flagi1 != 13 {
+ t.Errorf("int flag didn't get correct value, had %d", flagi1)
+ }
+ if flagi2 != 234 {
+ t.Errorf("default flag value changed, 234 expected, %d given", flagi2)
+ }
+}
+
+func TestFlagShort(t *testing.T) {
+ noRRSetupTest("echo -i13 -- something here")
+
+ if cmdEcho.ArgsLenAtDash() != 0 {
+ t.Errorf("expected argsLenAtDash: %d but got %d", 0, cmdEcho.ArgsLenAtDash())
+ }
+ if strings.Join(te, " ") != "something here" {
+ t.Errorf("flags didn't leave proper args remaining..%s given", te)
+ }
+ if flagi1 != 13 {
+ t.Errorf("int flag didn't get correct value, had %d", flagi1)
+ }
+ if flagi2 != 234 {
+ t.Errorf("default flag value changed, 234 expected, %d given", flagi2)
+ }
+
+ noRRSetupTest("echo -i 13 something here")
+
+ if strings.Join(te, " ") != "something here" {
+ t.Errorf("flags didn't leave proper args remaining..%s given", te)
+ }
+ if flagi1 != 13 {
+ t.Errorf("int flag didn't get correct value, had %d", flagi1)
+ }
+ if flagi2 != 234 {
+ t.Errorf("default flag value changed, 234 expected, %d given", flagi2)
+ }
+
+ noRRSetupTest("print -i99 one two")
+
+ if strings.Join(tp, " ") != "one two" {
+ t.Errorf("flags didn't leave proper args remaining..%s given", tp)
+ }
+ if flagi3 != 99 {
+ t.Errorf("int flag didn't get correct value, had %d", flagi3)
+ }
+ if flagi1 != 123 {
+ t.Errorf("default flag value changed on different command with same shortname, 123 expected, %d given", flagi1)
+ }
+}
+
+func TestChildCommandFlags(t *testing.T) {
+ noRRSetupTest("echo times -j 99 one two")
+
+ if strings.Join(tt, " ") != "one two" {
+ t.Errorf("flags didn't leave proper args remaining..%s given", tt)
+ }
+
+ // Testing with flag that shouldn't be persistent
+ r := noRRSetupTest("echo times -j 99 -i77 one two")
+
+ if r.Error == nil {
+ t.Errorf("invalid flag should generate error")
+ }
+
+ if !strings.Contains(r.Error.Error(), "unknown shorthand") {
+ t.Errorf("Wrong error message displayed, \n %s", r.Error)
+ }
+
+ if flagi2 != 99 {
+ t.Errorf("flag value should be 99, %d given", flagi2)
+ }
+
+ if flagi1 != 123 {
+ t.Errorf("unset flag should have default value, expecting 123, given %d", flagi1)
+ }
+
+ // Testing with flag only existing on child
+ r = noRRSetupTest("echo -j 99 -i77 one two")
+
+ if r.Error == nil {
+ t.Errorf("invalid flag should generate error")
+ }
+ if !strings.Contains(r.Error.Error(), "unknown shorthand flag") {
+ t.Errorf("Wrong error message displayed, \n %s", r.Error)
+ }
+
+ // Testing with persistent flag overwritten by child
+ noRRSetupTest("echo times --strtwo=child one two")
+
+ if flags2b != "child" {
+ t.Errorf("flag value should be child, %s given", flags2b)
+ }
+
+ if flags2a != "two" {
+ t.Errorf("unset flag should have default value, expecting two, given %s", flags2a)
+ }
+
+ // Testing flag with invalid input
+ r = noRRSetupTest("echo -i10E")
+
+ if r.Error == nil {
+ t.Errorf("invalid input should generate error")
+ }
+ if !strings.Contains(r.Error.Error(), "invalid argument \"10E\" for i10E") {
+ t.Errorf("Wrong error message displayed, \n %s", r.Error)
+ }
+}
+
+func TestTrailingCommandFlags(t *testing.T) {
+ x := fullSetupTest("echo two -x")
+
+ if x.Error == nil {
+ t.Errorf("invalid flag should generate error")
+ }
+}
+
+func TestInvalidSubcommandFlags(t *testing.T) {
+ cmd := initializeWithRootCmd()
+ cmd.AddCommand(cmdTimes)
+
+ result := simpleTester(cmd, "times --inttwo=2 --badflag=bar")
+ // since we are not checking result.Error here, we check for the
+ // stock usage message instead
+ checkResultContains(t, result, "cobra-test times [# times]")
+ if strings.Contains(result.Error.Error(), "unknown flag: --inttwo") {
+ t.Errorf("invalid --badflag flag shouldn't fail on 'unknown' --inttwo flag")
+ }
+
+}
+
+func TestSubcommandExecuteC(t *testing.T) {
+ cmd := initializeWithRootCmd()
+ double := &Command{
+ Use: "double message",
+ Run: func(c *Command, args []string) {
+ msg := strings.Join(args, " ")
+ c.Println(msg, msg)
+ },
+ }
+
+ echo := &Command{
+ Use: "echo message",
+ Run: func(c *Command, args []string) {
+ msg := strings.Join(args, " ")
+ c.Println(msg, msg)
+ },
+ }
+
+ cmd.AddCommand(double, echo)
+
+ result := simpleTesterC(cmd, "double hello world")
+ checkResultContains(t, result, "hello world hello world")
+
+ if result.Command.Name() != "double" {
+ t.Errorf("invalid cmd returned from ExecuteC: should be 'double' but got %s", result.Command.Name())
+ }
+
+ result = simpleTesterC(cmd, "echo msg to be echoed")
+ checkResultContains(t, result, "msg to be echoed")
+
+ if result.Command.Name() != "echo" {
+ t.Errorf("invalid cmd returned from ExecuteC: should be 'echo' but got %s", result.Command.Name())
+ }
+}
+
+func TestSubcommandArgEvaluation(t *testing.T) {
+ cmd := initializeWithRootCmd()
+
+ first := &Command{
+ Use: "first",
+ Run: func(cmd *Command, args []string) {
+ },
+ }
+ cmd.AddCommand(first)
+
+ second := &Command{
+ Use: "second",
+ Run: func(cmd *Command, args []string) {
+ fmt.Fprintf(cmd.Out(), "%v", args)
+ },
+ }
+ first.AddCommand(second)
+
+ result := simpleTester(cmd, "first second first third")
+
+ expectedOutput := fmt.Sprintf("%v", []string{"first third"})
+ if result.Output != expectedOutput {
+ t.Errorf("expected %v, got %v", expectedOutput, result.Output)
+ }
+}
+
+func TestPersistentFlags(t *testing.T) {
+ fullSetupTest("echo -s something -p more here")
+
+ // persistentFlag should act like normal flag on its own command
+ if strings.Join(te, " ") != "more here" {
+ t.Errorf("flags didn't leave proper args remaining..%s given", te)
+ }
+ if flags1 != "something" {
+ t.Errorf("string flag didn't get correct value, had %v", flags1)
+ }
+ if !flagbp {
+ 
t.Errorf("persistent bool flag not parsed correctly. Expected true, had %v", flagbp) + } + + // persistentFlag should act like normal flag on its own command + fullSetupTest("echo times -s again -c -p test here") + + if strings.Join(tt, " ") != "test here" { + t.Errorf("flags didn't leave proper args remaining..%s given", tt) + } + + if flags1 != "again" { + t.Errorf("string flag didn't get correct value, had %v", flags1) + } + + if !flagb2 { + t.Errorf("local flag not parsed correctly. Expected true, had %v", flagb2) + } + if !flagbp { + t.Errorf("persistent bool flag not parsed correctly. Expected true, had %v", flagbp) + } +} + +func TestHelpCommand(t *testing.T) { + x := fullSetupTest("help") + checkResultContains(t, x, cmdRootWithRun.Long) + + x = fullSetupTest("help echo") + checkResultContains(t, x, cmdEcho.Long) + + x = fullSetupTest("help echo times") + checkResultContains(t, x, cmdTimes.Long) +} + +func TestChildCommandHelp(t *testing.T) { + c := noRRSetupTest("print --help") + checkResultContains(t, c, strtwoParentHelp) + r := noRRSetupTest("echo times --help") + checkResultContains(t, r, strtwoChildHelp) +} + +func TestNonRunChildHelp(t *testing.T) { + x := noRRSetupTest("subnorun") + checkResultContains(t, x, cmdSubNoRun.Long) +} + +func TestRunnableRootCommand(t *testing.T) { + x := fullSetupTest("") + + if rootcalled != true { + t.Errorf("Root Function was not called\n out:%v", x.Error) + } +} + +func TestVisitParents(t *testing.T) { + c := &Command{Use: "app"} + sub := &Command{Use: "sub"} + dsub := &Command{Use: "dsub"} + sub.AddCommand(dsub) + c.AddCommand(sub) + total := 0 + add := func(x *Command) { + total++ + } + sub.VisitParents(add) + if total != 1 { + t.Errorf("Should have visited 1 parent but visited %d", total) + } + + total = 0 + dsub.VisitParents(add) + if total != 2 { + t.Errorf("Should have visited 2 parent but visited %d", total) + } + + total = 0 + c.VisitParents(add) + if total != 0 { + t.Errorf("Should have not visited any parent but visited %d", total) + } +} + +func TestRunnableRootCommandNilInput(t *testing.T) { + var emptyArg []string + c := initializeWithRootCmd() + + buf := new(bytes.Buffer) + // Testing flag with invalid input + c.SetOutput(buf) + cmdEcho.AddCommand(cmdTimes) + c.AddCommand(cmdPrint, cmdEcho) + c.SetArgs(emptyArg) + + err := c.Execute() + if err != nil { + t.Errorf("Execute() failed with %v", err) + } + + if rootcalled != true { + t.Errorf("Root Function was not called") + } +} + +func TestRunnableRootCommandEmptyInput(t *testing.T) { + args := make([]string, 3) + args[0] = "" + args[1] = "--introot=12" + args[2] = "" + c := initializeWithRootCmd() + + buf := new(bytes.Buffer) + // Testing flag with invalid input + c.SetOutput(buf) + cmdEcho.AddCommand(cmdTimes) + c.AddCommand(cmdPrint, cmdEcho) + c.SetArgs(args) + + c.Execute() + + if rootcalled != true { + t.Errorf("Root Function was not called.\n\nOutput was:\n\n%s\n", buf) + } +} + +func TestInvalidSubcommandWhenArgsAllowed(t *testing.T) { + fullSetupTest("echo invalid-sub") + + if te[0] != "invalid-sub" { + t.Errorf("Subcommand didn't work...") + } +} + +func TestRootFlags(t *testing.T) { + fullSetupTest("-i 17 -b") + + if flagbr != true { + t.Errorf("flag value should be true, %v given", flagbr) + } + + if flagir != 17 { + t.Errorf("flag value should be 17, %d given", flagir) + } +} + +func TestRootHelp(t *testing.T) { + x := fullSetupTest("--help") + + checkResultContains(t, x, "Available Commands:") + checkResultContains(t, x, "for more information about a command") + + if 
strings.Contains(x.Output, "unknown flag: --help") { + t.Errorf("--help shouldn't trigger an error, Got: \n %s", x.Output) + } + + if strings.Contains(x.Output, cmdEcho.Use) { + t.Errorf("--help shouldn't display subcommand's usage, Got: \n %s", x.Output) + } + + x = fullSetupTest("echo --help") + + if strings.Contains(x.Output, cmdTimes.Use) { + t.Errorf("--help shouldn't display subsubcommand's usage, Got: \n %s", x.Output) + } + + checkResultContains(t, x, "Available Commands:") + checkResultContains(t, x, "for more information about a command") + + if strings.Contains(x.Output, "unknown flag: --help") { + t.Errorf("--help shouldn't trigger an error, Got: \n %s", x.Output) + } +} + +func TestFlagAccess(t *testing.T) { + initialize() + + local := cmdTimes.LocalFlags() + inherited := cmdTimes.InheritedFlags() + + for _, f := range []string{"inttwo", "strtwo", "booltwo"} { + if local.Lookup(f) == nil { + t.Errorf("LocalFlags expected to contain %s, Got: nil", f) + } + } + if inherited.Lookup("strone") == nil { + t.Errorf("InheritedFlags expected to contain strone, Got: nil") + } + if inherited.Lookup("strtwo") != nil { + t.Errorf("InheritedFlags should not contain overwritten flag strtwo") + } +} + +func TestNoNRunnableRootCommandNilInput(t *testing.T) { + var args []string + c := initialize() + + buf := new(bytes.Buffer) + // Testing flag with invalid input + c.SetOutput(buf) + cmdEcho.AddCommand(cmdTimes) + c.AddCommand(cmdPrint, cmdEcho) + c.SetArgs(args) + + c.Execute() + + if !strings.Contains(buf.String(), cmdRootNoRun.Long) { + t.Errorf("Expected to get help output, Got: \n %s", buf) + } +} + +func TestRootNoCommandHelp(t *testing.T) { + x := rootOnlySetupTest("--help") + + checkResultOmits(t, x, "Available Commands:") + checkResultOmits(t, x, "for more information about a command") + + if strings.Contains(x.Output, "unknown flag: --help") { + t.Errorf("--help shouldn't trigger an error, Got: \n %s", x.Output) + } + + x = rootOnlySetupTest("echo --help") + + checkResultOmits(t, x, "Available Commands:") + checkResultOmits(t, x, "for more information about a command") + + if strings.Contains(x.Output, "unknown flag: --help") { + t.Errorf("--help shouldn't trigger an error, Got: \n %s", x.Output) + } +} + +func TestRootUnknownCommand(t *testing.T) { + r := noRRSetupTest("bogus") + s := "Error: unknown command \"bogus\" for \"cobra-test\"\nRun 'cobra-test --help' for usage.\n" + + if r.Output != s { + t.Errorf("Unexpected response.\nExpecting to be:\n %q\nGot:\n %q\n", s, r.Output) + } + + r = noRRSetupTest("--strtwo=a bogus") + if r.Output != s { + t.Errorf("Unexpected response.\nExpecting to be:\n %q\nGot:\n %q\n", s, r.Output) + } +} + +func TestRootUnknownCommandSilenced(t *testing.T) { + r := noRRSetupTestSilenced("bogus") + + if r.Output != "" { + t.Errorf("Unexpected response.\nExpecting to be:\n\"\"\nGot:\n %q\n", r.Output) + } + + r = noRRSetupTestSilenced("--strtwo=a bogus") + if r.Output != "" { + t.Errorf("Unexpected response.\nExpecting to be:\n\"\"\nGot:\n %q\n", r.Output) + } +} + +func TestRootSuggestions(t *testing.T) { + outputWithSuggestions := "Error: unknown command \"%s\" for \"cobra-test\"\n\nDid you mean this?\n\t%s\n\nRun 'cobra-test --help' for usage.\n" + outputWithoutSuggestions := "Error: unknown command \"%s\" for \"cobra-test\"\nRun 'cobra-test --help' for usage.\n" + + cmd := initializeWithRootCmd() + cmd.AddCommand(cmdTimes) + + tests := map[string]string{ + "time": "times", + "tiems": "times",
+ "tims": "times", + "timeS": "times", + "rimes": "times", + "ti": "times", + "t": "times", + "timely": "times", + "ri": "", + "timezone": "", + "foo": "", + "counts": "times", + } + + for typo, suggestion := range tests { + for _, suggestionsDisabled := range []bool{false, true} { + cmd.DisableSuggestions = suggestionsDisabled + result := simpleTester(cmd, typo) + expected := "" + if len(suggestion) == 0 || suggestionsDisabled { + expected = fmt.Sprintf(outputWithoutSuggestions, typo) + } else { + expected = fmt.Sprintf(outputWithSuggestions, typo, suggestion) + } + if result.Output != expected { + t.Errorf("Unexpected response.\nExpecting to be:\n %q\nGot:\n %q\n", expected, result.Output) + } + } + } +} + +func TestFlagsBeforeCommand(t *testing.T) { + // short without space + x := fullSetupTest("-i10 echo") + if x.Error != nil { + t.Errorf("Valid Input shouldn't have errors, got:\n %q", x.Error) + } + + // short (int) with equals + // It appears that pflags doesn't support this... + // Commenting out until support can be added + + //x = noRRSetupTest("echo -i=10") + //if x.Error != nil { + //t.Errorf("Valid Input shouldn't have errors, got:\n %s", x.Error) + //} + + // long with equals + x = noRRSetupTest("--intone=123 echo one two") + if x.Error != nil { + t.Errorf("Valid Input shouldn't have errors, got:\n %s", x.Error) + } + + // With parsing error properly reported + x = fullSetupTest("-i10E echo") + if !strings.Contains(x.Error.Error(), "invalid argument \"10E\" for i10E") { + t.Errorf("Wrong error message displayed, \n %s", x.Error) + } + + //With quotes + x = fullSetupTest("-s=\"walking\" echo") + if x.Error != nil { + t.Errorf("Valid Input shouldn't have errors, got:\n %q", x.Error) + } + + //With quotes and space + x = fullSetupTest("-s=\"walking fast\" echo") + if x.Error != nil { + t.Errorf("Valid Input shouldn't have errors, got:\n %q", x.Error) + } + + //With inner quote + x = fullSetupTest("-s=\"walking \\\"Inner Quote\\\" fast\" echo") + if x.Error != nil { + t.Errorf("Valid Input shouldn't have errors, got:\n %q", x.Error) + } + + //With quotes and space + x = fullSetupTest("-s=\"walking \\\"Inner Quote\\\" fast\" echo") + if x.Error != nil { + t.Errorf("Valid Input shouldn't have errors, got:\n %q", x.Error) + } + +} + +func TestRemoveCommand(t *testing.T) { + versionUsed = 0 + c := initializeWithRootCmd() + c.AddCommand(cmdVersion1) + c.RemoveCommand(cmdVersion1) + x := fullTester(c, "version") + if x.Error == nil { + t.Errorf("Removed command should not have been called\n") + return + } +} + +func TestCommandWithoutSubcommands(t *testing.T) { + c := initializeWithRootCmd() + + x := simpleTester(c, "") + if x.Error != nil { + t.Errorf("Calling command without subcommands should not have error: %v", x.Error) + return + } +} + +func TestCommandWithoutSubcommandsWithArg(t *testing.T) { + c := initializeWithRootCmd() + expectedArgs := []string{"arg"} + + x := simpleTester(c, "arg") + if x.Error != nil { + t.Errorf("Calling command without subcommands but with arg should not have error: %v", x.Error) + return + } + if !reflect.DeepEqual(expectedArgs, tr) { + t.Errorf("Calling command without subcommands but with arg has wrong args: expected: %v, actual: %v", expectedArgs, tr) + return + } +} + +func TestReplaceCommandWithRemove(t *testing.T) { + versionUsed = 0 + c := initializeWithRootCmd() + c.AddCommand(cmdVersion1) + c.RemoveCommand(cmdVersion1) + c.AddCommand(cmdVersion2) + x := fullTester(c, "version") + if x.Error != nil { + t.Errorf("Valid Input shouldn't have 
errors, got:\n %q", x.Error) + return + } + if versionUsed == 1 { + t.Errorf("Removed command shouldn't be called\n") + } + if versionUsed != 2 { + t.Errorf("Replacing command should have been called but didn't\n") + } +} + +func TestDeprecatedSub(t *testing.T) { + c := fullSetupTest("deprecated") + + checkResultContains(t, c, cmdDeprecated.Deprecated) +} + +func TestPreRun(t *testing.T) { + noRRSetupTest("echo one two") + if echoPre == nil || echoPersPre == nil { + t.Error("PreRun or PersistentPreRun not called") + } + if rootPersPre != nil || timesPersPre != nil { + t.Error("Wrong *Pre functions called!") + } + + noRRSetupTest("echo times one two") + if timesPersPre == nil { + t.Error("PreRun or PersistentPreRun not called") + } + if echoPre != nil || echoPersPre != nil || rootPersPre != nil { + t.Error("Wrong *Pre functions called!") + } + + noRRSetupTest("print one two") + if rootPersPre == nil { + t.Error("Parent PersistentPreRun not called but should have been") + } + if echoPre != nil || echoPersPre != nil || timesPersPre != nil { + t.Error("Wrong *Pre functions called!") + } +} + +// Check if cmdEchoSub gets PersistentPreRun from rootCmd even if it is added last +func TestPersistentPreRunPropagation(t *testing.T) { + rootCmd := initialize() + + // First add the cmdEchoSub to cmdPrint + cmdPrint.AddCommand(cmdEchoSub) + // Now add cmdPrint to rootCmd + rootCmd.AddCommand(cmdPrint) + + rootCmd.SetArgs(strings.Split("print echosub lala", " ")) + rootCmd.Execute() + + if rootPersPre == nil || len(rootPersPre) == 0 || rootPersPre[0] != "lala" { + t.Error("RootCmd PersistentPreRun not called but should have been") + } +} + +func TestGlobalNormFuncPropagation(t *testing.T) { + normFunc := func(f *pflag.FlagSet, name string) pflag.NormalizedName { + return pflag.NormalizedName(name) + } + + rootCmd := initialize() + rootCmd.SetGlobalNormalizationFunc(normFunc) + if reflect.ValueOf(normFunc) != reflect.ValueOf(rootCmd.GlobalNormalizationFunc()) { + t.Error("rootCmd seems to have a wrong normalization function") + } + + // First add the cmdEchoSub to cmdPrint + cmdPrint.AddCommand(cmdEchoSub) + if cmdPrint.GlobalNormalizationFunc() != nil || cmdEchoSub.GlobalNormalizationFunc() != nil { + t.Error("cmdPrint and cmdEchoSub should have no normalization functions yet") + } + + // Now add cmdPrint to rootCmd + rootCmd.AddCommand(cmdPrint) + if reflect.ValueOf(cmdPrint.GlobalNormalizationFunc()).Pointer() != reflect.ValueOf(rootCmd.GlobalNormalizationFunc()).Pointer() || + reflect.ValueOf(cmdEchoSub.GlobalNormalizationFunc()).Pointer() != reflect.ValueOf(rootCmd.GlobalNormalizationFunc()).Pointer() { + t.Error("cmdPrint and cmdEchoSub should have the normalization function of rootCmd") + } +} + +func TestFlagOnPflagCommandLine(t *testing.T) { + flagName := "flagOnCommandLine" + pflag.CommandLine.String(flagName, "", "about my flag") + r := fullSetupTest("--help") + + checkResultContains(t, r, flagName) +} + +func TestAddTemplateFunctions(t *testing.T) { + AddTemplateFunc("t", func() bool { return true }) + AddTemplateFuncs(template.FuncMap{ + "f": func() bool { return false }, + "h": func() string { return "Hello," }, + "w": func() string { return "world." }}) + + const usage = "Hello, world."
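+ // The custom usage template below should render "Hello," (t is true), skip the second branch (f is false), and append " world.", matching the usage constant above.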
+ + c := &Command{} + c.SetUsageTemplate(`{{if t}}{{h}}{{end}}{{if f}}{{h}}{{end}} {{w}}`) + + if us := c.UsageString(); us != usage { + t.Errorf("c.UsageString() != \"%s\", is \"%s\"", usage, us) + } +} + +func TestUsageIsNotPrintedTwice(t *testing.T) { + var cmd = &Command{Use: "root"} + var sub = &Command{Use: "sub"} + cmd.AddCommand(sub) + + r := simpleTester(cmd, "") + if strings.Count(r.Output, "Usage:") != 1 { + t.Error("Usage output is not printed exactly once") + } +} diff --git a/vendor/github.com/spf13/cobra/command_test.go b/vendor/github.com/spf13/cobra/command_test.go new file mode 100644 index 000000000000..43ed7a34fcc0 --- /dev/null +++ b/vendor/github.com/spf13/cobra/command_test.go @@ -0,0 +1,114 @@ +package cobra + +import ( + "reflect" + "testing" +) + +// test to ensure hidden commands run as intended +func TestHiddenCommandExecutes(t *testing.T) { + + // ensure that outs does not already equal what the command will be setting it + // to, if it did this test would not actually be testing anything... + if outs == "hidden" { + t.Errorf("outs should NOT EQUAL hidden") + } + + cmdHidden.Execute() + + // upon running the command, the value of outs should now be 'hidden' + if outs != "hidden" { + t.Errorf("Hidden command failed to run!") + } +} + +// test to ensure hidden commands do not show up in usage/help text +func TestHiddenCommandIsHidden(t *testing.T) { + if cmdHidden.IsAvailableCommand() { + t.Errorf("Hidden command found!") + } +} + +func TestStripFlags(t *testing.T) { + tests := []struct { + input []string + output []string + }{ + { + []string{"foo", "bar"}, + []string{"foo", "bar"}, + }, + { + []string{"foo", "--bar", "-b"}, + []string{"foo"}, + }, + { + []string{"-b", "foo", "--bar", "bar"}, + []string{}, + }, + { + []string{"-i10", "echo"}, + []string{"echo"}, + }, + { + []string{"-i=10", "echo"}, + []string{"echo"}, + }, + { + []string{"--int=100", "echo"}, + []string{"echo"}, + }, + { + []string{"-ib", "echo", "-bfoo", "baz"}, + []string{"echo", "baz"}, + }, + { + []string{"-i=baz", "bar", "-i", "foo", "blah"}, + []string{"bar", "blah"}, + }, + { + []string{"--int=baz", "-bbar", "-i", "foo", "blah"}, + []string{"blah"}, + }, + { + []string{"--cat", "bar", "-i", "foo", "blah"}, + []string{"bar", "blah"}, + }, + { + []string{"-c", "bar", "-i", "foo", "blah"}, + []string{"bar", "blah"}, + }, + { + []string{"--persist", "bar"}, + []string{"bar"}, + }, + { + []string{"-p", "bar"}, + []string{"bar"}, + }, + } + + cmdPrint := &Command{ + Use: "print [string to print]", + Short: "Print anything to the screen", + Long: `an utterly useless command for testing.`, + Run: func(cmd *Command, args []string) { + tp = args + }, + } + + var flagi int + var flagstr string + var flagbool bool + cmdPrint.PersistentFlags().BoolVarP(&flagbool, "persist", "p", false, "help for persistent one") + cmdPrint.Flags().IntVarP(&flagi, "int", "i", 345, "help message for flag int") + cmdPrint.Flags().StringVarP(&flagstr, "bar", "b", "bar", "help message for flag string") + cmdPrint.Flags().BoolVarP(&flagbool, "cat", "c", false, "help message for flag bool") + + for _, test := range tests { + output := stripFlags(test.input, cmdPrint) + if !reflect.DeepEqual(test.output, output) { + t.Errorf("expected: %v, got: %v", test.output, output) + } + } +} diff --git a/vendor/github.com/spf13/pflag/BUILD b/vendor/github.com/spf13/pflag/BUILD index e48a6b288896..724ed915245b 100644 --- a/vendor/github.com/spf13/pflag/BUILD +++ b/vendor/github.com/spf13/pflag/BUILD @@ -1,4 +1,4 @@ 
-load("@io_bazel_rules_go//go:def.bzl", "go_library") +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "go_default_library", @@ -34,6 +34,34 @@ go_library( visibility = ["//visibility:public"], ) +go_test( + name = "go_default_test", + srcs = [ + "bool_slice_test.go", + "bool_test.go", + "count_test.go", + "export_test.go", + "flag_test.go", + "golangflag_test.go", + "int_slice_test.go", + "ip_slice_test.go", + "ip_test.go", + "ipnet_test.go", + "string_array_test.go", + "string_slice_test.go", + "uint_slice_test.go", + ], + importpath = "github.com/spf13/pflag", + library = ":go_default_library", +) + +go_test( + name = "go_default_xtest", + srcs = ["example_test.go"], + importpath = "github.com/spf13/pflag_test", + deps = [":go_default_library"], +) + filegroup( name = "package-srcs", srcs = glob(["**"]), diff --git a/vendor/github.com/spf13/pflag/bool_slice_test.go b/vendor/github.com/spf13/pflag/bool_slice_test.go new file mode 100644 index 000000000000..b617dd237f3b --- /dev/null +++ b/vendor/github.com/spf13/pflag/bool_slice_test.go @@ -0,0 +1,215 @@ +package pflag + +import ( + "fmt" + "strconv" + "strings" + "testing" +) + +func setUpBSFlagSet(bsp *[]bool) *FlagSet { + f := NewFlagSet("test", ContinueOnError) + f.BoolSliceVar(bsp, "bs", []bool{}, "Command separated list!") + return f +} + +func setUpBSFlagSetWithDefault(bsp *[]bool) *FlagSet { + f := NewFlagSet("test", ContinueOnError) + f.BoolSliceVar(bsp, "bs", []bool{false, true}, "Command separated list!") + return f +} + +func TestEmptyBS(t *testing.T) { + var bs []bool + f := setUpBSFlagSet(&bs) + err := f.Parse([]string{}) + if err != nil { + t.Fatal("expected no error; got", err) + } + + getBS, err := f.GetBoolSlice("bs") + if err != nil { + t.Fatal("got an error from GetBoolSlice():", err) + } + if len(getBS) != 0 { + t.Fatalf("got bs %v with len=%d but expected length=0", getBS, len(getBS)) + } +} + +func TestBS(t *testing.T) { + var bs []bool + f := setUpBSFlagSet(&bs) + + vals := []string{"1", "F", "TRUE", "0"} + arg := fmt.Sprintf("--bs=%s", strings.Join(vals, ",")) + err := f.Parse([]string{arg}) + if err != nil { + t.Fatal("expected no error; got", err) + } + for i, v := range bs { + b, err := strconv.ParseBool(vals[i]) + if err != nil { + t.Fatalf("got error: %v", err) + } + if b != v { + t.Fatalf("expected is[%d] to be %s but got: %t", i, vals[i], v) + } + } + getBS, err := f.GetBoolSlice("bs") + if err != nil { + t.Fatalf("got error: %v", err) + } + for i, v := range getBS { + b, err := strconv.ParseBool(vals[i]) + if err != nil { + t.Fatalf("got error: %v", err) + } + if b != v { + t.Fatalf("expected bs[%d] to be %s but got: %t from GetBoolSlice", i, vals[i], v) + } + } +} + +func TestBSDefault(t *testing.T) { + var bs []bool + f := setUpBSFlagSetWithDefault(&bs) + + vals := []string{"false", "T"} + + err := f.Parse([]string{}) + if err != nil { + t.Fatal("expected no error; got", err) + } + for i, v := range bs { + b, err := strconv.ParseBool(vals[i]) + if err != nil { + t.Fatalf("got error: %v", err) + } + if b != v { + t.Fatalf("expected bs[%d] to be %t from GetBoolSlice but got: %t", i, b, v) + } + } + + getBS, err := f.GetBoolSlice("bs") + if err != nil { + t.Fatal("got an error from GetBoolSlice():", err) + } + for i, v := range getBS { + b, err := strconv.ParseBool(vals[i]) + if err != nil { + t.Fatal("got an error from GetBoolSlice():", err) + } + if b != v { + t.Fatalf("expected bs[%d] to be %t from GetBoolSlice but got: %t", i, b, v) + } + } +} + +func 
TestBSWithDefault(t *testing.T) { + var bs []bool + f := setUpBSFlagSetWithDefault(&bs) + + vals := []string{"FALSE", "1"} + arg := fmt.Sprintf("--bs=%s", strings.Join(vals, ",")) + err := f.Parse([]string{arg}) + if err != nil { + t.Fatal("expected no error; got", err) + } + for i, v := range bs { + b, err := strconv.ParseBool(vals[i]) + if err != nil { + t.Fatalf("got error: %v", err) + } + if b != v { + t.Fatalf("expected bs[%d] to be %t but got: %t", i, b, v) + } + } + + getBS, err := f.GetBoolSlice("bs") + if err != nil { + t.Fatal("got an error from GetBoolSlice():", err) + } + for i, v := range getBS { + b, err := strconv.ParseBool(vals[i]) + if err != nil { + t.Fatalf("got error: %v", err) + } + if b != v { + t.Fatalf("expected bs[%d] to be %t from GetBoolSlice but got: %t", i, b, v) + } + } +} + +func TestBSCalledTwice(t *testing.T) { + var bs []bool + f := setUpBSFlagSet(&bs) + + in := []string{"T,F", "T"} + expected := []bool{true, false, true} + argfmt := "--bs=%s" + arg1 := fmt.Sprintf(argfmt, in[0]) + arg2 := fmt.Sprintf(argfmt, in[1]) + err := f.Parse([]string{arg1, arg2}) + if err != nil { + t.Fatal("expected no error; got", err) + } + for i, v := range bs { + if expected[i] != v { + t.Fatalf("expected bs[%d] to be %t but got %t", i, expected[i], v) + } + } +} + +func TestBSBadQuoting(t *testing.T) { + + tests := []struct { + Want []bool + FlagArg []string + }{ + { + Want: []bool{true, false, true}, + FlagArg: []string{"1", "0", "true"}, + }, + { + Want: []bool{true, false}, + FlagArg: []string{"True", "F"}, + }, + { + Want: []bool{true, false}, + FlagArg: []string{"T", "0"}, + }, + { + Want: []bool{true, false}, + FlagArg: []string{"1", "0"}, + }, + { + Want: []bool{true, false, false}, + FlagArg: []string{"true,false", "false"}, + }, + { + Want: []bool{true, false, false, true, false, true, false}, + FlagArg: []string{`"true,false,false,1,0, T"`, " false "}, + }, + { + Want: []bool{false, false, true, false, true, false, true}, + FlagArg: []string{`"0, False, T,false , true,F"`, "true"}, + }, + } + + for i, test := range tests { + + var bs []bool + f := setUpBSFlagSet(&bs) + + if err := f.Parse([]string{fmt.Sprintf("--bs=%s", strings.Join(test.FlagArg, ","))}); err != nil { + t.Fatalf("flag parsing failed with error: %s\nparsing:\t%#v\nwant:\t\t%#v", + err, test.FlagArg, test.Want) + } + + for j, b := range bs { + if b != test.Want[j] { + t.Fatalf("bad value parsed for test %d on bool %d:\nwant:\t%t\ngot:\t%t", i, j, test.Want[j], b) + } + } + } +} diff --git a/vendor/github.com/spf13/pflag/bool_test.go b/vendor/github.com/spf13/pflag/bool_test.go new file mode 100644 index 000000000000..a4319e79fcda --- /dev/null +++ b/vendor/github.com/spf13/pflag/bool_test.go @@ -0,0 +1,179 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file.
+ +package pflag + +import ( + "bytes" + "strconv" + "testing" +) + +// This value can be a boolean ("true", "false") or "maybe" +type triStateValue int + +const ( + triStateFalse triStateValue = 0 + triStateTrue triStateValue = 1 + triStateMaybe triStateValue = 2 +) + +const strTriStateMaybe = "maybe" + +func (v *triStateValue) IsBoolFlag() bool { + return true +} + +func (v *triStateValue) Get() interface{} { + return triStateValue(*v) +} + +func (v *triStateValue) Set(s string) error { + if s == strTriStateMaybe { + *v = triStateMaybe + return nil + } + boolVal, err := strconv.ParseBool(s) + if boolVal { + *v = triStateTrue + } else { + *v = triStateFalse + } + return err +} + +func (v *triStateValue) String() string { + if *v == triStateMaybe { + return strTriStateMaybe + } + return strconv.FormatBool(*v == triStateTrue) +} + +// The type of the flag as required by the pflag.Value interface +func (v *triStateValue) Type() string { + return "version" +} + +func setUpFlagSet(tristate *triStateValue) *FlagSet { + f := NewFlagSet("test", ContinueOnError) + *tristate = triStateFalse + flag := f.VarPF(tristate, "tristate", "t", "tristate value (true, maybe or false)") + flag.NoOptDefVal = "true" + return f +} + +func TestExplicitTrue(t *testing.T) { + var tristate triStateValue + f := setUpFlagSet(&tristate) + err := f.Parse([]string{"--tristate=true"}) + if err != nil { + t.Fatal("expected no error; got", err) + } + if tristate != triStateTrue { + t.Fatal("expected", triStateTrue, "(triStateTrue) but got", tristate, "instead") + } +} + +func TestImplicitTrue(t *testing.T) { + var tristate triStateValue + f := setUpFlagSet(&tristate) + err := f.Parse([]string{"--tristate"}) + if err != nil { + t.Fatal("expected no error; got", err) + } + if tristate != triStateTrue { + t.Fatal("expected", triStateTrue, "(triStateTrue) but got", tristate, "instead") + } +} + +func TestShortFlag(t *testing.T) { + var tristate triStateValue + f := setUpFlagSet(&tristate) + err := f.Parse([]string{"-t"}) + if err != nil { + t.Fatal("expected no error; got", err) + } + if tristate != triStateTrue { + t.Fatal("expected", triStateTrue, "(triStateTrue) but got", tristate, "instead") + } +} + +func TestShortFlagExtraArgument(t *testing.T) { + var tristate triStateValue + f := setUpFlagSet(&tristate) + // The"maybe"turns into an arg, since short boolean options will only do true/false + err := f.Parse([]string{"-t", "maybe"}) + if err != nil { + t.Fatal("expected no error; got", err) + } + if tristate != triStateTrue { + t.Fatal("expected", triStateTrue, "(triStateTrue) but got", tristate, "instead") + } + args := f.Args() + if len(args) != 1 || args[0] != "maybe" { + t.Fatal("expected an extra 'maybe' argument to stick around") + } +} + +func TestExplicitMaybe(t *testing.T) { + var tristate triStateValue + f := setUpFlagSet(&tristate) + err := f.Parse([]string{"--tristate=maybe"}) + if err != nil { + t.Fatal("expected no error; got", err) + } + if tristate != triStateMaybe { + t.Fatal("expected", triStateMaybe, "(triStateMaybe) but got", tristate, "instead") + } +} + +func TestExplicitFalse(t *testing.T) { + var tristate triStateValue + f := setUpFlagSet(&tristate) + err := f.Parse([]string{"--tristate=false"}) + if err != nil { + t.Fatal("expected no error; got", err) + } + if tristate != triStateFalse { + t.Fatal("expected", triStateFalse, "(triStateFalse) but got", tristate, "instead") + } +} + +func TestImplicitFalse(t *testing.T) { + var tristate triStateValue + f := setUpFlagSet(&tristate) + err := 
f.Parse([]string{}) + if err != nil { + t.Fatal("expected no error; got", err) + } + if tristate != triStateFalse { + t.Fatal("expected", triStateFalse, "(triStateFalse) but got", tristate, "instead") + } +} + +func TestInvalidValue(t *testing.T) { + var tristate triStateValue + f := setUpFlagSet(&tristate) + var buf bytes.Buffer + f.SetOutput(&buf) + err := f.Parse([]string{"--tristate=invalid"}) + if err == nil { + t.Fatal("expected an error but did not get any, tristate has value", tristate) + } +} + +func TestBoolP(t *testing.T) { + b := BoolP("bool", "b", false, "bool value in CommandLine") + c := BoolP("c", "c", false, "other bool value") + args := []string{"--bool"} + if err := CommandLine.Parse(args); err != nil { + t.Error("expected no error, got ", err) + } + if *b != true { + t.Errorf("expected b=true got b=%v", *b) + } + if *c != false { + t.Errorf("expect c=false got c=%v", *c) + } +} diff --git a/vendor/github.com/spf13/pflag/count_test.go b/vendor/github.com/spf13/pflag/count_test.go new file mode 100644 index 000000000000..460d96a6f173 --- /dev/null +++ b/vendor/github.com/spf13/pflag/count_test.go @@ -0,0 +1,52 @@ +package pflag + +import ( + "os" + "testing" +) + +func setUpCount(c *int) *FlagSet { + f := NewFlagSet("test", ContinueOnError) + f.CountVarP(c, "verbose", "v", "a counter") + return f +} + +func TestCount(t *testing.T) { + testCases := []struct { + input []string + success bool + expected int + }{ + {[]string{"-vvv"}, true, 3}, + {[]string{"-v", "-v", "-v"}, true, 3}, + {[]string{"-v", "--verbose", "-v"}, true, 3}, + {[]string{"-v=3", "-v"}, true, 4}, + {[]string{"-v=a"}, false, 0}, + } + + devnull, _ := os.Open(os.DevNull) + os.Stderr = devnull + for i := range testCases { + var count int + f := setUpCount(&count) + + tc := &testCases[i] + + err := f.Parse(tc.input) + if err != nil && tc.success == true { + t.Errorf("expected success, got %q", err) + continue + } else if err == nil && tc.success == false { + t.Errorf("expected failure, got success") + continue + } else if tc.success { + c, err := f.GetCount("verbose") + if err != nil { + t.Errorf("Got error trying to fetch the counter flag") + } + if c != tc.expected { + t.Errorf("expected %q, got %q", tc.expected, c) + } + } + } +} diff --git a/vendor/github.com/spf13/pflag/example_test.go b/vendor/github.com/spf13/pflag/example_test.go new file mode 100644 index 000000000000..abd7806fad55 --- /dev/null +++ b/vendor/github.com/spf13/pflag/example_test.go @@ -0,0 +1,36 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pflag_test + +import ( + "fmt" + + "github.com/spf13/pflag" +) + +func ExampleShorthandLookup() { + name := "verbose" + short := name[:1] + + pflag.BoolP(name, short, false, "verbose output") + + // len(short) must be == 1 + flag := pflag.ShorthandLookup(short) + + fmt.Println(flag.Name) +} + +func ExampleFlagSet_ShorthandLookup() { + name := "verbose" + short := name[:1] + + fs := pflag.NewFlagSet("Example", pflag.ContinueOnError) + fs.BoolP(name, short, false, "verbose output") + + // len(short) must be == 1 + flag := fs.ShorthandLookup(short) + + fmt.Println(flag.Name) +} diff --git a/vendor/github.com/spf13/pflag/export_test.go b/vendor/github.com/spf13/pflag/export_test.go new file mode 100644 index 000000000000..9318fee00e17 --- /dev/null +++ b/vendor/github.com/spf13/pflag/export_test.go @@ -0,0 +1,29 @@ +// Copyright 2010 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pflag + +import ( + "io/ioutil" + "os" +) + +// Additional routines compiled into the package only during testing. + +// ResetForTesting clears all flag state and sets the usage function as directed. +// After calling ResetForTesting, parse errors in flag handling will not +// exit the program. +func ResetForTesting(usage func()) { + CommandLine = &FlagSet{ + name: os.Args[0], + errorHandling: ContinueOnError, + output: ioutil.Discard, + } + Usage = usage +} + +// GetCommandLine returns the default FlagSet. +func GetCommandLine() *FlagSet { + return CommandLine +} diff --git a/vendor/github.com/spf13/pflag/flag_test.go b/vendor/github.com/spf13/pflag/flag_test.go new file mode 100644 index 000000000000..29fec8671cf9 --- /dev/null +++ b/vendor/github.com/spf13/pflag/flag_test.go @@ -0,0 +1,1085 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pflag + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net" + "os" + "reflect" + "sort" + "strconv" + "strings" + "testing" + "time" +) + +var ( + testBool = Bool("test_bool", false, "bool value") + testInt = Int("test_int", 0, "int value") + testInt64 = Int64("test_int64", 0, "int64 value") + testUint = Uint("test_uint", 0, "uint value") + testUint64 = Uint64("test_uint64", 0, "uint64 value") + testString = String("test_string", "0", "string value") + testFloat = Float64("test_float64", 0, "float64 value") + testDuration = Duration("test_duration", 0, "time.Duration value") + testOptionalInt = Int("test_optional_int", 0, "optional int value") + normalizeFlagNameInvocations = 0 +) + +func boolString(s string) string { + if s == "0" { + return "false" + } + return "true" +} + +func TestEverything(t *testing.T) { + m := make(map[string]*Flag) + desired := "0" + visitor := func(f *Flag) { + if len(f.Name) > 5 && f.Name[0:5] == "test_" { + m[f.Name] = f + ok := false + switch { + case f.Value.String() == desired: + ok = true + case f.Name == "test_bool" && f.Value.String() == boolString(desired): + ok = true + case f.Name == "test_duration" && f.Value.String() == desired+"s": + ok = true + } + if !ok { + t.Error("Visit: bad value", f.Value.String(), "for", f.Name) + } + } + } + VisitAll(visitor) + if len(m) != 9 { + t.Error("VisitAll misses some flags") + for k, v := range m { + t.Log(k, *v) + } + } + m = make(map[string]*Flag) + Visit(visitor) + if len(m) != 0 { + t.Errorf("Visit sees unset flags") + for k, v := range m { + t.Log(k, *v) + } + } + // Now set all flags + Set("test_bool", "true") + Set("test_int", "1") + Set("test_int64", "1") + Set("test_uint", "1") + Set("test_uint64", "1") + Set("test_string", "1") + Set("test_float64", "1") + Set("test_duration", "1s") + Set("test_optional_int", "1") + desired = "1" + Visit(visitor) + if len(m) != 9 { + t.Error("Visit fails after set") + for k, v := range m { + t.Log(k, *v) + } + } + // Now test they're visited in sort order. 
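+ // (Visit walks flags in lexicographical order of their names by default, so the collected names should come back already sorted.)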
+ var flagNames []string + Visit(func(f *Flag) { flagNames = append(flagNames, f.Name) }) + if !sort.StringsAreSorted(flagNames) { + t.Errorf("flag names not sorted: %v", flagNames) + } +} + +func TestUsage(t *testing.T) { + called := false + ResetForTesting(func() { called = true }) + if GetCommandLine().Parse([]string{"--x"}) == nil { + t.Error("parse did not fail for unknown flag") + } + if !called { + t.Error("did not call Usage for unknown flag") + } +} + +func TestAddFlagSet(t *testing.T) { + oldSet := NewFlagSet("old", ContinueOnError) + newSet := NewFlagSet("new", ContinueOnError) + + oldSet.String("flag1", "flag1", "flag1") + oldSet.String("flag2", "flag2", "flag2") + + newSet.String("flag2", "flag2", "flag2") + newSet.String("flag3", "flag3", "flag3") + + oldSet.AddFlagSet(newSet) + + if len(oldSet.formal) != 3 { + t.Errorf("Unexpected result adding a FlagSet to a FlagSet %v", oldSet) + } +} + +func TestAnnotation(t *testing.T) { + f := NewFlagSet("shorthand", ContinueOnError) + + if err := f.SetAnnotation("missing-flag", "key", nil); err == nil { + t.Errorf("Expected error setting annotation on non-existent flag") + } + + f.StringP("stringa", "a", "", "string value") + if err := f.SetAnnotation("stringa", "key", nil); err != nil { + t.Errorf("Unexpected error setting new nil annotation: %v", err) + } + if annotation := f.Lookup("stringa").Annotations["key"]; annotation != nil { + t.Errorf("Unexpected annotation: %v", annotation) + } + + f.StringP("stringb", "b", "", "string2 value") + if err := f.SetAnnotation("stringb", "key", []string{"value1"}); err != nil { + t.Errorf("Unexpected error setting new annotation: %v", err) + } + if annotation := f.Lookup("stringb").Annotations["key"]; !reflect.DeepEqual(annotation, []string{"value1"}) { + t.Errorf("Unexpected annotation: %v", annotation) + } + + if err := f.SetAnnotation("stringb", "key", []string{"value2"}); err != nil { + t.Errorf("Unexpected error updating annotation: %v", err) + } + if annotation := f.Lookup("stringb").Annotations["key"]; !reflect.DeepEqual(annotation, []string{"value2"}) { + t.Errorf("Unexpected annotation: %v", annotation) + } +} + +func testParse(f *FlagSet, t *testing.T) { + if f.Parsed() { + t.Error("f.Parse() = true before Parse") + } + boolFlag := f.Bool("bool", false, "bool value") + bool2Flag := f.Bool("bool2", false, "bool2 value") + bool3Flag := f.Bool("bool3", false, "bool3 value") + intFlag := f.Int("int", 0, "int value") + int8Flag := f.Int8("int8", 0, "int value") + int32Flag := f.Int32("int32", 0, "int value") + int64Flag := f.Int64("int64", 0, "int64 value") + uintFlag := f.Uint("uint", 0, "uint value") + uint8Flag := f.Uint8("uint8", 0, "uint value") + uint16Flag := f.Uint16("uint16", 0, "uint value") + uint32Flag := f.Uint32("uint32", 0, "uint value") + uint64Flag := f.Uint64("uint64", 0, "uint64 value") + stringFlag := f.String("string", "0", "string value") + float32Flag := f.Float32("float32", 0, "float32 value") + float64Flag := f.Float64("float64", 0, "float64 value") + ipFlag := f.IP("ip", net.ParseIP("127.0.0.1"), "ip value") + maskFlag := f.IPMask("mask", ParseIPv4Mask("0.0.0.0"), "mask value") + durationFlag := f.Duration("duration", 5*time.Second, "time.Duration value") + optionalIntNoValueFlag := f.Int("optional-int-no-value", 0, "int value") + f.Lookup("optional-int-no-value").NoOptDefVal = "9" + optionalIntWithValueFlag := f.Int("optional-int-with-value", 0, "int value") + f.Lookup("optional-int-with-value").NoOptDefVal = "9" + extra := "one-extra-argument" + args := []string{ +
"--bool", + "--bool2=true", + "--bool3=false", + "--int=22", + "--int8=-8", + "--int32=-32", + "--int64=0x23", + "--uint", "24", + "--uint8=8", + "--uint16=16", + "--uint32=32", + "--uint64=25", + "--string=hello", + "--float32=-172e12", + "--float64=2718e28", + "--ip=10.11.12.13", + "--mask=255.255.255.0", + "--duration=2m", + "--optional-int-no-value", + "--optional-int-with-value=42", + extra, + } + if err := f.Parse(args); err != nil { + t.Fatal(err) + } + if !f.Parsed() { + t.Error("f.Parse() = false after Parse") + } + if *boolFlag != true { + t.Error("bool flag should be true, is ", *boolFlag) + } + if v, err := f.GetBool("bool"); err != nil || v != *boolFlag { + t.Error("GetBool does not work.") + } + if *bool2Flag != true { + t.Error("bool2 flag should be true, is ", *bool2Flag) + } + if *bool3Flag != false { + t.Error("bool3 flag should be false, is ", *bool2Flag) + } + if *intFlag != 22 { + t.Error("int flag should be 22, is ", *intFlag) + } + if v, err := f.GetInt("int"); err != nil || v != *intFlag { + t.Error("GetInt does not work.") + } + if *int8Flag != -8 { + t.Error("int8 flag should be 0x23, is ", *int8Flag) + } + if v, err := f.GetInt8("int8"); err != nil || v != *int8Flag { + t.Error("GetInt8 does not work.") + } + if *int32Flag != -32 { + t.Error("int32 flag should be 0x23, is ", *int32Flag) + } + if v, err := f.GetInt32("int32"); err != nil || v != *int32Flag { + t.Error("GetInt32 does not work.") + } + if *int64Flag != 0x23 { + t.Error("int64 flag should be 0x23, is ", *int64Flag) + } + if v, err := f.GetInt64("int64"); err != nil || v != *int64Flag { + t.Error("GetInt64 does not work.") + } + if *uintFlag != 24 { + t.Error("uint flag should be 24, is ", *uintFlag) + } + if v, err := f.GetUint("uint"); err != nil || v != *uintFlag { + t.Error("GetUint does not work.") + } + if *uint8Flag != 8 { + t.Error("uint8 flag should be 8, is ", *uint8Flag) + } + if v, err := f.GetUint8("uint8"); err != nil || v != *uint8Flag { + t.Error("GetUint8 does not work.") + } + if *uint16Flag != 16 { + t.Error("uint16 flag should be 16, is ", *uint16Flag) + } + if v, err := f.GetUint16("uint16"); err != nil || v != *uint16Flag { + t.Error("GetUint16 does not work.") + } + if *uint32Flag != 32 { + t.Error("uint32 flag should be 32, is ", *uint32Flag) + } + if v, err := f.GetUint32("uint32"); err != nil || v != *uint32Flag { + t.Error("GetUint32 does not work.") + } + if *uint64Flag != 25 { + t.Error("uint64 flag should be 25, is ", *uint64Flag) + } + if v, err := f.GetUint64("uint64"); err != nil || v != *uint64Flag { + t.Error("GetUint64 does not work.") + } + if *stringFlag != "hello" { + t.Error("string flag should be `hello`, is ", *stringFlag) + } + if v, err := f.GetString("string"); err != nil || v != *stringFlag { + t.Error("GetString does not work.") + } + if *float32Flag != -172e12 { + t.Error("float32 flag should be -172e12, is ", *float32Flag) + } + if v, err := f.GetFloat32("float32"); err != nil || v != *float32Flag { + t.Errorf("GetFloat32 returned %v but float32Flag was %v", v, *float32Flag) + } + if *float64Flag != 2718e28 { + t.Error("float64 flag should be 2718e28, is ", *float64Flag) + } + if v, err := f.GetFloat64("float64"); err != nil || v != *float64Flag { + t.Errorf("GetFloat64 returned %v but float64Flag was %v", v, *float64Flag) + } + if !(*ipFlag).Equal(net.ParseIP("10.11.12.13")) { + t.Error("ip flag should be 10.11.12.13, is ", *ipFlag) + } + if v, err := f.GetIP("ip"); err != nil || !v.Equal(*ipFlag) { + t.Errorf("GetIP returned %v but ipFlag was %v", v, 
*ipFlag) + } + if (*maskFlag).String() != ParseIPv4Mask("255.255.255.0").String() { + t.Error("mask flag should be 255.255.255.0, is ", (*maskFlag).String()) + } + if v, err := f.GetIPv4Mask("mask"); err != nil || v.String() != (*maskFlag).String() { + t.Errorf("GetIPv4Mask returned %v maskFlag was %v error was %v", v, *maskFlag, err) + } + if *durationFlag != 2*time.Minute { + t.Error("duration flag should be 2m, is ", *durationFlag) + } + if v, err := f.GetDuration("duration"); err != nil || v != *durationFlag { + t.Error("GetDuration does not work.") + } + if _, err := f.GetInt("duration"); err == nil { + t.Error("GetInt parsed a time.Duration?!?!") + } + if *optionalIntNoValueFlag != 9 { + t.Error("optional int flag should be the default value, is ", *optionalIntNoValueFlag) + } + if *optionalIntWithValueFlag != 42 { + t.Error("optional int flag should be 42, is ", *optionalIntWithValueFlag) + } + if len(f.Args()) != 1 { + t.Error("expected one argument, got", len(f.Args())) + } else if f.Args()[0] != extra { + t.Errorf("expected argument %q got %q", extra, f.Args()[0]) + } +} + +func testParseAll(f *FlagSet, t *testing.T) { + if f.Parsed() { + t.Error("f.Parse() = true before Parse") + } + f.BoolP("boola", "a", false, "bool value") + f.BoolP("boolb", "b", false, "bool2 value") + f.BoolP("boolc", "c", false, "bool3 value") + f.BoolP("boold", "d", false, "bool4 value") + f.StringP("stringa", "s", "0", "string value") + f.StringP("stringz", "z", "0", "string value") + f.StringP("stringx", "x", "0", "string value") + f.StringP("stringy", "y", "0", "string value") + f.Lookup("stringx").NoOptDefVal = "1" + args := []string{ + "-ab", + "-cs=xx", + "--stringz=something", + "-d=true", + "-x", + "-y", + "ee", + } + want := []string{ + "boola", "true", + "boolb", "true", + "boolc", "true", + "stringa", "xx", + "stringz", "something", + "boold", "true", + "stringx", "1", + "stringy", "ee", + } + got := []string{} + store := func(flag *Flag, value string) error { + got = append(got, flag.Name) + if len(value) > 0 { + got = append(got, value) + } + return nil + } + if err := f.ParseAll(args, store); err != nil { + t.Errorf("expected no error, got %s", err) + } + if !f.Parsed() { + t.Errorf("f.Parse() = false after Parse") + } + if !reflect.DeepEqual(got, want) { + t.Errorf("f.ParseAll() failed to restore the args") + t.Errorf("Got: %v", got) + t.Errorf("Want: %v", want) + } +} + +func TestShorthand(t *testing.T) { + f := NewFlagSet("shorthand", ContinueOnError) + if f.Parsed() { + t.Error("f.Parse() = true before Parse") + } + boolaFlag := f.BoolP("boola", "a", false, "bool value") + boolbFlag := f.BoolP("boolb", "b", false, "bool2 value") + boolcFlag := f.BoolP("boolc", "c", false, "bool3 value") + booldFlag := f.BoolP("boold", "d", false, "bool4 value") + stringaFlag := f.StringP("stringa", "s", "0", "string value") + stringzFlag := f.StringP("stringz", "z", "0", "string value") + extra := "interspersed-argument" + notaflag := "--i-look-like-a-flag" + args := []string{ + "-ab", + extra, + "-cs", + "hello", + "-z=something", + "-d=true", + "--", + notaflag, + } + f.SetOutput(ioutil.Discard) + if err := f.Parse(args); err != nil { + t.Error("expected no error, got ", err) + } + if !f.Parsed() { + t.Error("f.Parse() = false after Parse") + } + if *boolaFlag != true { + t.Error("boola flag should be true, is ", *boolaFlag) + } + if *boolbFlag != true { + t.Error("boolb flag should be true, is ", *boolbFlag) + } + if *boolcFlag != true { + t.Error("boolc flag should be true, is ", *boolcFlag) + } + if
*booldFlag != true { + t.Error("boold flag should be true, is ", *booldFlag) + } + if *stringaFlag != "hello" { + t.Error("stringa flag should be `hello`, is ", *stringaFlag) + } + if *stringzFlag != "something" { + t.Error("stringz flag should be `something`, is ", *stringzFlag) + } + if len(f.Args()) != 2 { + t.Error("expected two arguments, got", len(f.Args())) + } else if f.Args()[0] != extra { + t.Errorf("expected argument %q got %q", extra, f.Args()[0]) + } else if f.Args()[1] != notaflag { + t.Errorf("expected argument %q got %q", notaflag, f.Args()[1]) + } + if f.ArgsLenAtDash() != 1 { + t.Errorf("expected argsLenAtDash %d got %d", 1, f.ArgsLenAtDash()) + } +} + +func TestShorthandLookup(t *testing.T) { + f := NewFlagSet("shorthand", ContinueOnError) + if f.Parsed() { + t.Error("f.Parse() = true before Parse") + } + f.BoolP("boola", "a", false, "bool value") + f.BoolP("boolb", "b", false, "bool2 value") + args := []string{ + "-ab", + } + f.SetOutput(ioutil.Discard) + if err := f.Parse(args); err != nil { + t.Error("expected no error, got ", err) + } + if !f.Parsed() { + t.Error("f.Parse() = false after Parse") + } + flag := f.ShorthandLookup("a") + if flag == nil { + t.Errorf("f.ShorthandLookup(\"a\") returned nil") + } + if flag.Name != "boola" { + t.Errorf("f.ShorthandLookup(\"a\") found %q instead of \"boola\"", flag.Name) + } + flag = f.ShorthandLookup("") + if flag != nil { + t.Errorf("f.ShorthandLookup(\"\") did not return nil") + } + defer func() { + recover() + }() + flag = f.ShorthandLookup("ab") + // should NEVER get here. lookup should panic. defer'd func should recover it. + t.Errorf("f.ShorthandLookup(\"ab\") did not panic") +} + +func TestParse(t *testing.T) { + ResetForTesting(func() { t.Error("bad parse") }) + testParse(GetCommandLine(), t) +} + +func TestParseAll(t *testing.T) { + ResetForTesting(func() { t.Error("bad parse") }) + testParseAll(GetCommandLine(), t) +} + +func TestFlagSetParse(t *testing.T) { + testParse(NewFlagSet("test", ContinueOnError), t) +} + +func TestChangedHelper(t *testing.T) { + f := NewFlagSet("changedtest", ContinueOnError) + f.Bool("changed", false, "changed bool") + f.Bool("settrue", true, "true to true") + f.Bool("setfalse", false, "false to false") + f.Bool("unchanged", false, "unchanged bool") + + args := []string{"--changed", "--settrue", "--setfalse=false"} + if err := f.Parse(args); err != nil { + t.Error("expected no error; got ", err) + } + if !f.Changed("changed") { + t.Errorf("--changed wasn't changed!") + } + if !f.Changed("settrue") { + t.Errorf("--settrue wasn't changed!") + } + if !f.Changed("setfalse") { + t.Errorf("--setfalse wasn't changed!") + } + if f.Changed("unchanged") { + t.Errorf("--unchanged was changed!") + } + if f.Changed("invalid") { + t.Errorf("--invalid was changed!") + } + if f.ArgsLenAtDash() != -1 { + t.Errorf("Expected argsLenAtDash: %d but got %d", -1, f.ArgsLenAtDash()) + } +} + +func replaceSeparators(name string, from []string, to string) string { + result := name + for _, sep := range from { + result = strings.Replace(result, sep, to, -1) + } + // Type convert to indicate normalization has been done.
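+ // (the conversion to NormalizedName itself happens in wordSepNormalizeFunc, defined next)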
+ return result +} + +func wordSepNormalizeFunc(f *FlagSet, name string) NormalizedName { + seps := []string{"-", "_"} + name = replaceSeparators(name, seps, ".") + normalizeFlagNameInvocations++ + + return NormalizedName(name) +} + +func testWordSepNormalizedNames(args []string, t *testing.T) { + f := NewFlagSet("normalized", ContinueOnError) + if f.Parsed() { + t.Error("f.Parse() = true before Parse") + } + withDashFlag := f.Bool("with-dash-flag", false, "bool value") + // Set this after some flags have been added and before others. + f.SetNormalizeFunc(wordSepNormalizeFunc) + withUnderFlag := f.Bool("with_under_flag", false, "bool value") + withBothFlag := f.Bool("with-both_flag", false, "bool value") + if err := f.Parse(args); err != nil { + t.Fatal(err) + } + if !f.Parsed() { + t.Error("f.Parse() = false after Parse") + } + if *withDashFlag != true { + t.Error("withDashFlag flag should be true, is ", *withDashFlag) + } + if *withUnderFlag != true { + t.Error("withUnderFlag flag should be true, is ", *withUnderFlag) + } + if *withBothFlag != true { + t.Error("withBothFlag flag should be true, is ", *withBothFlag) + } +} + +func TestWordSepNormalizedNames(t *testing.T) { + args := []string{ + "--with-dash-flag", + "--with-under-flag", + "--with-both-flag", + } + testWordSepNormalizedNames(args, t) + + args = []string{ + "--with_dash_flag", + "--with_under_flag", + "--with_both_flag", + } + testWordSepNormalizedNames(args, t) + + args = []string{ + "--with-dash_flag", + "--with-under_flag", + "--with-both_flag", + } + testWordSepNormalizedNames(args, t) +} + +func aliasAndWordSepFlagNames(f *FlagSet, name string) NormalizedName { + seps := []string{"-", "_"} + + oldName := replaceSeparators("old-valid_flag", seps, ".") + newName := replaceSeparators("valid-flag", seps, ".") + + name = replaceSeparators(name, seps, ".") + switch name { + case oldName: + name = newName + break + } + + return NormalizedName(name) +} + +func TestCustomNormalizedNames(t *testing.T) { + f := NewFlagSet("normalized", ContinueOnError) + if f.Parsed() { + t.Error("f.Parse() = true before Parse") + } + + validFlag := f.Bool("valid-flag", false, "bool value") + f.SetNormalizeFunc(aliasAndWordSepFlagNames) + someOtherFlag := f.Bool("some-other-flag", false, "bool value") + + args := []string{"--old_valid_flag", "--some-other_flag"} + if err := f.Parse(args); err != nil { + t.Fatal(err) + } + + if *validFlag != true { + t.Errorf("validFlag is %v even though we set the alias --old_valid_flag", *validFlag) + } + if *someOtherFlag != true { + t.Error("someOtherFlag should be true, is ", *someOtherFlag) + } +} + +// Every flag we add, the name (also displayed in usage) should be normalized +func TestNormalizationFuncShouldChangeFlagName(t *testing.T) { + // Test normalization after addition + f := NewFlagSet("normalized", ContinueOnError) + + f.Bool("valid_flag", false, "bool value") + if f.Lookup("valid_flag").Name != "valid_flag" { + t.Error("The new flag should have the name 'valid_flag' instead of ", f.Lookup("valid_flag").Name) + } + + f.SetNormalizeFunc(wordSepNormalizeFunc) + if f.Lookup("valid_flag").Name != "valid.flag" { + t.Error("The new flag should have the name 'valid.flag' instead of ", f.Lookup("valid_flag").Name) + } + + // Test normalization before addition + f = NewFlagSet("normalized", ContinueOnError) + f.SetNormalizeFunc(wordSepNormalizeFunc) + + f.Bool("valid_flag", false, "bool value") + if f.Lookup("valid_flag").Name != "valid.flag" { + t.Error("The new flag should have the name 'valid.flag' instead
of ", f.Lookup("valid_flag").Name) + } +} + +// Declare a user-defined flag type. +type flagVar []string + +func (f *flagVar) String() string { + return fmt.Sprint([]string(*f)) +} + +func (f *flagVar) Set(value string) error { + *f = append(*f, value) + return nil +} + +func (f *flagVar) Type() string { + return "flagVar" +} + +func TestUserDefined(t *testing.T) { + var flags FlagSet + flags.Init("test", ContinueOnError) + var v flagVar + flags.VarP(&v, "v", "v", "usage") + if err := flags.Parse([]string{"--v=1", "-v2", "-v", "3"}); err != nil { + t.Error(err) + } + if len(v) != 3 { + t.Fatal("expected 3 args; got ", len(v)) + } + expect := "[1 2 3]" + if v.String() != expect { + t.Errorf("expected value %q got %q", expect, v.String()) + } +} + +func TestSetOutput(t *testing.T) { + var flags FlagSet + var buf bytes.Buffer + flags.SetOutput(&buf) + flags.Init("test", ContinueOnError) + flags.Parse([]string{"--unknown"}) + if out := buf.String(); !strings.Contains(out, "--unknown") { + t.Logf("expected output mentioning unknown; got %q", out) + } +} + +// This tests that one can reset the flags. This still works but not well, and is +// superseded by FlagSet. +func TestChangingArgs(t *testing.T) { + ResetForTesting(func() { t.Fatal("bad parse") }) + oldArgs := os.Args + defer func() { os.Args = oldArgs }() + os.Args = []string{"cmd", "--before", "subcmd"} + before := Bool("before", false, "") + if err := GetCommandLine().Parse(os.Args[1:]); err != nil { + t.Fatal(err) + } + cmd := Arg(0) + os.Args = []string{"subcmd", "--after", "args"} + after := Bool("after", false, "") + Parse() + args := Args() + + if !*before || cmd != "subcmd" || !*after || len(args) != 1 || args[0] != "args" { + t.Fatalf("expected true subcmd true [args] got %v %v %v %v", *before, cmd, *after, args) + } +} + +// Test that -help invokes the usage message and returns ErrHelp. +func TestHelp(t *testing.T) { + var helpCalled = false + fs := NewFlagSet("help test", ContinueOnError) + fs.Usage = func() { helpCalled = true } + var flag bool + fs.BoolVar(&flag, "flag", false, "regular flag") + // Regular flag invocation should work + err := fs.Parse([]string{"--flag=true"}) + if err != nil { + t.Fatal("expected no error; got ", err) + } + if !flag { + t.Error("flag was not set by --flag") + } + if helpCalled { + t.Error("help called for regular flag") + helpCalled = false // reset for next test + } + // Help flag should work as expected. + err = fs.Parse([]string{"--help"}) + if err == nil { + t.Fatal("error expected") + } + if err != ErrHelp { + t.Fatal("expected ErrHelp; got ", err) + } + if !helpCalled { + t.Fatal("help was not called") + } + // If we define a help flag, that should override. 
+ var help bool + fs.BoolVar(&help, "help", false, "help flag") + helpCalled = false + err = fs.Parse([]string{"--help"}) + if err != nil { + t.Fatal("expected no error for defined --help; got ", err) + } + if helpCalled { + t.Fatal("help was called; should not have been for defined help flag") + } +} + +func TestNoInterspersed(t *testing.T) { + f := NewFlagSet("test", ContinueOnError) + f.SetInterspersed(false) + f.Bool("true", true, "always true") + f.Bool("false", false, "always false") + err := f.Parse([]string{"--true", "break", "--false"}) + if err != nil { + t.Fatal("expected no error; got ", err) + } + args := f.Args() + if len(args) != 2 || args[0] != "break" || args[1] != "--false" { + t.Fatal("expected interspersed options/non-options to fail") + } +} + +func TestTermination(t *testing.T) { + f := NewFlagSet("termination", ContinueOnError) + boolFlag := f.BoolP("bool", "l", false, "bool value") + if f.Parsed() { + t.Error("f.Parse() = true before Parse") + } + arg1 := "ls" + arg2 := "-l" + args := []string{ + "--", + arg1, + arg2, + } + f.SetOutput(ioutil.Discard) + if err := f.Parse(args); err != nil { + t.Fatal("expected no error; got ", err) + } + if !f.Parsed() { + t.Error("f.Parse() = false after Parse") + } + if *boolFlag { + t.Error("expected boolFlag=false, got true") + } + if len(f.Args()) != 2 { + t.Errorf("expected 2 arguments, got %d: %v", len(f.Args()), f.Args()) + } + if f.Args()[0] != arg1 { + t.Errorf("expected argument %q got %q", arg1, f.Args()[0]) + } + if f.Args()[1] != arg2 { + t.Errorf("expected argument %q got %q", arg2, f.Args()[1]) + } + if f.ArgsLenAtDash() != 0 { + t.Errorf("expected argsLenAtDash %d got %d", 0, f.ArgsLenAtDash()) + } +} + +func TestDeprecatedFlagInDocs(t *testing.T) { + f := NewFlagSet("bob", ContinueOnError) + f.Bool("badflag", true, "always true") + f.MarkDeprecated("badflag", "use --good-flag instead") + + out := new(bytes.Buffer) + f.SetOutput(out) + f.PrintDefaults() + + if strings.Contains(out.String(), "badflag") { + t.Errorf("found deprecated flag in usage!") + } +} + +func TestDeprecatedFlagShorthandInDocs(t *testing.T) { + f := NewFlagSet("bob", ContinueOnError) + name := "noshorthandflag" + f.BoolP(name, "n", true, "always true") + f.MarkShorthandDeprecated("noshorthandflag", fmt.Sprintf("use --%s instead", name)) + + out := new(bytes.Buffer) + f.SetOutput(out) + f.PrintDefaults() + + if strings.Contains(out.String(), "-n,") { + t.Errorf("found deprecated flag shorthand in usage!") + } +} + +func parseReturnStderr(t *testing.T, f *FlagSet, args []string) (string, error) { + oldStderr := os.Stderr + r, w, _ := os.Pipe() + os.Stderr = w + + err := f.Parse(args) + + outC := make(chan string) + // copy the output in a separate goroutine so printing can't block indefinitely + go func() { + var buf bytes.Buffer + io.Copy(&buf, r) + outC <- buf.String() + }() + + w.Close() + os.Stderr = oldStderr + out := <-outC + + return out, err +} + +func TestDeprecatedFlagUsage(t *testing.T) { + f := NewFlagSet("bob", ContinueOnError) + f.Bool("badflag", true, "always true") + usageMsg := "use --good-flag instead" + f.MarkDeprecated("badflag", usageMsg) + + args := []string{"--badflag"} + out, err := parseReturnStderr(t, f, args) + if err != nil { + t.Fatal("expected no error; got ", err) + } + + if !strings.Contains(out, usageMsg) { + t.Errorf("usageMsg not printed when using a deprecated flag!") + } +} + +func TestDeprecatedFlagShorthandUsage(t *testing.T) { + f := NewFlagSet("bob", ContinueOnError) + name := "noshorthandflag" + 
f.BoolP(name, "n", true, "always true") + usageMsg := fmt.Sprintf("use --%s instead", name) + f.MarkShorthandDeprecated(name, usageMsg) + + args := []string{"-n"} + out, err := parseReturnStderr(t, f, args) + if err != nil { + t.Fatal("expected no error; got ", err) + } + + if !strings.Contains(out, usageMsg) { + t.Errorf("usageMsg not printed when using a deprecated flag!") + } +} + +func TestDeprecatedFlagUsageNormalized(t *testing.T) { + f := NewFlagSet("bob", ContinueOnError) + f.Bool("bad-double_flag", true, "always true") + f.SetNormalizeFunc(wordSepNormalizeFunc) + usageMsg := "use --good-flag instead" + f.MarkDeprecated("bad_double-flag", usageMsg) + + args := []string{"--bad_double_flag"} + out, err := parseReturnStderr(t, f, args) + if err != nil { + t.Fatal("expected no error; got ", err) + } + + if !strings.Contains(out, usageMsg) { + t.Errorf("usageMsg not printed when using a deprecated flag!") + } +} + +// Name normalization function should be called only once on flag addition +func TestMultipleNormalizeFlagNameInvocations(t *testing.T) { + normalizeFlagNameInvocations = 0 + + f := NewFlagSet("normalized", ContinueOnError) + f.SetNormalizeFunc(wordSepNormalizeFunc) + f.Bool("with_under_flag", false, "bool value") + + if normalizeFlagNameInvocations != 1 { + t.Fatal("Expected normalizeFlagNameInvocations to be 1; got ", normalizeFlagNameInvocations) + } +} + +// +func TestHiddenFlagInUsage(t *testing.T) { + f := NewFlagSet("bob", ContinueOnError) + f.Bool("secretFlag", true, "shhh") + f.MarkHidden("secretFlag") + + out := new(bytes.Buffer) + f.SetOutput(out) + f.PrintDefaults() + + if strings.Contains(out.String(), "secretFlag") { + t.Errorf("found hidden flag in usage!") + } +} + +// +func TestHiddenFlagUsage(t *testing.T) { + f := NewFlagSet("bob", ContinueOnError) + f.Bool("secretFlag", true, "shhh") + f.MarkHidden("secretFlag") + + args := []string{"--secretFlag"} + out, err := parseReturnStderr(t, f, args) + if err != nil { + t.Fatal("expected no error; got ", err) + } + + if strings.Contains(out, "shhh") { + t.Errorf("usage message printed when using a hidden flag!") + } +} + +const defaultOutput = ` --A for bootstrapping, allow 'any' type + --Alongflagname disable bounds checking + -C, --CCC a boolean defaulting to true (default true) + --D path set relative path for local imports + -E, --EEE num[=1234] a num with NoOptDefVal (default 4321) + --F number a non-zero number (default 2.7) + --G float a float that defaults to zero + --IP ip IP address with no default + --IPMask ipMask Netmask address with no default + --IPNet ipNet IP network with no default + --Ints ints int slice with zero default + --N int a non-zero int (default 27) + --ND1 string[="bar"] a string with NoOptDefVal (default "foo") + --ND2 num[=4321] a num with NoOptDefVal (default 1234) + --StringArray stringArray string array with zero default + --StringSlice strings string slice with zero default + --Z int an int that defaults to zero + --custom custom custom Value implementation + --customP custom a VarP with default (default 10) + --maxT timeout set timeout for dial +` + +// Custom value that satisfies the Value interface. 
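+ // (pflag's Value interface requires the three methods implemented below: String() string, Set(string) error, and Type() string)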
+type customValue int + +func (cv *customValue) String() string { return fmt.Sprintf("%v", *cv) } + +func (cv *customValue) Set(s string) error { + v, err := strconv.ParseInt(s, 0, 64) + *cv = customValue(v) + return err +} + +func (cv *customValue) Type() string { return "custom" } + +func TestPrintDefaults(t *testing.T) { + fs := NewFlagSet("print defaults test", ContinueOnError) + var buf bytes.Buffer + fs.SetOutput(&buf) + fs.Bool("A", false, "for bootstrapping, allow 'any' type") + fs.Bool("Alongflagname", false, "disable bounds checking") + fs.BoolP("CCC", "C", true, "a boolean defaulting to true") + fs.String("D", "", "set relative `path` for local imports") + fs.Float64("F", 2.7, "a non-zero `number`") + fs.Float64("G", 0, "a float that defaults to zero") + fs.Int("N", 27, "a non-zero int") + fs.IntSlice("Ints", []int{}, "int slice with zero default") + fs.IP("IP", nil, "IP address with no default") + fs.IPMask("IPMask", nil, "Netmask address with no default") + fs.IPNet("IPNet", net.IPNet{}, "IP network with no default") + fs.Int("Z", 0, "an int that defaults to zero") + fs.Duration("maxT", 0, "set `timeout` for dial") + fs.String("ND1", "foo", "a string with NoOptDefVal") + fs.Lookup("ND1").NoOptDefVal = "bar" + fs.Int("ND2", 1234, "a `num` with NoOptDefVal") + fs.Lookup("ND2").NoOptDefVal = "4321" + fs.IntP("EEE", "E", 4321, "a `num` with NoOptDefVal") + fs.ShorthandLookup("E").NoOptDefVal = "1234" + fs.StringSlice("StringSlice", []string{}, "string slice with zero default") + fs.StringArray("StringArray", []string{}, "string array with zero default") + + var cv customValue + fs.Var(&cv, "custom", "custom Value implementation") + + cv2 := customValue(10) + fs.VarP(&cv2, "customP", "", "a VarP with default") + + fs.PrintDefaults() + got := buf.String() + if got != defaultOutput { + fmt.Println("\n" + got) + fmt.Println("\n" + defaultOutput) + t.Errorf("got %q want %q\n", got, defaultOutput) + } +} + +func TestVisitAllFlagOrder(t *testing.T) { + fs := NewFlagSet("TestVisitAllFlagOrder", ContinueOnError) + fs.SortFlags = false + // https://github.com/spf13/pflag/issues/120 + fs.SetNormalizeFunc(func(f *FlagSet, name string) NormalizedName { + return NormalizedName(name) + }) + + names := []string{"C", "B", "A", "D"} + for _, name := range names { + fs.Bool(name, false, "") + } + + i := 0 + fs.VisitAll(func(f *Flag) { + if names[i] != f.Name { + t.Errorf("Incorrect order. Expected %v, got %v", names[i], f.Name) + } + i++ + }) +} + +func TestVisitFlagOrder(t *testing.T) { + fs := NewFlagSet("TestVisitFlagOrder", ContinueOnError) + fs.SortFlags = false + names := []string{"C", "B", "A", "D"} + for _, name := range names { + fs.Bool(name, false, "") + fs.Set(name, "true") + } + + i := 0 + fs.Visit(func(f *Flag) { + if names[i] != f.Name { + t.Errorf("Incorrect order. Expected %v, got %v", names[i], f.Name) + } + i++ + }) +} diff --git a/vendor/github.com/spf13/pflag/golangflag_test.go b/vendor/github.com/spf13/pflag/golangflag_test.go new file mode 100644 index 000000000000..77e2d7d80a3d --- /dev/null +++ b/vendor/github.com/spf13/pflag/golangflag_test.go @@ -0,0 +1,39 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package pflag + +import ( + goflag "flag" + "testing" +) + +func TestGoflags(t *testing.T) { + goflag.String("stringFlag", "stringFlag", "stringFlag") + goflag.Bool("boolFlag", false, "boolFlag") + + f := NewFlagSet("test", ContinueOnError) + + f.AddGoFlagSet(goflag.CommandLine) + err := f.Parse([]string{"--stringFlag=bob", "--boolFlag"}) + if err != nil { + t.Fatal("expected no error; got", err) + } + + getString, err := f.GetString("stringFlag") + if err != nil { + t.Fatal("expected no error; got", err) + } + if getString != "bob" { + t.Fatalf("expected getString=bob but got getString=%s", getString) + } + + getBool, err := f.GetBool("boolFlag") + if err != nil { + t.Fatal("expected no error; got", err) + } + if getBool != true { + t.Fatalf("expected getBool=true but got getBool=%v", getBool) + } +} diff --git a/vendor/github.com/spf13/pflag/int_slice_test.go b/vendor/github.com/spf13/pflag/int_slice_test.go new file mode 100644 index 000000000000..745aecb950a2 --- /dev/null +++ b/vendor/github.com/spf13/pflag/int_slice_test.go @@ -0,0 +1,165 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pflag + +import ( + "fmt" + "strconv" + "strings" + "testing" +) + +func setUpISFlagSet(isp *[]int) *FlagSet { + f := NewFlagSet("test", ContinueOnError) + f.IntSliceVar(isp, "is", []int{}, "Comma-separated list!") + return f +} + +func setUpISFlagSetWithDefault(isp *[]int) *FlagSet { + f := NewFlagSet("test", ContinueOnError) + f.IntSliceVar(isp, "is", []int{0, 1}, "Comma-separated list!") + return f +} + +func TestEmptyIS(t *testing.T) { + var is []int + f := setUpISFlagSet(&is) + err := f.Parse([]string{}) + if err != nil { + t.Fatal("expected no error; got", err) + } + + getIS, err := f.GetIntSlice("is") + if err != nil { + t.Fatal("got an error from GetIntSlice():", err) + } + if len(getIS) != 0 { + t.Fatalf("got is %v with len=%d but expected length=0", getIS, len(getIS)) + } +} + +func TestIS(t *testing.T) { + var is []int + f := setUpISFlagSet(&is) + + vals := []string{"1", "2", "4", "3"} + arg := fmt.Sprintf("--is=%s", strings.Join(vals, ",")) + err := f.Parse([]string{arg}) + if err != nil { + t.Fatal("expected no error; got", err) + } + for i, v := range is { + d, err := strconv.Atoi(vals[i]) + if err != nil { + t.Fatalf("got error: %v", err) + } + if d != v { + t.Fatalf("expected is[%d] to be %s but got: %d", i, vals[i], v) + } + } + getIS, err := f.GetIntSlice("is") + if err != nil { + t.Fatalf("got error: %v", err) + } + for i, v := range getIS { + d, err := strconv.Atoi(vals[i]) + if err != nil { + t.Fatalf("got error: %v", err) + } + if d != v { + t.Fatalf("expected is[%d] to be %s but got: %d from GetIntSlice", i, vals[i], v) + } + } +} + +func TestISDefault(t *testing.T) { + var is []int + f := setUpISFlagSetWithDefault(&is) + + vals := []string{"0", "1"} + + err := f.Parse([]string{}) + if err != nil { + t.Fatal("expected no error; got", err) + } + for i, v := range is { + d, err := strconv.Atoi(vals[i]) + if err != nil { + t.Fatalf("got error: %v", err) + } + if d != v { + t.Fatalf("expected is[%d] to be %d but got: %d", i, d, v) + } + } + + getIS, err := f.GetIntSlice("is") + if err != nil { + t.Fatal("got an error from GetIntSlice():", err) + } + for i, v := range getIS { + d, err := strconv.Atoi(vals[i]) + if err != nil { + t.Fatalf("got error: %v", err) + } + if d != v { + t.Fatalf("expected is[%d] to be %d from GetIntSlice but
got: %d", i, d, v) + } + } +} + +func TestISWithDefault(t *testing.T) { + var is []int + f := setUpISFlagSetWithDefault(&is) + + vals := []string{"1", "2"} + arg := fmt.Sprintf("--is=%s", strings.Join(vals, ",")) + err := f.Parse([]string{arg}) + if err != nil { + t.Fatal("expected no error; got", err) + } + for i, v := range is { + d, err := strconv.Atoi(vals[i]) + if err != nil { + t.Fatalf("got error: %v", err) + } + if d != v { + t.Fatalf("expected is[%d] to be %d but got: %d", i, d, v) + } + } + + getIS, err := f.GetIntSlice("is") + if err != nil { + t.Fatal("got an error from GetIntSlice():", err) + } + for i, v := range getIS { + d, err := strconv.Atoi(vals[i]) + if err != nil { + t.Fatalf("got error: %v", err) + } + if d != v { + t.Fatalf("expected is[%d] to be %d from GetIntSlice but got: %d", i, d, v) + } + } +} + +func TestISCalledTwice(t *testing.T) { + var is []int + f := setUpISFlagSet(&is) + + in := []string{"1,2", "3"} + expected := []int{1, 2, 3} + argfmt := "--is=%s" + arg1 := fmt.Sprintf(argfmt, in[0]) + arg2 := fmt.Sprintf(argfmt, in[1]) + err := f.Parse([]string{arg1, arg2}) + if err != nil { + t.Fatal("expected no error; got", err) + } + for i, v := range is { + if expected[i] != v { + t.Fatalf("expected is[%d] to be %d but got: %d", i, expected[i], v) + } + } +} diff --git a/vendor/github.com/spf13/pflag/ip_slice_test.go b/vendor/github.com/spf13/pflag/ip_slice_test.go new file mode 100644 index 000000000000..b0c681c5b2ac --- /dev/null +++ b/vendor/github.com/spf13/pflag/ip_slice_test.go @@ -0,0 +1,222 @@ +package pflag + +import ( + "fmt" + "net" + "strings" + "testing" +) + +func setUpIPSFlagSet(ipsp *[]net.IP) *FlagSet { + f := NewFlagSet("test", ContinueOnError) + f.IPSliceVar(ipsp, "ips", []net.IP{}, "Comma-separated list!") + return f +} + +func setUpIPSFlagSetWithDefault(ipsp *[]net.IP) *FlagSet { + f := NewFlagSet("test", ContinueOnError) + f.IPSliceVar(ipsp, "ips", + []net.IP{ + net.ParseIP("192.168.1.1"), + net.ParseIP("0:0:0:0:0:0:0:1"), + }, + "Comma-separated list!") + return f +} + +func TestEmptyIP(t *testing.T) { + var ips []net.IP + f := setUpIPSFlagSet(&ips) + err := f.Parse([]string{}) + if err != nil { + t.Fatal("expected no error; got", err) + } + + getIPS, err := f.GetIPSlice("ips") + if err != nil { + t.Fatal("got an error from GetIPSlice():", err) + } + if len(getIPS) != 0 { + t.Fatalf("got ips %v with len=%d but expected length=0", getIPS, len(getIPS)) + } +} + +func TestIPS(t *testing.T) { + var ips []net.IP + f := setUpIPSFlagSet(&ips) + + vals := []string{"192.168.1.1", "10.0.0.1", "0:0:0:0:0:0:0:2"} + arg := fmt.Sprintf("--ips=%s", strings.Join(vals, ",")) + err := f.Parse([]string{arg}) + if err != nil { + t.Fatal("expected no error; got", err) + } + for i, v := range ips { + if ip := net.ParseIP(vals[i]); ip == nil { + t.Fatalf("invalid string being converted to IP address: %s", vals[i]) + } else if !ip.Equal(v) { + t.Fatalf("expected ips[%d] to be %s but got: %s", i, vals[i], v) + } + } +} + +func TestIPSDefault(t *testing.T) { + var ips []net.IP + f := setUpIPSFlagSetWithDefault(&ips) + + vals := []string{"192.168.1.1", "0:0:0:0:0:0:0:1"} + err := f.Parse([]string{}) + if err != nil { + t.Fatal("expected no error; got", err) + } + for i, v := range ips { + if ip := net.ParseIP(vals[i]); ip == nil { + t.Fatalf("invalid string being converted to IP address: %s", vals[i]) + } else if !ip.Equal(v) { + t.Fatalf("expected ips[%d] to be %s but got: %s", i, vals[i], v) + } + } + + getIPS, err := f.GetIPSlice("ips") + if
err != nil { + t.Fatal("got an error from GetIPSlice():", err) + } + for i, v := range getIPS { + if ip := net.ParseIP(vals[i]); ip == nil { + t.Fatalf("invalid string being converted to IP address: %s", vals[i]) + } else if !ip.Equal(v) { + t.Fatalf("expected ips[%d] to be %s but got: %s", i, vals[i], v) + } + } +} + +func TestIPSWithDefault(t *testing.T) { + var ips []net.IP + f := setUpIPSFlagSetWithDefault(&ips) + + vals := []string{"192.168.1.1", "0:0:0:0:0:0:0:1"} + arg := fmt.Sprintf("--ips=%s", strings.Join(vals, ",")) + err := f.Parse([]string{arg}) + if err != nil { + t.Fatal("expected no error; got", err) + } + for i, v := range ips { + if ip := net.ParseIP(vals[i]); ip == nil { + t.Fatalf("invalid string being converted to IP address: %s", vals[i]) + } else if !ip.Equal(v) { + t.Fatalf("expected ips[%d] to be %s but got: %s", i, vals[i], v) + } + } + + getIPS, err := f.GetIPSlice("ips") + if err != nil { + t.Fatal("got an error from GetIPSlice():", err) + } + for i, v := range getIPS { + if ip := net.ParseIP(vals[i]); ip == nil { + t.Fatalf("invalid string being converted to IP address: %s", vals[i]) + } else if !ip.Equal(v) { + t.Fatalf("expected ips[%d] to be %s but got: %s", i, vals[i], v) + } + } +} + +func TestIPSCalledTwice(t *testing.T) { + var ips []net.IP + f := setUpIPSFlagSet(&ips) + + in := []string{"192.168.1.2,0:0:0:0:0:0:0:1", "10.0.0.1"} + expected := []net.IP{net.ParseIP("192.168.1.2"), net.ParseIP("0:0:0:0:0:0:0:1"), net.ParseIP("10.0.0.1")} + argfmt := "--ips=%s" + arg1 := fmt.Sprintf(argfmt, in[0]) + arg2 := fmt.Sprintf(argfmt, in[1]) + err := f.Parse([]string{arg1, arg2}) + if err != nil { + t.Fatal("expected no error; got", err) + } + for i, v := range ips { + if !expected[i].Equal(v) { + t.Fatalf("expected ips[%d] to be %s but got: %s", i, expected[i], v) + } + } +} + +func TestIPSBadQuoting(t *testing.T) { + + tests := []struct { + Want []net.IP + FlagArg []string + }{ + { + Want: []net.IP{ + net.ParseIP("a4ab:61d:f03e:5d7d:fad7:d4c2:a1a5:568"), + net.ParseIP("203.107.49.208"), + net.ParseIP("14.57.204.90"), + }, + FlagArg: []string{ + "a4ab:61d:f03e:5d7d:fad7:d4c2:a1a5:568", + "203.107.49.208", + "14.57.204.90", + }, + }, + { + Want: []net.IP{ + net.ParseIP("204.228.73.195"), + net.ParseIP("86.141.15.94"), + }, + FlagArg: []string{ + "204.228.73.195", + "86.141.15.94", + }, + }, + { + Want: []net.IP{ + net.ParseIP("c70c:db36:3001:890f:c6ea:3f9b:7a39:cc3f"), + net.ParseIP("4d17:1d6e:e699:bd7a:88c5:5e7e:ac6a:4472"), + }, + FlagArg: []string{ + "c70c:db36:3001:890f:c6ea:3f9b:7a39:cc3f", + "4d17:1d6e:e699:bd7a:88c5:5e7e:ac6a:4472", + }, + }, + { + Want: []net.IP{ + net.ParseIP("5170:f971:cfac:7be3:512a:af37:952c:bc33"), + net.ParseIP("93.21.145.140"), + net.ParseIP("2cac:61d3:c5ff:6caf:73e0:1b1a:c336:c1ca"), + }, + FlagArg: []string{ + " 5170:f971:cfac:7be3:512a:af37:952c:bc33 , 93.21.145.140 ", + "2cac:61d3:c5ff:6caf:73e0:1b1a:c336:c1ca", + }, + }, + { + Want: []net.IP{ + net.ParseIP("2e5e:66b2:6441:848:5b74:76ea:574c:3a7b"), + net.ParseIP("2e5e:66b2:6441:848:5b74:76ea:574c:3a7b"), + net.ParseIP("2e5e:66b2:6441:848:5b74:76ea:574c:3a7b"), + net.ParseIP("2e5e:66b2:6441:848:5b74:76ea:574c:3a7b"), + }, + FlagArg: []string{ + `"2e5e:66b2:6441:848:5b74:76ea:574c:3a7b, 2e5e:66b2:6441:848:5b74:76ea:574c:3a7b,2e5e:66b2:6441:848:5b74:76ea:574c:3a7b "`, + " 2e5e:66b2:6441:848:5b74:76ea:574c:3a7b"}, + }, + } + + for i, test := range tests { + + var ips []net.IP + f := setUpIPSFlagSet(&ips) + + if err := f.Parse([]string{fmt.Sprintf("--ips=%s", strings.Join(test.FlagArg, ","))}); err !=
nil { + t.Fatalf("flag parsing failed with error: %s\nparsing:\t%#v\nwant:\t\t%s", + err, test.FlagArg, test.Want[i]) + } + + for j, b := range ips { + if !b.Equal(test.Want[j]) { + t.Fatalf("bad value parsed for test %d on net.IP %d:\nwant:\t%s\ngot:\t%s", i, j, test.Want[j], b) + } + } + } +} diff --git a/vendor/github.com/spf13/pflag/ip_test.go b/vendor/github.com/spf13/pflag/ip_test.go new file mode 100644 index 000000000000..1fec50e4252d --- /dev/null +++ b/vendor/github.com/spf13/pflag/ip_test.go @@ -0,0 +1,63 @@ +package pflag + +import ( + "fmt" + "net" + "os" + "testing" +) + +func setUpIP(ip *net.IP) *FlagSet { + f := NewFlagSet("test", ContinueOnError) + f.IPVar(ip, "address", net.ParseIP("0.0.0.0"), "IP Address") + return f +} + +func TestIP(t *testing.T) { + testCases := []struct { + input string + success bool + expected string + }{ + {"0.0.0.0", true, "0.0.0.0"}, + {" 0.0.0.0 ", true, "0.0.0.0"}, + {"1.2.3.4", true, "1.2.3.4"}, + {"127.0.0.1", true, "127.0.0.1"}, + {"255.255.255.255", true, "255.255.255.255"}, + {"", false, ""}, + {"0", false, ""}, + {"localhost", false, ""}, + {"0.0.0", false, ""}, + {"0.0.0.", false, ""}, + {"0.0.0.0.", false, ""}, + {"0.0.0.256", false, ""}, + {"0 . 0 . 0 . 0", false, ""}, + } + + devnull, _ := os.Open(os.DevNull) + os.Stderr = devnull + for i := range testCases { + var addr net.IP + f := setUpIP(&addr) + + tc := &testCases[i] + + arg := fmt.Sprintf("--address=%s", tc.input) + err := f.Parse([]string{arg}) + if err != nil && tc.success == true { + t.Errorf("expected success, got %q", err) + continue + } else if err == nil && tc.success == false { + t.Errorf("expected failure") + continue + } else if tc.success { + ip, err := f.GetIP("address") + if err != nil { + t.Errorf("Got error trying to fetch the IP flag: %v", err) + } + if ip.String() != tc.expected { + t.Errorf("expected %q, got %q", tc.expected, ip.String()) + } + } + } +} diff --git a/vendor/github.com/spf13/pflag/ipnet_test.go b/vendor/github.com/spf13/pflag/ipnet_test.go new file mode 100644 index 000000000000..335b6fa156d3 --- /dev/null +++ b/vendor/github.com/spf13/pflag/ipnet_test.go @@ -0,0 +1,70 @@ +package pflag + +import ( + "fmt" + "net" + "os" + "testing" +) + +func setUpIPNet(ip *net.IPNet) *FlagSet { + f := NewFlagSet("test", ContinueOnError) + _, def, _ := net.ParseCIDR("0.0.0.0/0") + f.IPNetVar(ip, "address", *def, "IP Address") + return f +} + +func TestIPNet(t *testing.T) { + testCases := []struct { + input string + success bool + expected string + }{ + {"0.0.0.0/0", true, "0.0.0.0/0"}, + {" 0.0.0.0/0 ", true, "0.0.0.0/0"}, + {"1.2.3.4/8", true, "1.0.0.0/8"}, + {"127.0.0.1/16", true, "127.0.0.0/16"}, + {"255.255.255.255/19", true, "255.255.224.0/19"}, + {"255.255.255.255/32", true, "255.255.255.255/32"}, + {"", false, ""}, + {"/0", false, ""}, + {"0", false, ""}, + {"0/0", false, ""}, + {"localhost/0", false, ""}, + {"0.0.0/4", false, ""}, + {"0.0.0./8", false, ""}, + {"0.0.0.0./12", false, ""}, + {"0.0.0.256/16", false, ""}, + {"0.0.0.0 /20", false, ""}, + {"0.0.0.0/ 24", false, ""}, + {"0 . 0 . 0 . 
0 / 28", false, ""}, + {"0.0.0.0/33", false, ""}, + } + + devnull, _ := os.Open(os.DevNull) + os.Stderr = devnull + for i := range testCases { + var addr net.IPNet + f := setUpIPNet(&addr) + + tc := &testCases[i] + + arg := fmt.Sprintf("--address=%s", tc.input) + err := f.Parse([]string{arg}) + if err != nil && tc.success == true { + t.Errorf("expected success, got %q", err) + continue + } else if err == nil && tc.success == false { + t.Errorf("expected failure") + continue + } else if tc.success { + ip, err := f.GetIPNet("address") + if err != nil { + t.Errorf("Got error trying to fetch the IP flag: %v", err) + } + if ip.String() != tc.expected { + t.Errorf("expected %q, got %q", tc.expected, ip.String()) + } + } + } +} diff --git a/vendor/github.com/spf13/pflag/string_array_test.go b/vendor/github.com/spf13/pflag/string_array_test.go new file mode 100644 index 000000000000..1ceac8c6c699 --- /dev/null +++ b/vendor/github.com/spf13/pflag/string_array_test.go @@ -0,0 +1,233 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pflag + +import ( + "fmt" + "testing" +) + +func setUpSAFlagSet(sap *[]string) *FlagSet { + f := NewFlagSet("test", ContinueOnError) + f.StringArrayVar(sap, "sa", []string{}, "Command separated list!") + return f +} + +func setUpSAFlagSetWithDefault(sap *[]string) *FlagSet { + f := NewFlagSet("test", ContinueOnError) + f.StringArrayVar(sap, "sa", []string{"default", "values"}, "Command separated list!") + return f +} + +func TestEmptySA(t *testing.T) { + var sa []string + f := setUpSAFlagSet(&sa) + err := f.Parse([]string{}) + if err != nil { + t.Fatal("expected no error; got", err) + } + + getSA, err := f.GetStringArray("sa") + if err != nil { + t.Fatal("got an error from GetStringArray():", err) + } + if len(getSA) != 0 { + t.Fatalf("got sa %v with len=%d but expected length=0", getSA, len(getSA)) + } +} + +func TestEmptySAValue(t *testing.T) { + var sa []string + f := setUpSAFlagSet(&sa) + err := f.Parse([]string{"--sa="}) + if err != nil { + t.Fatal("expected no error; got", err) + } + + getSA, err := f.GetStringArray("sa") + if err != nil { + t.Fatal("got an error from GetStringArray():", err) + } + if len(getSA) != 0 { + t.Fatalf("got sa %v with len=%d but expected length=0", getSA, len(getSA)) + } +} + +func TestSADefault(t *testing.T) { + var sa []string + f := setUpSAFlagSetWithDefault(&sa) + + vals := []string{"default", "values"} + + err := f.Parse([]string{}) + if err != nil { + t.Fatal("expected no error; got", err) + } + for i, v := range sa { + if vals[i] != v { + t.Fatalf("expected sa[%d] to be %s but got: %s", i, vals[i], v) + } + } + + getSA, err := f.GetStringArray("sa") + if err != nil { + t.Fatal("got an error from GetStringArray():", err) + } + for i, v := range getSA { + if vals[i] != v { + t.Fatalf("expected sa[%d] to be %s from GetStringArray but got: %s", i, vals[i], v) + } + } +} + +func TestSAWithDefault(t *testing.T) { + var sa []string + f := setUpSAFlagSetWithDefault(&sa) + + val := "one" + arg := fmt.Sprintf("--sa=%s", val) + err := f.Parse([]string{arg}) + if err != nil { + t.Fatal("expected no error; got", err) + } + + if len(sa) != 1 { + t.Fatalf("expected number of values to be %d but %d", 1, len(sa)) + } + + if sa[0] != val { + t.Fatalf("expected value to be %s but got: %s", sa[0], val) + } + + getSA, err := f.GetStringArray("sa") + if err != nil { + t.Fatal("got an error from GetStringArray():", err) + } + + if 
len(getSA) != 1 { + t.Fatalf("expected number of values to be %d but %d", 1, len(getSA)) + } + + if getSA[0] != val { + t.Fatalf("expected value to be %s but got: %s", getSA[0], val) + } +} + +func TestSACalledTwice(t *testing.T) { + var sa []string + f := setUpSAFlagSet(&sa) + + in := []string{"one", "two"} + expected := []string{"one", "two"} + argfmt := "--sa=%s" + arg1 := fmt.Sprintf(argfmt, in[0]) + arg2 := fmt.Sprintf(argfmt, in[1]) + err := f.Parse([]string{arg1, arg2}) + if err != nil { + t.Fatal("expected no error; got", err) + } + + if len(expected) != len(sa) { + t.Fatalf("expected number of sa to be %d but got: %d", len(expected), len(sa)) + } + for i, v := range sa { + if expected[i] != v { + t.Fatalf("expected sa[%d] to be %s but got: %s", i, expected[i], v) + } + } + + values, err := f.GetStringArray("sa") + if err != nil { + t.Fatal("expected no error; got", err) + } + + if len(expected) != len(values) { + t.Fatalf("expected number of values to be %d but got: %d", len(expected), len(sa)) + } + for i, v := range values { + if expected[i] != v { + t.Fatalf("expected got sa[%d] to be %s but got: %s", i, expected[i], v) + } + } +} + +func TestSAWithSpecialChar(t *testing.T) { + var sa []string + f := setUpSAFlagSet(&sa) + + in := []string{"one,two", `"three"`, `"four,five",six`, "seven eight"} + expected := []string{"one,two", `"three"`, `"four,five",six`, "seven eight"} + argfmt := "--sa=%s" + arg1 := fmt.Sprintf(argfmt, in[0]) + arg2 := fmt.Sprintf(argfmt, in[1]) + arg3 := fmt.Sprintf(argfmt, in[2]) + arg4 := fmt.Sprintf(argfmt, in[3]) + err := f.Parse([]string{arg1, arg2, arg3, arg4}) + if err != nil { + t.Fatal("expected no error; got", err) + } + + if len(expected) != len(sa) { + t.Fatalf("expected number of sa to be %d but got: %d", len(expected), len(sa)) + } + for i, v := range sa { + if expected[i] != v { + t.Fatalf("expected sa[%d] to be %s but got: %s", i, expected[i], v) + } + } + + values, err := f.GetStringArray("sa") + if err != nil { + t.Fatal("expected no error; got", err) + } + + if len(expected) != len(values) { + t.Fatalf("expected number of values to be %d but got: %d", len(expected), len(values)) + } + for i, v := range values { + if expected[i] != v { + t.Fatalf("expected got sa[%d] to be %s but got: %s", i, expected[i], v) + } + } +} + +func TestSAWithSquareBrackets(t *testing.T) { + var sa []string + f := setUpSAFlagSet(&sa) + + in := []string{"][]-[", "[a-z]", "[a-z]+"} + expected := []string{"][]-[", "[a-z]", "[a-z]+"} + argfmt := "--sa=%s" + arg1 := fmt.Sprintf(argfmt, in[0]) + arg2 := fmt.Sprintf(argfmt, in[1]) + arg3 := fmt.Sprintf(argfmt, in[2]) + err := f.Parse([]string{arg1, arg2, arg3}) + if err != nil { + t.Fatal("expected no error; got", err) + } + + if len(expected) != len(sa) { + t.Fatalf("expected number of sa to be %d but got: %d", len(expected), len(sa)) + } + for i, v := range sa { + if expected[i] != v { + t.Fatalf("expected sa[%d] to be %s but got: %s", i, expected[i], v) + } + } + + values, err := f.GetStringArray("sa") + if err != nil { + t.Fatal("expected no error; got", err) + } + + if len(expected) != len(values) { + t.Fatalf("expected number of values to be %d but got: %d", len(expected), len(values)) + } + for i, v := range values { + if expected[i] != v { + t.Fatalf("expected got sa[%d] to be %s but got: %s", i, expected[i], v) + } + } +} diff --git a/vendor/github.com/spf13/pflag/string_slice_test.go b/vendor/github.com/spf13/pflag/string_slice_test.go new file mode 100644 index 000000000000..c41f3bd660d5 --- /dev/null +++ 
b/vendor/github.com/spf13/pflag/string_slice_test.go @@ -0,0 +1,253 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pflag + +import ( + "fmt" + "strings" + "testing" +) + +func setUpSSFlagSet(ssp *[]string) *FlagSet { + f := NewFlagSet("test", ContinueOnError) + f.StringSliceVar(ssp, "ss", []string{}, "Command separated list!") + return f +} + +func setUpSSFlagSetWithDefault(ssp *[]string) *FlagSet { + f := NewFlagSet("test", ContinueOnError) + f.StringSliceVar(ssp, "ss", []string{"default", "values"}, "Command separated list!") + return f +} + +func TestEmptySS(t *testing.T) { + var ss []string + f := setUpSSFlagSet(&ss) + err := f.Parse([]string{}) + if err != nil { + t.Fatal("expected no error; got", err) + } + + getSS, err := f.GetStringSlice("ss") + if err != nil { + t.Fatal("got an error from GetStringSlice():", err) + } + if len(getSS) != 0 { + t.Fatalf("got ss %v with len=%d but expected length=0", getSS, len(getSS)) + } +} + +func TestEmptySSValue(t *testing.T) { + var ss []string + f := setUpSSFlagSet(&ss) + err := f.Parse([]string{"--ss="}) + if err != nil { + t.Fatal("expected no error; got", err) + } + + getSS, err := f.GetStringSlice("ss") + if err != nil { + t.Fatal("got an error from GetStringSlice():", err) + } + if len(getSS) != 0 { + t.Fatalf("got ss %v with len=%d but expected length=0", getSS, len(getSS)) + } +} + +func TestSS(t *testing.T) { + var ss []string + f := setUpSSFlagSet(&ss) + + vals := []string{"one", "two", "4", "3"} + arg := fmt.Sprintf("--ss=%s", strings.Join(vals, ",")) + err := f.Parse([]string{arg}) + if err != nil { + t.Fatal("expected no error; got", err) + } + for i, v := range ss { + if vals[i] != v { + t.Fatalf("expected ss[%d] to be %s but got: %s", i, vals[i], v) + } + } + + getSS, err := f.GetStringSlice("ss") + if err != nil { + t.Fatal("got an error from GetStringSlice():", err) + } + for i, v := range getSS { + if vals[i] != v { + t.Fatalf("expected ss[%d] to be %s from GetStringSlice but got: %s", i, vals[i], v) + } + } +} + +func TestSSDefault(t *testing.T) { + var ss []string + f := setUpSSFlagSetWithDefault(&ss) + + vals := []string{"default", "values"} + + err := f.Parse([]string{}) + if err != nil { + t.Fatal("expected no error; got", err) + } + for i, v := range ss { + if vals[i] != v { + t.Fatalf("expected ss[%d] to be %s but got: %s", i, vals[i], v) + } + } + + getSS, err := f.GetStringSlice("ss") + if err != nil { + t.Fatal("got an error from GetStringSlice():", err) + } + for i, v := range getSS { + if vals[i] != v { + t.Fatalf("expected ss[%d] to be %s from GetStringSlice but got: %s", i, vals[i], v) + } + } +} + +func TestSSWithDefault(t *testing.T) { + var ss []string + f := setUpSSFlagSetWithDefault(&ss) + + vals := []string{"one", "two", "4", "3"} + arg := fmt.Sprintf("--ss=%s", strings.Join(vals, ",")) + err := f.Parse([]string{arg}) + if err != nil { + t.Fatal("expected no error; got", err) + } + for i, v := range ss { + if vals[i] != v { + t.Fatalf("expected ss[%d] to be %s but got: %s", i, vals[i], v) + } + } + + getSS, err := f.GetStringSlice("ss") + if err != nil { + t.Fatal("got an error from GetStringSlice():", err) + } + for i, v := range getSS { + if vals[i] != v { + t.Fatalf("expected ss[%d] to be %s from GetStringSlice but got: %s", i, vals[i], v) + } + } +} + +func TestSSCalledTwice(t *testing.T) { + var ss []string + f := setUpSSFlagSet(&ss) + + in := []string{"one,two", "three"} + 
expected := []string{"one", "two", "three"} + argfmt := "--ss=%s" + arg1 := fmt.Sprintf(argfmt, in[0]) + arg2 := fmt.Sprintf(argfmt, in[1]) + err := f.Parse([]string{arg1, arg2}) + if err != nil { + t.Fatal("expected no error; got", err) + } + + if len(expected) != len(ss) { + t.Fatalf("expected number of ss to be %d but got: %d", len(expected), len(ss)) + } + for i, v := range ss { + if expected[i] != v { + t.Fatalf("expected ss[%d] to be %s but got: %s", i, expected[i], v) + } + } + + values, err := f.GetStringSlice("ss") + if err != nil { + t.Fatal("expected no error; got", err) + } + + if len(expected) != len(values) { + t.Fatalf("expected number of values to be %d but got: %d", len(expected), len(ss)) + } + for i, v := range values { + if expected[i] != v { + t.Fatalf("expected got ss[%d] to be %s but got: %s", i, expected[i], v) + } + } +} + +func TestSSWithComma(t *testing.T) { + var ss []string + f := setUpSSFlagSet(&ss) + + in := []string{`"one,two"`, `"three"`, `"four,five",six`} + expected := []string{"one,two", "three", "four,five", "six"} + argfmt := "--ss=%s" + arg1 := fmt.Sprintf(argfmt, in[0]) + arg2 := fmt.Sprintf(argfmt, in[1]) + arg3 := fmt.Sprintf(argfmt, in[2]) + err := f.Parse([]string{arg1, arg2, arg3}) + if err != nil { + t.Fatal("expected no error; got", err) + } + + if len(expected) != len(ss) { + t.Fatalf("expected number of ss to be %d but got: %d", len(expected), len(ss)) + } + for i, v := range ss { + if expected[i] != v { + t.Fatalf("expected ss[%d] to be %s but got: %s", i, expected[i], v) + } + } + + values, err := f.GetStringSlice("ss") + if err != nil { + t.Fatal("expected no error; got", err) + } + + if len(expected) != len(values) { + t.Fatalf("expected number of values to be %d but got: %d", len(expected), len(values)) + } + for i, v := range values { + if expected[i] != v { + t.Fatalf("expected got ss[%d] to be %s but got: %s", i, expected[i], v) + } + } +} + +func TestSSWithSquareBrackets(t *testing.T) { + var ss []string + f := setUpSSFlagSet(&ss) + + in := []string{`"[a-z]"`, `"[a-z]+"`} + expected := []string{"[a-z]", "[a-z]+"} + argfmt := "--ss=%s" + arg1 := fmt.Sprintf(argfmt, in[0]) + arg2 := fmt.Sprintf(argfmt, in[1]) + err := f.Parse([]string{arg1, arg2}) + if err != nil { + t.Fatal("expected no error; got", err) + } + + if len(expected) != len(ss) { + t.Fatalf("expected number of ss to be %d but got: %d", len(expected), len(ss)) + } + for i, v := range ss { + if expected[i] != v { + t.Fatalf("expected ss[%d] to be %s but got: %s", i, expected[i], v) + } + } + + values, err := f.GetStringSlice("ss") + if err != nil { + t.Fatal("expected no error; got", err) + } + + if len(expected) != len(values) { + t.Fatalf("expected number of values to be %d but got: %d", len(expected), len(values)) + } + for i, v := range values { + if expected[i] != v { + t.Fatalf("expected got ss[%d] to be %s but got: %s", i, expected[i], v) + } + } +} diff --git a/vendor/github.com/spf13/pflag/uint_slice_test.go b/vendor/github.com/spf13/pflag/uint_slice_test.go new file mode 100644 index 000000000000..db1a19dc2d64 --- /dev/null +++ b/vendor/github.com/spf13/pflag/uint_slice_test.go @@ -0,0 +1,161 @@ +package pflag + +import ( + "fmt" + "strconv" + "strings" + "testing" +) + +func setUpUISFlagSet(uisp *[]uint) *FlagSet { + f := NewFlagSet("test", ContinueOnError) + f.UintSliceVar(uisp, "uis", []uint{}, "Command separated list!") + return f +} + +func setUpUISFlagSetWithDefault(uisp *[]uint) *FlagSet { + f := NewFlagSet("test", ContinueOnError) + f.UintSliceVar(uisp, 
"uis", []uint{0, 1}, "Command separated list!") + return f +} + +func TestEmptyUIS(t *testing.T) { + var uis []uint + f := setUpUISFlagSet(&uis) + err := f.Parse([]string{}) + if err != nil { + t.Fatal("expected no error; got", err) + } + + getUIS, err := f.GetUintSlice("uis") + if err != nil { + t.Fatal("got an error from GetUintSlice():", err) + } + if len(getUIS) != 0 { + t.Fatalf("got is %v with len=%d but expected length=0", getUIS, len(getUIS)) + } +} + +func TestUIS(t *testing.T) { + var uis []uint + f := setUpUISFlagSet(&uis) + + vals := []string{"1", "2", "4", "3"} + arg := fmt.Sprintf("--uis=%s", strings.Join(vals, ",")) + err := f.Parse([]string{arg}) + if err != nil { + t.Fatal("expected no error; got", err) + } + for i, v := range uis { + u, err := strconv.ParseUint(vals[i], 10, 0) + if err != nil { + t.Fatalf("got error: %v", err) + } + if uint(u) != v { + t.Fatalf("expected uis[%d] to be %s but got %d", i, vals[i], v) + } + } + getUIS, err := f.GetUintSlice("uis") + if err != nil { + t.Fatalf("got error: %v", err) + } + for i, v := range getUIS { + u, err := strconv.ParseUint(vals[i], 10, 0) + if err != nil { + t.Fatalf("got error: %v", err) + } + if uint(u) != v { + t.Fatalf("expected uis[%d] to be %s but got: %d from GetUintSlice", i, vals[i], v) + } + } +} + +func TestUISDefault(t *testing.T) { + var uis []uint + f := setUpUISFlagSetWithDefault(&uis) + + vals := []string{"0", "1"} + + err := f.Parse([]string{}) + if err != nil { + t.Fatal("expected no error; got", err) + } + for i, v := range uis { + u, err := strconv.ParseUint(vals[i], 10, 0) + if err != nil { + t.Fatalf("got error: %v", err) + } + if uint(u) != v { + t.Fatalf("expect uis[%d] to be %d but got: %d", i, u, v) + } + } + + getUIS, err := f.GetUintSlice("uis") + if err != nil { + t.Fatal("got an error from GetUintSlice():", err) + } + for i, v := range getUIS { + u, err := strconv.ParseUint(vals[i], 10, 0) + if err != nil { + t.Fatal("got an error from GetIntSlice():", err) + } + if uint(u) != v { + t.Fatalf("expected uis[%d] to be %d from GetUintSlice but got: %d", i, u, v) + } + } +} + +func TestUISWithDefault(t *testing.T) { + var uis []uint + f := setUpUISFlagSetWithDefault(&uis) + + vals := []string{"1", "2"} + arg := fmt.Sprintf("--uis=%s", strings.Join(vals, ",")) + err := f.Parse([]string{arg}) + if err != nil { + t.Fatal("expected no error; got", err) + } + for i, v := range uis { + u, err := strconv.ParseUint(vals[i], 10, 0) + if err != nil { + t.Fatalf("got error: %v", err) + } + if uint(u) != v { + t.Fatalf("expected uis[%d] to be %d from GetUintSlice but got: %d", i, u, v) + } + } + + getUIS, err := f.GetUintSlice("uis") + if err != nil { + t.Fatal("got an error from GetUintSlice():", err) + } + for i, v := range getUIS { + u, err := strconv.ParseUint(vals[i], 10, 0) + if err != nil { + t.Fatalf("got error: %v", err) + } + if uint(u) != v { + t.Fatalf("expected uis[%d] to be %d from GetUintSlice but got: %d", i, u, v) + } + } +} + +func TestUISCalledTwice(t *testing.T) { + var uis []uint + f := setUpUISFlagSet(&uis) + + in := []string{"1,2", "3"} + expected := []int{1, 2, 3} + argfmt := "--uis=%s" + arg1 := fmt.Sprintf(argfmt, in[0]) + arg2 := fmt.Sprintf(argfmt, in[1]) + err := f.Parse([]string{arg1, arg2}) + if err != nil { + t.Fatal("expected no error; got", err) + } + for i, v := range uis { + if uint(expected[i]) != v { + t.Fatalf("expected uis[%d] to be %d but got: %d", i, expected[i], v) + } + } +} diff --git a/vendor/golang.org/x/crypto/.gitattributes 
b/vendor/golang.org/x/crypto/.gitattributes new file mode 100644 index 000000000000..d2f212e5da80 --- /dev/null +++ b/vendor/golang.org/x/crypto/.gitattributes @@ -0,0 +1,10 @@ +# Treat all files in this repo as binary, with no git magic updating +# line endings. Windows users contributing to Go will need to use a +# modern version of git and editors capable of LF line endings. +# +# We'll prevent accidental CRLF line endings from entering the repo +# via the git-review gofmt checks. +# +# See golang.org/issue/9281 + +* -text diff --git a/vendor/golang.org/x/crypto/.gitignore b/vendor/golang.org/x/crypto/.gitignore new file mode 100644 index 000000000000..8339fd61d3f4 --- /dev/null +++ b/vendor/golang.org/x/crypto/.gitignore @@ -0,0 +1,2 @@ +# Add no patterns to .hgignore except for files generated by the build. +last-change diff --git a/vendor/golang.org/x/crypto/CONTRIBUTING.md b/vendor/golang.org/x/crypto/CONTRIBUTING.md new file mode 100644 index 000000000000..88dff59bc7d2 --- /dev/null +++ b/vendor/golang.org/x/crypto/CONTRIBUTING.md @@ -0,0 +1,31 @@ +# Contributing to Go + +Go is an open source project. + +It is the work of hundreds of contributors. We appreciate your help! + + +## Filing issues + +When [filing an issue](https://golang.org/issue/new), make sure to answer these five questions: + +1. What version of Go are you using (`go version`)? +2. What operating system and processor architecture are you using? +3. What did you do? +4. What did you expect to see? +5. What did you see instead? + +General questions should go to the [golang-nuts mailing list](https://groups.google.com/group/golang-nuts) instead of the issue tracker. +The gophers there will answer or ask you to file an issue if you've tripped over a bug. + +## Contributing code + +Please read the [Contribution Guidelines](https://golang.org/doc/contribute.html) +before sending patches. + +**We do not accept GitHub pull requests** +(we use [Gerrit](https://code.google.com/p/gerrit/) instead for code review). + +Unless otherwise noted, the Go source files are distributed under +the BSD-style license found in the LICENSE file. + diff --git a/vendor/golang.org/x/crypto/README.md b/vendor/golang.org/x/crypto/README.md new file mode 100644 index 000000000000..c9d6fecd1e5f --- /dev/null +++ b/vendor/golang.org/x/crypto/README.md @@ -0,0 +1,21 @@ +# Go Cryptography + +This repository holds supplementary Go cryptography libraries. + +## Download/Install + +The easiest way to install is to run `go get -u golang.org/x/crypto/...`. You +can also manually git clone the repository to `$GOPATH/src/golang.org/x/crypto`. + +## Report Issues / Send Patches + +This repository uses Gerrit for code changes. To learn how to submit changes to +this repository, see https://golang.org/doc/contribute.html. + +The main issue tracker for the crypto repository is located at +https://github.com/golang/go/issues. Prefix your issue with "x/crypto:" in the +subject line, so it is easy to find. + +Note that contributions to the cryptography package receive additional scrutiny +due to their sensitive nature. Patches may take longer than normal to receive +feedback. 
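For orientation, a minimal sketch of consuming one of these supplementary packages — the ed25519 sign/verify flow that the vendored tests below also exercise — might look like the following. This is an illustrative sketch only, assuming the package's exported GenerateKey, Sign, and Verify API; it is not part of the vendored sources.

package main

import (
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/ed25519"
)

func main() {
	// GenerateKey draws entropy from the supplied reader (crypto/rand here).
	pub, priv, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		panic(err)
	}

	msg := []byte("hello, crypto")
	// Sign returns a 64-byte detached signature over msg.
	sig := ed25519.Sign(priv, msg)

	// Verify reports whether sig is a valid signature of msg under pub.
	fmt.Println("signature valid:", ed25519.Verify(pub, msg, sig))
}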
diff --git a/vendor/golang.org/x/crypto/codereview.cfg b/vendor/golang.org/x/crypto/codereview.cfg new file mode 100644 index 000000000000..3f8b14b64e83 --- /dev/null +++ b/vendor/golang.org/x/crypto/codereview.cfg @@ -0,0 +1 @@ +issuerepo: golang/go diff --git a/vendor/golang.org/x/crypto/curve25519/BUILD b/vendor/golang.org/x/crypto/curve25519/BUILD index 2c5638b2dcc1..ed06a617e137 100644 --- a/vendor/golang.org/x/crypto/curve25519/BUILD +++ b/vendor/golang.org/x/crypto/curve25519/BUILD @@ -1,4 +1,4 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "go_default_library", @@ -42,6 +42,13 @@ go_library( visibility = ["//visibility:public"], ) +go_test( + name = "go_default_test", + srcs = ["curve25519_test.go"], + importpath = "golang.org/x/crypto/curve25519", + library = ":go_default_library", +) + filegroup( name = "package-srcs", srcs = glob(["**"]), diff --git a/vendor/golang.org/x/crypto/curve25519/curve25519_test.go b/vendor/golang.org/x/crypto/curve25519/curve25519_test.go new file mode 100644 index 000000000000..051a8301f080 --- /dev/null +++ b/vendor/golang.org/x/crypto/curve25519/curve25519_test.go @@ -0,0 +1,39 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package curve25519 + +import ( + "fmt" + "testing" +) + +const expectedHex = "89161fde887b2b53de549af483940106ecc114d6982daa98256de23bdf77661a" + +func TestBaseScalarMult(t *testing.T) { + var a, b [32]byte + in := &a + out := &b + a[0] = 1 + + for i := 0; i < 200; i++ { + ScalarBaseMult(out, in) + in, out = out, in + } + + result := fmt.Sprintf("%x", in[:]) + if result != expectedHex { + t.Errorf("incorrect result: got %s, want %s", result, expectedHex) + } +} + +func BenchmarkScalarBaseMult(b *testing.B) { + var in, out [32]byte + in[0] = 1 + + b.SetBytes(32) + for i := 0; i < b.N; i++ { + ScalarBaseMult(&out, &in) + } +} diff --git a/vendor/golang.org/x/crypto/curve25519/doc.go b/vendor/golang.org/x/crypto/curve25519/doc.go index 076a8d4f10aa..da9b10d9c1ff 100644 --- a/vendor/golang.org/x/crypto/curve25519/doc.go +++ b/vendor/golang.org/x/crypto/curve25519/doc.go @@ -4,7 +4,7 @@ // Package curve25519 provides an implementation of scalar multiplication on // the elliptic curve known as curve25519. See https://cr.yp.to/ecdh.html -package curve25519 +package curve25519 // import "golang.org/x/crypto/curve25519" // basePoint is the x coordinate of the generator of the curve. 
var basePoint = [32]byte{9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} diff --git a/vendor/golang.org/x/crypto/ed25519/BUILD b/vendor/golang.org/x/crypto/ed25519/BUILD index 3315a838d974..7be0aca8ff6a 100644 --- a/vendor/golang.org/x/crypto/ed25519/BUILD +++ b/vendor/golang.org/x/crypto/ed25519/BUILD @@ -1,4 +1,4 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "go_default_library", @@ -8,6 +8,14 @@ go_library( deps = ["//vendor/golang.org/x/crypto/ed25519/internal/edwards25519:go_default_library"], ) +go_test( + name = "go_default_test", + srcs = ["ed25519_test.go"], + importpath = "golang.org/x/crypto/ed25519", + library = ":go_default_library", + deps = ["//vendor/golang.org/x/crypto/ed25519/internal/edwards25519:go_default_library"], +) + filegroup( name = "package-srcs", srcs = glob(["**"]), diff --git a/vendor/golang.org/x/crypto/ed25519/ed25519_test.go b/vendor/golang.org/x/crypto/ed25519/ed25519_test.go new file mode 100644 index 000000000000..e272f8a557f9 --- /dev/null +++ b/vendor/golang.org/x/crypto/ed25519/ed25519_test.go @@ -0,0 +1,183 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ed25519 + +import ( + "bufio" + "bytes" + "compress/gzip" + "crypto" + "crypto/rand" + "encoding/hex" + "os" + "strings" + "testing" + + "golang.org/x/crypto/ed25519/internal/edwards25519" +) + +type zeroReader struct{} + +func (zeroReader) Read(buf []byte) (int, error) { + for i := range buf { + buf[i] = 0 + } + return len(buf), nil +} + +func TestUnmarshalMarshal(t *testing.T) { + pub, _, _ := GenerateKey(rand.Reader) + + var A edwards25519.ExtendedGroupElement + var pubBytes [32]byte + copy(pubBytes[:], pub) + if !A.FromBytes(&pubBytes) { + t.Fatalf("ExtendedGroupElement.FromBytes failed") + } + + var pub2 [32]byte + A.ToBytes(&pub2) + + if pubBytes != pub2 { + t.Errorf("FromBytes(%v)->ToBytes does not round-trip, got %x\n", pubBytes, pub2) + } +} + +func TestSignVerify(t *testing.T) { + var zero zeroReader + public, private, _ := GenerateKey(zero) + + message := []byte("test message") + sig := Sign(private, message) + if !Verify(public, message, sig) { + t.Errorf("valid signature rejected") + } + + wrongMessage := []byte("wrong message") + if Verify(public, wrongMessage, sig) { + t.Errorf("signature of different message accepted") + } +} + +func TestCryptoSigner(t *testing.T) { + var zero zeroReader + public, private, _ := GenerateKey(zero) + + signer := crypto.Signer(private) + + publicInterface := signer.Public() + public2, ok := publicInterface.(PublicKey) + if !ok { + t.Fatalf("expected PublicKey from Public() but got %T", publicInterface) + } + + if !bytes.Equal(public, public2) { + t.Errorf("public keys do not match: original:%x vs Public():%x", public, public2) + } + + message := []byte("message") + var noHash crypto.Hash + signature, err := signer.Sign(zero, message, noHash) + if err != nil { + t.Fatalf("error from Sign(): %s", err) + } + + if !Verify(public, message, signature) { + t.Errorf("Verify failed on signature from Sign()") + } +} + +func TestGolden(t *testing.T) { + // sign.input.gz is a selection of test cases from + // https://ed25519.cr.yp.to/python/sign.input + testDataZ, err := os.Open("testdata/sign.input.gz") + if err != nil { + t.Fatal(err) + } + defer testDataZ.Close() + testData, err := 
gzip.NewReader(testDataZ) + if err != nil { + t.Fatal(err) + } + defer testData.Close() + + scanner := bufio.NewScanner(testData) + lineNo := 0 + + for scanner.Scan() { + lineNo++ + + line := scanner.Text() + parts := strings.Split(line, ":") + if len(parts) != 5 { + t.Fatalf("bad number of parts on line %d", lineNo) + } + + privBytes, _ := hex.DecodeString(parts[0]) + pubKey, _ := hex.DecodeString(parts[1]) + msg, _ := hex.DecodeString(parts[2]) + sig, _ := hex.DecodeString(parts[3]) + // The signatures in the test vectors also include the message + // at the end, but we just want R and S. + sig = sig[:SignatureSize] + + if l := len(pubKey); l != PublicKeySize { + t.Fatalf("bad public key length on line %d: got %d bytes", lineNo, l) + } + + var priv [PrivateKeySize]byte + copy(priv[:], privBytes) + copy(priv[32:], pubKey) + + sig2 := Sign(priv[:], msg) + if !bytes.Equal(sig, sig2[:]) { + t.Errorf("different signature result on line %d: %x vs %x", lineNo, sig, sig2) + } + + if !Verify(pubKey, msg, sig2) { + t.Errorf("signature failed to verify on line %d", lineNo) + } + } + + if err := scanner.Err(); err != nil { + t.Fatalf("error reading test data: %s", err) + } +} + +func BenchmarkKeyGeneration(b *testing.B) { + var zero zeroReader + for i := 0; i < b.N; i++ { + if _, _, err := GenerateKey(zero); err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkSigning(b *testing.B) { + var zero zeroReader + _, priv, err := GenerateKey(zero) + if err != nil { + b.Fatal(err) + } + message := []byte("Hello, world!") + b.ResetTimer() + for i := 0; i < b.N; i++ { + Sign(priv, message) + } +} + +func BenchmarkVerification(b *testing.B) { + var zero zeroReader + pub, priv, err := GenerateKey(zero) + if err != nil { + b.Fatal(err) + } + message := []byte("Hello, world!") + signature := Sign(priv, message) + b.ResetTimer() + for i := 0; i < b.N; i++ { + Verify(pub, message, signature) + } +} diff --git a/vendor/golang.org/x/crypto/ssh/BUILD b/vendor/golang.org/x/crypto/ssh/BUILD index 2780d2f78e3e..bd8030e57f77 100644 --- a/vendor/golang.org/x/crypto/ssh/BUILD +++ b/vendor/golang.org/x/crypto/ssh/BUILD @@ -1,4 +1,4 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "go_default_library", @@ -32,6 +32,45 @@ go_library( ], ) +go_test( + name = "go_default_test", + srcs = [ + "benchmark_test.go", + "buffer_test.go", + "certs_test.go", + "cipher_test.go", + "client_auth_test.go", + "client_test.go", + "handshake_test.go", + "kex_test.go", + "keys_test.go", + "mempipe_test.go", + "messages_test.go", + "mux_test.go", + "session_test.go", + "tcpip_test.go", + "testdata_test.go", + "transport_test.go", + ], + importpath = "golang.org/x/crypto/ssh", + library = ":go_default_library", + deps = [ + "//vendor/golang.org/x/crypto/ed25519:go_default_library", + "//vendor/golang.org/x/crypto/ssh/terminal:go_default_library", + "//vendor/golang.org/x/crypto/ssh/testdata:go_default_library", + ], +) + +go_test( + name = "go_default_xtest", + srcs = ["example_test.go"], + importpath = "golang.org/x/crypto/ssh_test", + deps = [ + ":go_default_library", + "//vendor/golang.org/x/crypto/ssh/terminal:go_default_library", + ], +) + filegroup( name = "package-srcs", srcs = glob(["**"]), diff --git a/vendor/golang.org/x/crypto/ssh/benchmark_test.go b/vendor/golang.org/x/crypto/ssh/benchmark_test.go new file mode 100644 index 000000000000..d9f7eb9b60ac --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/benchmark_test.go @@ -0,0 +1,122 @@ +// 
Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "errors" + "io" + "net" + "testing" +) + +type server struct { + *ServerConn + chans <-chan NewChannel +} + +func newServer(c net.Conn, conf *ServerConfig) (*server, error) { + sconn, chans, reqs, err := NewServerConn(c, conf) + if err != nil { + return nil, err + } + go DiscardRequests(reqs) + return &server{sconn, chans}, nil +} + +func (s *server) Accept() (NewChannel, error) { + n, ok := <-s.chans + if !ok { + return nil, io.EOF + } + return n, nil +} + +func sshPipe() (Conn, *server, error) { + c1, c2, err := netPipe() + if err != nil { + return nil, nil, err + } + + clientConf := ClientConfig{ + User: "user", + } + serverConf := ServerConfig{ + NoClientAuth: true, + } + serverConf.AddHostKey(testSigners["ecdsa"]) + done := make(chan *server, 1) + go func() { + server, err := newServer(c2, &serverConf) + if err != nil { + done <- nil + return + } + done <- server + }() + + client, _, reqs, err := NewClientConn(c1, "", &clientConf) + if err != nil { + return nil, nil, err + } + + server := <-done + if server == nil { + return nil, nil, errors.New("server handshake failed") + } + go DiscardRequests(reqs) + + return client, server, nil +} + +func BenchmarkEndToEnd(b *testing.B) { + b.StopTimer() + + client, server, err := sshPipe() + if err != nil { + b.Fatalf("sshPipe: %v", err) + } + + defer client.Close() + defer server.Close() + + size := (1 << 20) + input := make([]byte, size) + output := make([]byte, size) + b.SetBytes(int64(size)) + done := make(chan int, 1) + + go func() { + newCh, err := server.Accept() + if err != nil { + b.Fatalf("Client: %v", err) + } + ch, incoming, err := newCh.Accept() + if err != nil { + b.Fatalf("Accept: %v", err) + } + go DiscardRequests(incoming) + for i := 0; i < b.N; i++ { + if _, err := io.ReadFull(ch, output); err != nil { + b.Fatalf("ReadFull: %v", err) + } + } + ch.Close() + done <- 1 + }() + + ch, in, err := client.OpenChannel("speed", nil) + if err != nil { + b.Fatalf("OpenChannel: %v", err) + } + go DiscardRequests(in) + + b.ResetTimer() + b.StartTimer() + for i := 0; i < b.N; i++ { + if _, err := ch.Write(input); err != nil { + b.Fatalf("WriteFull: %v", err) + } + } + ch.Close() + b.StopTimer() + + <-done +} diff --git a/vendor/golang.org/x/crypto/ssh/buffer_test.go b/vendor/golang.org/x/crypto/ssh/buffer_test.go new file mode 100644 index 000000000000..d5781cb3da99 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/buffer_test.go @@ -0,0 +1,87 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file.
+ +package ssh + +import ( + "io" + "testing" +) + +var alphabet = []byte("abcdefghijklmnopqrstuvwxyz") + +func TestBufferReadwrite(t *testing.T) { + b := newBuffer() + b.write(alphabet[:10]) + r, _ := b.Read(make([]byte, 10)) + if r != 10 { + t.Fatalf("Expected written == read == 10, written: 10, read %d", r) + } + + b = newBuffer() + b.write(alphabet[:5]) + r, _ = b.Read(make([]byte, 10)) + if r != 5 { + t.Fatalf("Expected written == read == 5, written: 5, read %d", r) + } + + b = newBuffer() + b.write(alphabet[:10]) + r, _ = b.Read(make([]byte, 5)) + if r != 5 { + t.Fatalf("Expected written == 10, read == 5, written: 10, read %d", r) + } + + b = newBuffer() + b.write(alphabet[:5]) + b.write(alphabet[5:15]) + r, _ = b.Read(make([]byte, 10)) + r2, _ := b.Read(make([]byte, 10)) + if r != 10 || r2 != 5 || 15 != r+r2 { + t.Fatal("Expected written == read == 15") + } +} + +func TestBufferClose(t *testing.T) { + b := newBuffer() + b.write(alphabet[:10]) + b.eof() + _, err := b.Read(make([]byte, 5)) + if err != nil { + t.Fatal("expected read of 5 to not return EOF") + } + b = newBuffer() + b.write(alphabet[:10]) + b.eof() + r, err := b.Read(make([]byte, 5)) + r2, err2 := b.Read(make([]byte, 10)) + if r != 5 || r2 != 5 || err != nil || err2 != nil { + t.Fatal("expected reads of 5 and 5") + } + + b = newBuffer() + b.write(alphabet[:10]) + b.eof() + r, err = b.Read(make([]byte, 5)) + r2, err2 = b.Read(make([]byte, 10)) + r3, err3 := b.Read(make([]byte, 10)) + if r != 5 || r2 != 5 || r3 != 0 || err != nil || err2 != nil || err3 != io.EOF { + t.Fatal("expected reads of 5 and 5 and 0, with EOF") + } + + b = newBuffer() + b.write(make([]byte, 5)) + b.write(make([]byte, 10)) + b.eof() + r, err = b.Read(make([]byte, 9)) + r2, err2 = b.Read(make([]byte, 3)) + r3, err3 = b.Read(make([]byte, 3)) + r4, err4 := b.Read(make([]byte, 10)) + if err != nil || err2 != nil || err3 != nil || err4 != io.EOF { + t.Fatalf("Expected EOF on fourth read only, err=%v, err2=%v, err3=%v, err4=%v", err, err2, err3, err4) + } + if r != 9 || r2 != 3 || r3 != 3 || r4 != 0 { + t.Fatal("Expected written == read == 15", r, r2, r3, r4) + } +} diff --git a/vendor/golang.org/x/crypto/ssh/certs_test.go b/vendor/golang.org/x/crypto/ssh/certs_test.go new file mode 100644 index 000000000000..0200531f4c7d --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/certs_test.go @@ -0,0 +1,222 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "bytes" + "crypto/rand" + "reflect" + "testing" + "time" +) + +// Cert generated by ssh-keygen 6.0p1 Debian-4.
+// % ssh-keygen -s ca-key -I test user-key +const exampleSSHCert = `ssh-rsa-cert-v01@openssh.com AAAAHHNzaC1yc2EtY2VydC12MDFAb3BlbnNzaC5jb20AAAAgb1srW/W3ZDjYAO45xLYAwzHBDLsJ4Ux6ICFIkTjb1LEAAAADAQABAAAAYQCkoR51poH0wE8w72cqSB8Sszx+vAhzcMdCO0wqHTj7UNENHWEXGrU0E0UQekD7U+yhkhtoyjbPOVIP7hNa6aRk/ezdh/iUnCIt4Jt1v3Z1h1P+hA4QuYFMHNB+rmjPwAcAAAAAAAAAAAAAAAEAAAAEdGVzdAAAAAAAAAAAAAAAAP//////////AAAAAAAAAIIAAAAVcGVybWl0LVgxMS1mb3J3YXJkaW5nAAAAAAAAABdwZXJtaXQtYWdlbnQtZm9yd2FyZGluZwAAAAAAAAAWcGVybWl0LXBvcnQtZm9yd2FyZGluZwAAAAAAAAAKcGVybWl0LXB0eQAAAAAAAAAOcGVybWl0LXVzZXItcmMAAAAAAAAAAAAAAHcAAAAHc3NoLXJzYQAAAAMBAAEAAABhANFS2kaktpSGc+CcmEKPyw9mJC4nZKxHKTgLVZeaGbFZOvJTNzBspQHdy7Q1uKSfktxpgjZnksiu/tFF9ngyY2KFoc+U88ya95IZUycBGCUbBQ8+bhDtw/icdDGQD5WnUwAAAG8AAAAHc3NoLXJzYQAAAGC8Y9Z2LQKhIhxf52773XaWrXdxP0t3GBVo4A10vUWiYoAGepr6rQIoGGXFxT4B9Gp+nEBJjOwKDXPrAevow0T9ca8gZN+0ykbhSrXLE5Ao48rqr3zP4O1/9P7e6gp0gw8=` + +func TestParseCert(t *testing.T) { + authKeyBytes := []byte(exampleSSHCert) + + key, _, _, rest, err := ParseAuthorizedKey(authKeyBytes) + if err != nil { + t.Fatalf("ParseAuthorizedKey: %v", err) + } + if len(rest) > 0 { + t.Errorf("rest: got %q, want empty", rest) + } + + if _, ok := key.(*Certificate); !ok { + t.Fatalf("got %v (%T), want *Certificate", key, key) + } + + marshaled := MarshalAuthorizedKey(key) + // Before comparison, remove the trailing newline that + // MarshalAuthorizedKey adds. + marshaled = marshaled[:len(marshaled)-1] + if !bytes.Equal(authKeyBytes, marshaled) { + t.Errorf("marshaled certificate does not match original: got %q, want %q", marshaled, authKeyBytes) + } +} + +// Cert generated by ssh-keygen OpenSSH_6.8p1 OS X 10.10.3 +// % ssh-keygen -s ca -I testcert -O source-address=192.168.1.0/24 -O force-command=/bin/sleep user.pub +// user.pub key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDACh1rt2DXfV3hk6fszSQcQ/rueMId0kVD9U7nl8cfEnFxqOCrNT92g4laQIGl2mn8lsGZfTLg8ksHq3gkvgO3oo/0wHy4v32JeBOHTsN5AL4gfHNEhWeWb50ev47hnTsRIt9P4dxogeUo/hTu7j9+s9lLpEQXCvq6xocXQt0j8MV9qZBBXFLXVT3cWIkSqOdwt/5ZBg+1GSrc7WfCXVWgTk4a20uPMuJPxU4RQwZW6X3+O8Pqo8C3cW0OzZRFP6gUYUKUsTI5WntlS+LAxgw1mZNsozFGdbiOPRnEryE3SRldh9vjDR3tin1fGpA5P7+CEB/bqaXtG3V+F2OkqaMN +// Critical Options: +// force-command /bin/sleep +// source-address 192.168.1.0/24 +// Extensions: +// permit-X11-forwarding +// permit-agent-forwarding +// permit-port-forwarding +// permit-pty +// permit-user-rc +const exampleSSHCertWithOptions = `ssh-rsa-cert-v01@openssh.com 
AAAAHHNzaC1yc2EtY2VydC12MDFAb3BlbnNzaC5jb20AAAAgDyysCJY0XrO1n03EeRRoITnTPdjENFmWDs9X58PP3VUAAAADAQABAAABAQDACh1rt2DXfV3hk6fszSQcQ/rueMId0kVD9U7nl8cfEnFxqOCrNT92g4laQIGl2mn8lsGZfTLg8ksHq3gkvgO3oo/0wHy4v32JeBOHTsN5AL4gfHNEhWeWb50ev47hnTsRIt9P4dxogeUo/hTu7j9+s9lLpEQXCvq6xocXQt0j8MV9qZBBXFLXVT3cWIkSqOdwt/5ZBg+1GSrc7WfCXVWgTk4a20uPMuJPxU4RQwZW6X3+O8Pqo8C3cW0OzZRFP6gUYUKUsTI5WntlS+LAxgw1mZNsozFGdbiOPRnEryE3SRldh9vjDR3tin1fGpA5P7+CEB/bqaXtG3V+F2OkqaMNAAAAAAAAAAAAAAABAAAACHRlc3RjZXJ0AAAAAAAAAAAAAAAA//////////8AAABLAAAADWZvcmNlLWNvbW1hbmQAAAAOAAAACi9iaW4vc2xlZXAAAAAOc291cmNlLWFkZHJlc3MAAAASAAAADjE5Mi4xNjguMS4wLzI0AAAAggAAABVwZXJtaXQtWDExLWZvcndhcmRpbmcAAAAAAAAAF3Blcm1pdC1hZ2VudC1mb3J3YXJkaW5nAAAAAAAAABZwZXJtaXQtcG9ydC1mb3J3YXJkaW5nAAAAAAAAAApwZXJtaXQtcHR5AAAAAAAAAA5wZXJtaXQtdXNlci1yYwAAAAAAAAAAAAABFwAAAAdzc2gtcnNhAAAAAwEAAQAAAQEAwU+c5ui5A8+J/CFpjW8wCa52bEODA808WWQDCSuTG/eMXNf59v9Y8Pk0F1E9dGCosSNyVcB/hacUrc6He+i97+HJCyKavBsE6GDxrjRyxYqAlfcOXi/IVmaUGiO8OQ39d4GHrjToInKvExSUeleQyH4Y4/e27T/pILAqPFL3fyrvMLT5qU9QyIt6zIpa7GBP5+urouNavMprV3zsfIqNBbWypinOQAw823a5wN+zwXnhZrgQiHZ/USG09Y6k98y1dTVz8YHlQVR4D3lpTAsKDKJ5hCH9WU4fdf+lU8OyNGaJ/vz0XNqxcToe1l4numLTnaoSuH89pHryjqurB7lJKwAAAQ8AAAAHc3NoLXJzYQAAAQCaHvUIoPL1zWUHIXLvu96/HU1s/i4CAW2IIEuGgxCUCiFj6vyTyYtgxQxcmbfZf6eaITlS6XJZa7Qq4iaFZh75C1DXTX8labXhRSD4E2t//AIP9MC1rtQC5xo6FmbQ+BoKcDskr+mNACcbRSxs3IL3bwCfWDnIw2WbVox9ZdcthJKk4UoCW4ix4QwdHw7zlddlz++fGEEVhmTbll1SUkycGApPFBsAYRTMupUJcYPIeReBI/m8XfkoMk99bV8ZJQTAd7OekHY2/48Ff53jLmyDjP7kNw1F8OaPtkFs6dGJXta4krmaekPy87j+35In5hFj7yoOqvSbmYUkeX70/GGQ` + +func TestParseCertWithOptions(t *testing.T) { + opts := map[string]string{ + "source-address": "192.168.1.0/24", + "force-command": "/bin/sleep", + } + exts := map[string]string{ + "permit-X11-forwarding": "", + "permit-agent-forwarding": "", + "permit-port-forwarding": "", + "permit-pty": "", + "permit-user-rc": "", + } + authKeyBytes := []byte(exampleSSHCertWithOptions) + + key, _, _, rest, err := ParseAuthorizedKey(authKeyBytes) + if err != nil { + t.Fatalf("ParseAuthorizedKey: %v", err) + } + if len(rest) > 0 { + t.Errorf("rest: got %q, want empty", rest) + } + cert, ok := key.(*Certificate) + if !ok { + t.Fatalf("got %v (%T), want *Certificate", key, key) + } + if !reflect.DeepEqual(cert.CriticalOptions, opts) { + t.Errorf("unexpected critical options - got %v, want %v", cert.CriticalOptions, opts) + } + if !reflect.DeepEqual(cert.Extensions, exts) { + t.Errorf("unexpected Extensions - got %v, want %v", cert.Extensions, exts) + } + marshaled := MarshalAuthorizedKey(key) + // Before comparison, remove the trailing newline that + // MarshalAuthorizedKey adds. 
+ marshaled = marshaled[:len(marshaled)-1] + if !bytes.Equal(authKeyBytes, marshaled) { + t.Errorf("marshaled certificate does not match original: got %q, want %q", marshaled, authKeyBytes) + } +} + +func TestValidateCert(t *testing.T) { + key, _, _, _, err := ParseAuthorizedKey([]byte(exampleSSHCert)) + if err != nil { + t.Fatalf("ParseAuthorizedKey: %v", err) + } + validCert, ok := key.(*Certificate) + if !ok { + t.Fatalf("got %v (%T), want *Certificate", key, key) + } + checker := CertChecker{} + checker.IsUserAuthority = func(k PublicKey) bool { + return bytes.Equal(k.Marshal(), validCert.SignatureKey.Marshal()) + } + + if err := checker.CheckCert("user", validCert); err != nil { + t.Errorf("Unable to validate certificate: %v", err) + } + invalidCert := &Certificate{ + Key: testPublicKeys["rsa"], + SignatureKey: testPublicKeys["ecdsa"], + ValidBefore: CertTimeInfinity, + Signature: &Signature{}, + } + if err := checker.CheckCert("user", invalidCert); err == nil { + t.Error("Invalid cert signature passed validation") + } +} + +func TestValidateCertTime(t *testing.T) { + cert := Certificate{ + ValidPrincipals: []string{"user"}, + Key: testPublicKeys["rsa"], + ValidAfter: 50, + ValidBefore: 100, + } + + cert.SignCert(rand.Reader, testSigners["ecdsa"]) + + for ts, ok := range map[int64]bool{ + 25: false, + 50: true, + 99: true, + 100: false, + 125: false, + } { + checker := CertChecker{ + Clock: func() time.Time { return time.Unix(ts, 0) }, + } + checker.IsUserAuthority = func(k PublicKey) bool { + return bytes.Equal(k.Marshal(), + testPublicKeys["ecdsa"].Marshal()) + } + + if v := checker.CheckCert("user", &cert); (v == nil) != ok { + t.Errorf("Authenticate(%d): %v", ts, v) + } + } +} + +// TODO(hanwen): tests for +// +// host keys: +// * fallbacks + +func TestHostKeyCert(t *testing.T) { + cert := &Certificate{ + ValidPrincipals: []string{"hostname", "hostname.domain", "otherhost"}, + Key: testPublicKeys["rsa"], + ValidBefore: CertTimeInfinity, + CertType: HostCert, + } + cert.SignCert(rand.Reader, testSigners["ecdsa"]) + + checker := &CertChecker{ + IsHostAuthority: func(p PublicKey, addr string) bool { + return addr == "hostname:22" && bytes.Equal(testPublicKeys["ecdsa"].Marshal(), p.Marshal()) + }, + } + + certSigner, err := NewCertSigner(cert, testSigners["rsa"]) + if err != nil { + t.Errorf("NewCertSigner: %v", err) + } + + for _, test := range []struct { + addr string + succeed bool + }{ + {addr: "hostname:22", succeed: true}, + {addr: "otherhost:22", succeed: false}, // The certificate is valid for 'otherhost' as hostname, but we only recognize the authority of the signer for the address 'hostname:22' + {addr: "lasthost:22", succeed: false}, + } { + c1, c2, err := netPipe() + if err != nil { + t.Fatalf("netPipe: %v", err) + } + defer c1.Close() + defer c2.Close() + + errc := make(chan error) + + go func() { + conf := ServerConfig{ + NoClientAuth: true, + } + conf.AddHostKey(certSigner) + _, _, _, err := NewServerConn(c1, &conf) + errc <- err + }() + + config := &ClientConfig{ + User: "user", + HostKeyCallback: checker.CheckHostKey, + } + _, _, _, err = NewClientConn(c2, test.addr, config) + + if (err == nil) != test.succeed { + t.Fatalf("NewClientConn(%q): %v", test.addr, err) + } + + err = <-errc + if (err == nil) != test.succeed { + t.Fatalf("NewServerConn(%q): %v", test.addr, err) + } + } +} diff --git a/vendor/golang.org/x/crypto/ssh/cipher_test.go b/vendor/golang.org/x/crypto/ssh/cipher_test.go new file mode 100644 index 000000000000..5cfa17a62de5 --- /dev/null +++ 
b/vendor/golang.org/x/crypto/ssh/cipher_test.go @@ -0,0 +1,129 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "bytes" + "crypto" + "crypto/aes" + "crypto/rand" + "testing" +) + +func TestDefaultCiphersExist(t *testing.T) { + for _, cipherAlgo := range supportedCiphers { + if _, ok := cipherModes[cipherAlgo]; !ok { + t.Errorf("default cipher %q is unknown", cipherAlgo) + } + } +} + +func TestPacketCiphers(t *testing.T) { + // Still test the aes128-cbc cipher even though it is commented out of the default cipher list. + cipherModes[aes128cbcID] = &streamCipherMode{16, aes.BlockSize, 0, nil} + defer delete(cipherModes, aes128cbcID) + + for cipher := range cipherModes { + for mac := range macModes { + kr := &kexResult{Hash: crypto.SHA1} + algs := directionAlgorithms{ + Cipher: cipher, + MAC: mac, + Compression: "none", + } + client, err := newPacketCipher(clientKeys, algs, kr) + if err != nil { + t.Errorf("newPacketCipher(client, %q, %q): %v", cipher, mac, err) + continue + } + server, err := newPacketCipher(clientKeys, algs, kr) + if err != nil { + t.Errorf("newPacketCipher(server, %q, %q): %v", cipher, mac, err) + continue + } + + want := "bla bla" + input := []byte(want) + buf := &bytes.Buffer{} + if err := client.writePacket(0, buf, rand.Reader, input); err != nil { + t.Errorf("writePacket(%q, %q): %v", cipher, mac, err) + continue + } + + packet, err := server.readPacket(0, buf) + if err != nil { + t.Errorf("readPacket(%q, %q): %v", cipher, mac, err) + continue + } + + if string(packet) != want { + t.Errorf("roundtrip(%q, %q): got %q, want %q", cipher, mac, packet, want) + } + } + } +} + +func TestCBCOracleCounterMeasure(t *testing.T) { + cipherModes[aes128cbcID] = &streamCipherMode{16, aes.BlockSize, 0, nil} + defer delete(cipherModes, aes128cbcID) + + kr := &kexResult{Hash: crypto.SHA1} + algs := directionAlgorithms{ + Cipher: aes128cbcID, + MAC: "hmac-sha1", + Compression: "none", + } + client, err := newPacketCipher(clientKeys, algs, kr) + if err != nil { + t.Fatalf("newPacketCipher(client): %v", err) + } + + want := "bla bla" + input := []byte(want) + buf := &bytes.Buffer{} + if err := client.writePacket(0, buf, rand.Reader, input); err != nil { + t.Errorf("writePacket: %v", err) + } + + packetSize := buf.Len() + buf.Write(make([]byte, 2*maxPacket)) + + // We corrupt each byte, but this will usually only exercise the + // 'packet too large' or 'MAC failure' cases. 
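+	// The countermeasure under test: when decryption fails, readPacket + // must consume an amount of input that is independent of which byte + // was corrupted, otherwise the discard length itself leaks a CBC + // padding oracle; the lastRead bookkeeping below verifies this.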
+ lastRead := -1 + for i := 0; i < packetSize; i++ { + server, err := newPacketCipher(clientKeys, algs, kr) + if err != nil { + t.Fatalf("newPacketCipher(client): %v", err) + } + + fresh := &bytes.Buffer{} + fresh.Write(buf.Bytes()) + fresh.Bytes()[i] ^= 0x01 + + before := fresh.Len() + _, err = server.readPacket(0, fresh) + if err == nil { + t.Errorf("corrupt byte %d: readPacket succeeded ", i) + continue + } + if _, ok := err.(cbcError); !ok { + t.Errorf("corrupt byte %d: got %v (%T), want cbcError", i, err, err) + continue + } + + after := fresh.Len() + bytesRead := before - after + if bytesRead < maxPacket { + t.Errorf("corrupt byte %d: read %d bytes, want more than %d", i, bytesRead, maxPacket) + continue + } + + if i > 0 && bytesRead != lastRead { + t.Errorf("corrupt byte %d: read %d bytes, want %d bytes read", i, bytesRead, lastRead) + } + lastRead = bytesRead + } +} diff --git a/vendor/golang.org/x/crypto/ssh/client_auth_test.go b/vendor/golang.org/x/crypto/ssh/client_auth_test.go new file mode 100644 index 000000000000..145b57a2bb70 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/client_auth_test.go @@ -0,0 +1,628 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "bytes" + "crypto/rand" + "errors" + "fmt" + "os" + "strings" + "testing" +) + +type keyboardInteractive map[string]string + +func (cr keyboardInteractive) Challenge(user string, instruction string, questions []string, echos []bool) ([]string, error) { + var answers []string + for _, q := range questions { + answers = append(answers, cr[q]) + } + return answers, nil +} + +// reused internally by tests +var clientPassword = "tiger" + +// tryAuth runs a handshake with a given config against an SSH server +// with config serverConfig +func tryAuth(t *testing.T, config *ClientConfig) error { + c1, c2, err := netPipe() + if err != nil { + t.Fatalf("netPipe: %v", err) + } + defer c1.Close() + defer c2.Close() + + certChecker := CertChecker{ + IsUserAuthority: func(k PublicKey) bool { + return bytes.Equal(k.Marshal(), testPublicKeys["ecdsa"].Marshal()) + }, + UserKeyFallback: func(conn ConnMetadata, key PublicKey) (*Permissions, error) { + if conn.User() == "testuser" && bytes.Equal(key.Marshal(), testPublicKeys["rsa"].Marshal()) { + return nil, nil + } + + return nil, fmt.Errorf("pubkey for %q not acceptable", conn.User()) + }, + IsRevoked: func(c *Certificate) bool { + return c.Serial == 666 + }, + } + + serverConfig := &ServerConfig{ + PasswordCallback: func(conn ConnMetadata, pass []byte) (*Permissions, error) { + if conn.User() == "testuser" && string(pass) == clientPassword { + return nil, nil + } + return nil, errors.New("password auth failed") + }, + PublicKeyCallback: certChecker.Authenticate, + KeyboardInteractiveCallback: func(conn ConnMetadata, challenge KeyboardInteractiveChallenge) (*Permissions, error) { + ans, err := challenge("user", + "instruction", + []string{"question1", "question2"}, + []bool{true, true}) + if err != nil { + return nil, err + } + ok := conn.User() == "testuser" && ans[0] == "answer1" && ans[1] == "answer2" + if ok { + challenge("user", "motd", nil, nil) + return nil, nil + } + return nil, errors.New("keyboard-interactive failed") + }, + } + serverConfig.AddHostKey(testSigners["rsa"]) + + go newServer(c1, serverConfig) + _, _, _, err = NewClientConn(c2, "", config) + return err +} + +func TestClientAuthPublicKey(t *testing.T) { + config := 
&ClientConfig{ + User: "testuser", + Auth: []AuthMethod{ + PublicKeys(testSigners["rsa"]), + }, + HostKeyCallback: InsecureIgnoreHostKey(), + } + if err := tryAuth(t, config); err != nil { + t.Fatalf("unable to dial remote side: %s", err) + } +} + +func TestAuthMethodPassword(t *testing.T) { + config := &ClientConfig{ + User: "testuser", + Auth: []AuthMethod{ + Password(clientPassword), + }, + HostKeyCallback: InsecureIgnoreHostKey(), + } + + if err := tryAuth(t, config); err != nil { + t.Fatalf("unable to dial remote side: %s", err) + } +} + +func TestAuthMethodFallback(t *testing.T) { + var passwordCalled bool + config := &ClientConfig{ + User: "testuser", + Auth: []AuthMethod{ + PublicKeys(testSigners["rsa"]), + PasswordCallback( + func() (string, error) { + passwordCalled = true + return "WRONG", nil + }), + }, + HostKeyCallback: InsecureIgnoreHostKey(), + } + + if err := tryAuth(t, config); err != nil { + t.Fatalf("unable to dial remote side: %s", err) + } + + if passwordCalled { + t.Errorf("password auth tried before public-key auth.") + } +} + +func TestAuthMethodWrongPassword(t *testing.T) { + config := &ClientConfig{ + User: "testuser", + Auth: []AuthMethod{ + Password("wrong"), + PublicKeys(testSigners["rsa"]), + }, + HostKeyCallback: InsecureIgnoreHostKey(), + } + + if err := tryAuth(t, config); err != nil { + t.Fatalf("unable to dial remote side: %s", err) + } +} + +func TestAuthMethodKeyboardInteractive(t *testing.T) { + answers := keyboardInteractive(map[string]string{ + "question1": "answer1", + "question2": "answer2", + }) + config := &ClientConfig{ + User: "testuser", + Auth: []AuthMethod{ + KeyboardInteractive(answers.Challenge), + }, + HostKeyCallback: InsecureIgnoreHostKey(), + } + + if err := tryAuth(t, config); err != nil { + t.Fatalf("unable to dial remote side: %s", err) + } +} + +func TestAuthMethodWrongKeyboardInteractive(t *testing.T) { + answers := keyboardInteractive(map[string]string{ + "question1": "answer1", + "question2": "WRONG", + }) + config := &ClientConfig{ + User: "testuser", + Auth: []AuthMethod{ + KeyboardInteractive(answers.Challenge), + }, + } + + if err := tryAuth(t, config); err == nil { + t.Fatalf("wrong answers should not have authenticated with KeyboardInteractive") + } +} + +// the mock server will only authenticate ssh-rsa keys +func TestAuthMethodInvalidPublicKey(t *testing.T) { + config := &ClientConfig{ + User: "testuser", + Auth: []AuthMethod{ + PublicKeys(testSigners["dsa"]), + }, + } + + if err := tryAuth(t, config); err == nil { + t.Fatalf("dsa private key should not have authenticated with rsa public key") + } +} + +// the client should authenticate with the second key +func TestAuthMethodRSAandDSA(t *testing.T) { + config := &ClientConfig{ + User: "testuser", + Auth: []AuthMethod{ + PublicKeys(testSigners["dsa"], testSigners["rsa"]), + }, + HostKeyCallback: InsecureIgnoreHostKey(), + } + if err := tryAuth(t, config); err != nil { + t.Fatalf("client could not authenticate with rsa key: %v", err) + } +} + +func TestClientHMAC(t *testing.T) { + for _, mac := range supportedMACs { + config := &ClientConfig{ + User: "testuser", + Auth: []AuthMethod{ + PublicKeys(testSigners["rsa"]), + }, + Config: Config{ + MACs: []string{mac}, + }, + HostKeyCallback: InsecureIgnoreHostKey(), + } + if err := tryAuth(t, config); err != nil { + t.Fatalf("client could not authenticate with mac algo %s: %v", mac, err) + } + } +} + +// issue 4285. 
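+// A client that offers only ciphers the server does not support must +// fail the handshake with a negotiation error rather than hang.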
+func TestClientUnsupportedCipher(t *testing.T) { + config := &ClientConfig{ + User: "testuser", + Auth: []AuthMethod{ + PublicKeys(), + }, + Config: Config{ + Ciphers: []string{"aes128-cbc"}, // not currently supported + }, + } + if err := tryAuth(t, config); err == nil { + t.Errorf("expected no ciphers in common") + } +} + +func TestClientUnsupportedKex(t *testing.T) { + if os.Getenv("GO_BUILDER_NAME") != "" { + t.Skip("skipping known-flaky test on the Go build dashboard; see golang.org/issue/15198") + } + config := &ClientConfig{ + User: "testuser", + Auth: []AuthMethod{ + PublicKeys(), + }, + Config: Config{ + KeyExchanges: []string{"diffie-hellman-group-exchange-sha256"}, // not currently supported + }, + HostKeyCallback: InsecureIgnoreHostKey(), + } + if err := tryAuth(t, config); err == nil || !strings.Contains(err.Error(), "common algorithm") { + t.Errorf("got %v, expected 'common algorithm'", err) + } +} + +func TestClientLoginCert(t *testing.T) { + cert := &Certificate{ + Key: testPublicKeys["rsa"], + ValidBefore: CertTimeInfinity, + CertType: UserCert, + } + cert.SignCert(rand.Reader, testSigners["ecdsa"]) + certSigner, err := NewCertSigner(cert, testSigners["rsa"]) + if err != nil { + t.Fatalf("NewCertSigner: %v", err) + } + + clientConfig := &ClientConfig{ + User: "user", + HostKeyCallback: InsecureIgnoreHostKey(), + } + clientConfig.Auth = append(clientConfig.Auth, PublicKeys(certSigner)) + + // should succeed + if err := tryAuth(t, clientConfig); err != nil { + t.Errorf("cert login failed: %v", err) + } + + // corrupted signature + cert.Signature.Blob[0]++ + if err := tryAuth(t, clientConfig); err == nil { + t.Errorf("cert login passed with corrupted sig") + } + + // revoked + cert.Serial = 666 + cert.SignCert(rand.Reader, testSigners["ecdsa"]) + if err := tryAuth(t, clientConfig); err == nil { + t.Errorf("revoked cert login succeeded") + } + cert.Serial = 1 + + // sign with wrong key + cert.SignCert(rand.Reader, testSigners["dsa"]) + if err := tryAuth(t, clientConfig); err == nil { + t.Errorf("cert login passed with non-authoritative key") + } + + // host cert + cert.CertType = HostCert + cert.SignCert(rand.Reader, testSigners["ecdsa"]) + if err := tryAuth(t, clientConfig); err == nil { + t.Errorf("cert login passed with wrong type") + } + cert.CertType = UserCert + + // principal specified + cert.ValidPrincipals = []string{"user"} + cert.SignCert(rand.Reader, testSigners["ecdsa"]) + if err := tryAuth(t, clientConfig); err != nil { + t.Errorf("cert login failed: %v", err) + } + + // wrong principal specified + cert.ValidPrincipals = []string{"fred"} + cert.SignCert(rand.Reader, testSigners["ecdsa"]) + if err := tryAuth(t, clientConfig); err == nil { + t.Errorf("cert login passed with wrong principal") + } + cert.ValidPrincipals = nil + + // added critical option + cert.CriticalOptions = map[string]string{"root-access": "yes"} + cert.SignCert(rand.Reader, testSigners["ecdsa"]) + if err := tryAuth(t, clientConfig); err == nil { + t.Errorf("cert login passed with unrecognized critical option") + } + + // allowed source address + cert.CriticalOptions = map[string]string{"source-address": "127.0.0.42/24,::42/120"} + cert.SignCert(rand.Reader, testSigners["ecdsa"]) + if err := tryAuth(t, clientConfig); err != nil { + t.Errorf("cert login with source-address failed: %v", err) + } + + // disallowed source address + cert.CriticalOptions = map[string]string{"source-address": "127.0.0.42,::42"} + cert.SignCert(rand.Reader, testSigners["ecdsa"]) + if err := tryAuth(t, clientConfig); 
err == nil { + t.Errorf("cert login with source-address succeeded") + } +} + +func testPermissionsPassing(withPermissions bool, t *testing.T) { + serverConfig := &ServerConfig{ + PublicKeyCallback: func(conn ConnMetadata, key PublicKey) (*Permissions, error) { + if conn.User() == "nopermissions" { + return nil, nil + } + return &Permissions{}, nil + }, + } + serverConfig.AddHostKey(testSigners["rsa"]) + + clientConfig := &ClientConfig{ + Auth: []AuthMethod{ + PublicKeys(testSigners["rsa"]), + }, + HostKeyCallback: InsecureIgnoreHostKey(), + } + if withPermissions { + clientConfig.User = "permissions" + } else { + clientConfig.User = "nopermissions" + } + + c1, c2, err := netPipe() + if err != nil { + t.Fatalf("netPipe: %v", err) + } + defer c1.Close() + defer c2.Close() + + go NewClientConn(c2, "", clientConfig) + serverConn, err := newServer(c1, serverConfig) + if err != nil { + t.Fatal(err) + } + if p := serverConn.Permissions; (p != nil) != withPermissions { + t.Fatalf("withPermissions is %t, but Permissions object is %#v", withPermissions, p) + } +} + +func TestPermissionsPassing(t *testing.T) { + testPermissionsPassing(true, t) +} + +func TestNoPermissionsPassing(t *testing.T) { + testPermissionsPassing(false, t) +} + +func TestRetryableAuth(t *testing.T) { + n := 0 + passwords := []string{"WRONG1", "WRONG2"} + + config := &ClientConfig{ + User: "testuser", + Auth: []AuthMethod{ + RetryableAuthMethod(PasswordCallback(func() (string, error) { + p := passwords[n] + n++ + return p, nil + }), 2), + PublicKeys(testSigners["rsa"]), + }, + HostKeyCallback: InsecureIgnoreHostKey(), + } + + if err := tryAuth(t, config); err != nil { + t.Fatalf("unable to dial remote side: %s", err) + } + if n != 2 { + t.Fatalf("Did not try all passwords") + } +} + +func ExampleRetryableAuthMethod(t *testing.T) { + user := "testuser" + NumberOfPrompts := 3 + + // Normally this would be a callback that prompts the user to answer the + // provided questions + Cb := func(user, instruction string, questions []string, echos []bool) (answers []string, err error) { + return []string{"answer1", "answer2"}, nil + } + + config := &ClientConfig{ + HostKeyCallback: InsecureIgnoreHostKey(), + User: user, + Auth: []AuthMethod{ + RetryableAuthMethod(KeyboardInteractiveChallenge(Cb), NumberOfPrompts), + }, + } + + if err := tryAuth(t, config); err != nil { + t.Fatalf("unable to dial remote side: %s", err) + } +} + +// Test if username is received on server side when NoClientAuth is used +func TestClientAuthNone(t *testing.T) { + user := "testuser" + serverConfig := &ServerConfig{ + NoClientAuth: true, + } + serverConfig.AddHostKey(testSigners["rsa"]) + + clientConfig := &ClientConfig{ + User: user, + HostKeyCallback: InsecureIgnoreHostKey(), + } + + c1, c2, err := netPipe() + if err != nil { + t.Fatalf("netPipe: %v", err) + } + defer c1.Close() + defer c2.Close() + + go NewClientConn(c2, "", clientConfig) + serverConn, err := newServer(c1, serverConfig) + if err != nil { + t.Fatalf("newServer: %v", err) + } + if serverConn.User() != user { + t.Fatalf("server: got %q, want %q", serverConn.User(), user) + } +} + +// Test if authentication attempts are limited on server when MaxAuthTries is set +func TestClientAuthMaxAuthTries(t *testing.T) { + user := "testuser" + + serverConfig := &ServerConfig{ + MaxAuthTries: 2, + PasswordCallback: func(conn ConnMetadata, pass []byte) (*Permissions, error) { + if conn.User() == "testuser" && string(pass) == "right" { + return nil, nil + } + return nil, errors.New("password auth failed") + }, 
+	} + serverConfig.AddHostKey(testSigners["rsa"]) + + expectedErr := fmt.Errorf("ssh: handshake failed: %v", &disconnectMsg{ + Reason: 2, + Message: "too many authentication failures", + }) + + for tries := 2; tries < 4; tries++ { + n := tries + clientConfig := &ClientConfig{ + User: user, + Auth: []AuthMethod{ + RetryableAuthMethod(PasswordCallback(func() (string, error) { + n-- + if n == 0 { + return "right", nil + } + return "wrong", nil + }), tries), + }, + HostKeyCallback: InsecureIgnoreHostKey(), + } + + c1, c2, err := netPipe() + if err != nil { + t.Fatalf("netPipe: %v", err) + } + defer c1.Close() + defer c2.Close() + + go newServer(c1, serverConfig) + _, _, _, err = NewClientConn(c2, "", clientConfig) + if tries > 2 { + if err == nil { + t.Fatalf("client: got no error, want %s", expectedErr) + } else if err.Error() != expectedErr.Error() { + t.Fatalf("client: got %s, want %s", err, expectedErr) + } + } else { + if err != nil { + t.Fatalf("client: got %s, want no error", err) + } + } + } +} + +// Test that authentication attempts are correctly limited on the server +// when more public keys are provided than MaxAuthTries +func TestClientAuthMaxAuthTriesPublicKey(t *testing.T) { + signers := []Signer{} + for i := 0; i < 6; i++ { + signers = append(signers, testSigners["dsa"]) + } + + validConfig := &ClientConfig{ + User: "testuser", + Auth: []AuthMethod{ + PublicKeys(append([]Signer{testSigners["rsa"]}, signers...)...), + }, + HostKeyCallback: InsecureIgnoreHostKey(), + } + if err := tryAuth(t, validConfig); err != nil { + t.Fatalf("unable to dial remote side: %s", err) + } + + expectedErr := fmt.Errorf("ssh: handshake failed: %v", &disconnectMsg{ + Reason: 2, + Message: "too many authentication failures", + }) + invalidConfig := &ClientConfig{ + User: "testuser", + Auth: []AuthMethod{ + PublicKeys(append(signers, testSigners["rsa"])...), + }, + HostKeyCallback: InsecureIgnoreHostKey(), + } + if err := tryAuth(t, invalidConfig); err == nil { + t.Fatalf("client: got no error, want %s", expectedErr) + } else if err.Error() != expectedErr.Error() { + t.Fatalf("client: got %s, want %s", err, expectedErr) + } +} + +// Test whether authentication errors are being properly logged if all +// authentication methods have been exhausted +func TestClientAuthErrorList(t *testing.T) { + publicKeyErr := errors.New("This is an error from PublicKeyCallback") + + clientConfig := &ClientConfig{ + Auth: []AuthMethod{ + PublicKeys(testSigners["rsa"]), + }, + HostKeyCallback: InsecureIgnoreHostKey(), + } + serverConfig := &ServerConfig{ + PublicKeyCallback: func(_ ConnMetadata, _ PublicKey) (*Permissions, error) { + return nil, publicKeyErr + }, + } + serverConfig.AddHostKey(testSigners["rsa"]) + + c1, c2, err := netPipe() + if err != nil { + t.Fatalf("netPipe: %v", err) + } + defer c1.Close() + defer c2.Close() + + go NewClientConn(c2, "", clientConfig) + _, err = newServer(c1, serverConfig) + if err == nil { + t.Fatal("newServer: got nil, expected errors") + } + + authErrs, ok := err.(*ServerAuthError) + if !ok { + t.Fatalf("errors: got %T, want *ssh.ServerAuthError", err) + } + for i, e := range authErrs.Errors { + switch i { + case 0: + if e.Error() != "no auth passed yet" { + t.Fatalf("errors: got %v, want no auth passed yet", e.Error()) + } + case 1: + if e != publicKeyErr { + t.Fatalf("errors: got %v, want %v", e, publicKeyErr) + } + default: + t.Fatalf("errors: got %v, expected 2 errors", authErrs.Errors) + } + } +} diff --git a/vendor/golang.org/x/crypto/ssh/client_test.go 
b/vendor/golang.org/x/crypto/ssh/client_test.go new file mode 100644 index 000000000000..ccf56074d724 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/client_test.go @@ -0,0 +1,81 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "net" + "strings" + "testing" +) + +func testClientVersion(t *testing.T, config *ClientConfig, expected string) { + clientConn, serverConn := net.Pipe() + defer clientConn.Close() + receivedVersion := make(chan string, 1) + config.HostKeyCallback = InsecureIgnoreHostKey() + go func() { + version, err := readVersion(serverConn) + if err != nil { + receivedVersion <- "" + } else { + receivedVersion <- string(version) + } + serverConn.Close() + }() + NewClientConn(clientConn, "", config) + actual := <-receivedVersion + if actual != expected { + t.Fatalf("got %s; want %s", actual, expected) + } +} + +func TestCustomClientVersion(t *testing.T) { + version := "Test-Client-Version-0.0" + testClientVersion(t, &ClientConfig{ClientVersion: version}, version) +} + +func TestDefaultClientVersion(t *testing.T) { + testClientVersion(t, &ClientConfig{}, packageVersion) +} + +func TestHostKeyCheck(t *testing.T) { + for _, tt := range []struct { + name string + wantError string + key PublicKey + }{ + {"no callback", "must specify HostKeyCallback", nil}, + {"correct key", "", testSigners["rsa"].PublicKey()}, + {"mismatch", "mismatch", testSigners["ecdsa"].PublicKey()}, + } { + c1, c2, err := netPipe() + if err != nil { + t.Fatalf("netPipe: %v", err) + } + defer c1.Close() + defer c2.Close() + serverConf := &ServerConfig{ + NoClientAuth: true, + } + serverConf.AddHostKey(testSigners["rsa"]) + + go NewServerConn(c1, serverConf) + clientConf := ClientConfig{ + User: "user", + } + if tt.key != nil { + clientConf.HostKeyCallback = FixedHostKey(tt.key) + } + + _, _, _, err = NewClientConn(c2, "", &clientConf) + if err != nil { + if tt.wantError == "" || !strings.Contains(err.Error(), tt.wantError) { + t.Errorf("%s: got error %q, missing %q", tt.name, err.Error(), tt.wantError) + } + } else if tt.wantError != "" { + t.Errorf("%s: succeeded, but want error string %q", tt.name, tt.wantError) + } + } +} diff --git a/vendor/golang.org/x/crypto/ssh/doc.go b/vendor/golang.org/x/crypto/ssh/doc.go index 6eb34c58d0c0..67b7322c0580 100644 --- a/vendor/golang.org/x/crypto/ssh/doc.go +++ b/vendor/golang.org/x/crypto/ssh/doc.go @@ -18,4 +18,4 @@ References: This package does not fall under the stability promise of the Go language itself, so its API may be changed when pressing needs arise. */ -package ssh +package ssh // import "golang.org/x/crypto/ssh" diff --git a/vendor/golang.org/x/crypto/ssh/example_test.go b/vendor/golang.org/x/crypto/ssh/example_test.go new file mode 100644 index 000000000000..b910c7bf60ba --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/example_test.go @@ -0,0 +1,320 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh_test + +import ( + "bufio" + "bytes" + "fmt" + "io/ioutil" + "log" + "net" + "net/http" + "os" + "path/filepath" + "strings" + + "golang.org/x/crypto/ssh" + "golang.org/x/crypto/ssh/terminal" +) + +func ExampleNewServerConn() { + // Public key authentication is done by comparing + // the public key of a received connection + // with the entries in the authorized_keys file. 
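+	// Each authorized_keys line has the form + //   [options] <keytype> <base64-encoded key> [comment] + // e.g. "ssh-rsa AAAAB3... user@example.com" (hypothetical key data).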
+ authorizedKeysBytes, err := ioutil.ReadFile("authorized_keys") + if err != nil { + log.Fatalf("Failed to load authorized_keys, err: %v", err) + } + + authorizedKeysMap := map[string]bool{} + for len(authorizedKeysBytes) > 0 { + pubKey, _, _, rest, err := ssh.ParseAuthorizedKey(authorizedKeysBytes) + if err != nil { + log.Fatal(err) + } + + authorizedKeysMap[string(pubKey.Marshal())] = true + authorizedKeysBytes = rest + } + + // An SSH server is represented by a ServerConfig, which holds + // certificate details and handles authentication of ServerConns. + config := &ssh.ServerConfig{ + // Remove to disable password auth. + PasswordCallback: func(c ssh.ConnMetadata, pass []byte) (*ssh.Permissions, error) { + // Should use constant-time compare (or better, salt+hash) in + // a production setting. + if c.User() == "testuser" && string(pass) == "tiger" { + return nil, nil + } + return nil, fmt.Errorf("password rejected for %q", c.User()) + }, + + // Remove to disable public key auth. + PublicKeyCallback: func(c ssh.ConnMetadata, pubKey ssh.PublicKey) (*ssh.Permissions, error) { + if authorizedKeysMap[string(pubKey.Marshal())] { + return &ssh.Permissions{ + // Record the public key used for authentication. + Extensions: map[string]string{ + "pubkey-fp": ssh.FingerprintSHA256(pubKey), + }, + }, nil + } + return nil, fmt.Errorf("unknown public key for %q", c.User()) + }, + } + + privateBytes, err := ioutil.ReadFile("id_rsa") + if err != nil { + log.Fatal("Failed to load private key: ", err) + } + + private, err := ssh.ParsePrivateKey(privateBytes) + if err != nil { + log.Fatal("Failed to parse private key: ", err) + } + + config.AddHostKey(private) + + // Once a ServerConfig has been configured, connections can be + // accepted. + listener, err := net.Listen("tcp", "0.0.0.0:2022") + if err != nil { + log.Fatal("failed to listen for connection: ", err) + } + nConn, err := listener.Accept() + if err != nil { + log.Fatal("failed to accept incoming connection: ", err) + } + + // Before use, a handshake must be performed on the incoming + // net.Conn. + conn, chans, reqs, err := ssh.NewServerConn(nConn, config) + if err != nil { + log.Fatal("failed to handshake: ", err) + } + log.Printf("logged in with key %s", conn.Permissions.Extensions["pubkey-fp"]) + + // The incoming Request channel must be serviced. + go ssh.DiscardRequests(reqs) + + // Service the incoming Channel channel. + for newChannel := range chans { + // Channels have a type, depending on the application level + // protocol intended. In the case of a shell, the type is + // "session" and ServerShell may be used to present a simple + // terminal interface. + if newChannel.ChannelType() != "session" { + newChannel.Reject(ssh.UnknownChannelType, "unknown channel type") + continue + } + channel, requests, err := newChannel.Accept() + if err != nil { + log.Fatalf("Could not accept channel: %v", err) + } + + // Sessions have out-of-band requests such as "shell", + // "pty-req" and "env". Here we handle only the + // "shell" request. + go func(in <-chan *ssh.Request) { + for req := range in { + req.Reply(req.Type == "shell", nil) + } + }(requests) + + term := terminal.NewTerminal(channel, "> ") + + go func() { + defer channel.Close() + for { + line, err := term.ReadLine() + if err != nil { + break + } + fmt.Println(line) + } + }() + } +} + +func ExampleHostKeyCheck() { + // Every client must provide a host key check. 
Here is a + // simple-minded parse of OpenSSH's known_hosts file + host := "hostname" + file, err := os.Open(filepath.Join(os.Getenv("HOME"), ".ssh", "known_hosts")) + if err != nil { + log.Fatal(err) + } + defer file.Close() + + scanner := bufio.NewScanner(file) + var hostKey ssh.PublicKey + for scanner.Scan() { + fields := strings.Split(scanner.Text(), " ") + if len(fields) != 3 { + continue + } + if strings.Contains(fields[0], host) { + var err error + hostKey, _, _, _, err = ssh.ParseAuthorizedKey(scanner.Bytes()) + if err != nil { + log.Fatalf("error parsing %q: %v", fields[2], err) + } + break + } + } + + if hostKey == nil { + log.Fatalf("no hostkey for %s", host) + } + + config := ssh.ClientConfig{ + User: os.Getenv("USER"), + HostKeyCallback: ssh.FixedHostKey(hostKey), + } + + _, err = ssh.Dial("tcp", host+":22", &config) + log.Println(err) +} + +func ExampleDial() { + var hostKey ssh.PublicKey + // An SSH client is represented with a ClientConn. + // + // To authenticate with the remote server you must pass at least one + // implementation of AuthMethod via the Auth field in ClientConfig, + // and provide a HostKeyCallback. + config := &ssh.ClientConfig{ + User: "username", + Auth: []ssh.AuthMethod{ + ssh.Password("yourpassword"), + }, + HostKeyCallback: ssh.FixedHostKey(hostKey), + } + client, err := ssh.Dial("tcp", "yourserver.com:22", config) + if err != nil { + log.Fatal("Failed to dial: ", err) + } + + // Each ClientConn can support multiple interactive sessions, + // represented by a Session. + session, err := client.NewSession() + if err != nil { + log.Fatal("Failed to create session: ", err) + } + defer session.Close() + + // Once a Session is created, you can execute a single command on + // the remote side using the Run method. + var b bytes.Buffer + session.Stdout = &b + if err := session.Run("/usr/bin/whoami"); err != nil { + log.Fatal("Failed to run: " + err.Error()) + } + fmt.Println(b.String()) +} + +func ExamplePublicKeys() { + var hostKey ssh.PublicKey + // A public key may be used to authenticate against the remote + // server by using an unencrypted PEM-encoded private key file. + // + // If you have an encrypted private key, the crypto/x509 package + // can be used to decrypt it. + key, err := ioutil.ReadFile("/home/user/.ssh/id_rsa") + if err != nil { + log.Fatalf("unable to read private key: %v", err) + } + + // Create the Signer for this private key. + signer, err := ssh.ParsePrivateKey(key) + if err != nil { + log.Fatalf("unable to parse private key: %v", err) + } + + config := &ssh.ClientConfig{ + User: "user", + Auth: []ssh.AuthMethod{ + // Use the PublicKeys method for remote authentication. + ssh.PublicKeys(signer), + }, + HostKeyCallback: ssh.FixedHostKey(hostKey), + } + + // Connect to the remote server and perform the SSH handshake. + client, err := ssh.Dial("tcp", "host.com:22", config) + if err != nil { + log.Fatalf("unable to connect: %v", err) + } + defer client.Close() +} + +func ExampleClient_Listen() { + var hostKey ssh.PublicKey + config := &ssh.ClientConfig{ + User: "username", + Auth: []ssh.AuthMethod{ + ssh.Password("password"), + }, + HostKeyCallback: ssh.FixedHostKey(hostKey), + } + // Dial your ssh server. + conn, err := ssh.Dial("tcp", "localhost:22", config) + if err != nil { + log.Fatal("unable to connect: ", err) + } + defer conn.Close() + + // Request the remote side to open port 8080 on all interfaces. 
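+	// This is remote ("reverse") port forwarding, the library analogue + // of OpenSSH's -R flag: the listener is opened on the remote server + // and accepted connections are tunneled back to this process.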
+ l, err := conn.Listen("tcp", "0.0.0.0:8080") + if err != nil { + log.Fatal("unable to register tcp forward: ", err) + } + defer l.Close() + + // Serve HTTP with your SSH server acting as a reverse proxy. + http.Serve(l, http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) { + fmt.Fprintf(resp, "Hello world!\n") + })) +} + +func ExampleSession_RequestPty() { + var hostKey ssh.PublicKey + // Create client config + config := &ssh.ClientConfig{ + User: "username", + Auth: []ssh.AuthMethod{ + ssh.Password("password"), + }, + HostKeyCallback: ssh.FixedHostKey(hostKey), + } + // Connect to ssh server + conn, err := ssh.Dial("tcp", "localhost:22", config) + if err != nil { + log.Fatal("unable to connect: ", err) + } + defer conn.Close() + // Create a session + session, err := conn.NewSession() + if err != nil { + log.Fatal("unable to create session: ", err) + } + defer session.Close() + // Set up terminal modes + modes := ssh.TerminalModes{ + ssh.ECHO: 0, // disable echoing + ssh.TTY_OP_ISPEED: 14400, // input speed = 14.4kbaud + ssh.TTY_OP_OSPEED: 14400, // output speed = 14.4kbaud + } + // Request pseudo terminal + if err := session.RequestPty("xterm", 40, 80, modes); err != nil { + log.Fatal("request for pseudo terminal failed: ", err) + } + // Start remote shell + if err := session.Shell(); err != nil { + log.Fatal("failed to start shell: ", err) + } +} diff --git a/vendor/golang.org/x/crypto/ssh/handshake_test.go b/vendor/golang.org/x/crypto/ssh/handshake_test.go new file mode 100644 index 000000000000..91d493568973 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/handshake_test.go @@ -0,0 +1,559 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "bytes" + "crypto/rand" + "errors" + "fmt" + "io" + "net" + "reflect" + "runtime" + "strings" + "sync" + "testing" +) + +type testChecker struct { + calls []string +} + +func (t *testChecker) Check(dialAddr string, addr net.Addr, key PublicKey) error { + if dialAddr == "bad" { + return fmt.Errorf("dialAddr is bad") + } + + if tcpAddr, ok := addr.(*net.TCPAddr); !ok || tcpAddr == nil { + return fmt.Errorf("testChecker: got %T want *net.TCPAddr", addr) + } + + t.calls = append(t.calls, fmt.Sprintf("%s %v %s %x", dialAddr, addr, key.Type(), key.Marshal())) + + return nil +} + +// netPipe is analogous to net.Pipe, but it uses a real net.Conn, and +// therefore is buffered (net.Pipe deadlocks if both sides start with +// a write.) +func netPipe() (net.Conn, net.Conn, error) { + listener, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + listener, err = net.Listen("tcp", "[::1]:0") + if err != nil { + return nil, nil, err + } + } + defer listener.Close() + c1, err := net.Dial("tcp", listener.Addr().String()) + if err != nil { + return nil, nil, err + } + + c2, err := listener.Accept() + if err != nil { + c1.Close() + return nil, nil, err + } + + return c1, c2, nil +} + +// noiseTransport inserts ignore messages to check that the read loop +// and the key exchange filters out these messages. 
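+// SSH_MSG_IGNORE and SSH_MSG_DEBUG are transport-layer messages that +// RFC 4253 requires peers to accept at any time, so the injected noise +// must be invisible to the handshake logic.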
+type noiseTransport struct { + keyingTransport +} + +func (t *noiseTransport) writePacket(p []byte) error { + ignore := []byte{msgIgnore} + if err := t.keyingTransport.writePacket(ignore); err != nil { + return err + } + debug := []byte{msgDebug, 1, 2, 3} + if err := t.keyingTransport.writePacket(debug); err != nil { + return err + } + + return t.keyingTransport.writePacket(p) +} + +func addNoiseTransport(t keyingTransport) keyingTransport { + return &noiseTransport{t} +} + +// handshakePair creates two handshakeTransports connected with each +// other. If the noise argument is true, both transports will try to +// confuse the other side by sending ignore and debug messages. +func handshakePair(clientConf *ClientConfig, addr string, noise bool) (client *handshakeTransport, server *handshakeTransport, err error) { + a, b, err := netPipe() + if err != nil { + return nil, nil, err + } + + var trC, trS keyingTransport + + trC = newTransport(a, rand.Reader, true) + trS = newTransport(b, rand.Reader, false) + if noise { + trC = addNoiseTransport(trC) + trS = addNoiseTransport(trS) + } + clientConf.SetDefaults() + + v := []byte("version") + client = newClientTransport(trC, v, v, clientConf, addr, a.RemoteAddr()) + + serverConf := &ServerConfig{} + serverConf.AddHostKey(testSigners["ecdsa"]) + serverConf.AddHostKey(testSigners["rsa"]) + serverConf.SetDefaults() + server = newServerTransport(trS, v, v, serverConf) + + if err := server.waitSession(); err != nil { + return nil, nil, fmt.Errorf("server.waitSession: %v", err) + } + if err := client.waitSession(); err != nil { + return nil, nil, fmt.Errorf("client.waitSession: %v", err) + } + + return client, server, nil +} + +func TestHandshakeBasic(t *testing.T) { + if runtime.GOOS == "plan9" { + t.Skip("see golang.org/issue/7237") + } + + checker := &syncChecker{ + waitCall: make(chan int, 10), + called: make(chan int, 10), + } + + checker.waitCall <- 1 + trC, trS, err := handshakePair(&ClientConfig{HostKeyCallback: checker.Check}, "addr", false) + if err != nil { + t.Fatalf("handshakePair: %v", err) + } + + defer trC.Close() + defer trS.Close() + + // Let first kex complete normally. + <-checker.called + + clientDone := make(chan int, 0) + gotHalf := make(chan int, 0) + const N = 20 + + go func() { + defer close(clientDone) + // Client writes a bunch of stuff, and does a key + // change in the middle. This should not confuse the + // handshake in progress. We do this twice, so we test + // that the packet buffer is reset correctly. + for i := 0; i < N; i++ { + p := []byte{msgRequestSuccess, byte(i)} + if err := trC.writePacket(p); err != nil { + t.Fatalf("sendPacket: %v", err) + } + if (i % 10) == 5 { + <-gotHalf + // halfway through, we request a key change. + trC.requestKeyExchange() + + // Wait until we can be sure the key + // change has really started before we + // write more. + <-checker.called + } + if (i % 10) == 7 { + // write some packets until the kex + // completes, to test buffering of + // packets. 
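+				// Sending on waitCall releases the host-key callback that + // the syncChecker blocks on, letting the pending key + // exchange finish while writes continue.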
+	checker.waitCall <- 1 + } + } + }() + + // Server checks that client messages come in cleanly + i := 0 + err = nil + for ; i < N; i++ { + var p []byte + p, err = trS.readPacket() + if err != nil { + break + } + if (i % 10) == 5 { + gotHalf <- 1 + } + + want := []byte{msgRequestSuccess, byte(i)} + if !bytes.Equal(p, want) { + t.Errorf("message %d: got %v, want %v", i, p, want) + } + } + <-clientDone + if err != nil && err != io.EOF { + t.Fatalf("server error: %v", err) + } + if i != N { + t.Errorf("received %d messages, want %d", i, N) + } + + close(checker.called) + if _, ok := <-checker.called; ok { + // If all went well, we registered exactly 2 key changes: one + // that establishes the session, and one that we requested + // additionally. + t.Fatalf("got another host key check after 2 handshakes") + } +} + +func TestForceFirstKex(t *testing.T) { + // like handshakePair, but must access the keyingTransport. + checker := &testChecker{} + clientConf := &ClientConfig{HostKeyCallback: checker.Check} + a, b, err := netPipe() + if err != nil { + t.Fatalf("netPipe: %v", err) + } + + var trC, trS keyingTransport + + trC = newTransport(a, rand.Reader, true) + + // This is the disallowed packet: + trC.writePacket(Marshal(&serviceRequestMsg{serviceUserAuth})) + + // Rest of the setup. + trS = newTransport(b, rand.Reader, false) + clientConf.SetDefaults() + + v := []byte("version") + client := newClientTransport(trC, v, v, clientConf, "addr", a.RemoteAddr()) + + serverConf := &ServerConfig{} + serverConf.AddHostKey(testSigners["ecdsa"]) + serverConf.AddHostKey(testSigners["rsa"]) + serverConf.SetDefaults() + server := newServerTransport(trS, v, v, serverConf) + + defer client.Close() + defer server.Close() + + // We set up the initial key exchange, but the remote side + // tries to send serviceRequestMsg in cleartext, which is + // disallowed. + + if err := server.waitSession(); err == nil { + t.Errorf("server first kex init should reject unexpected packet") + } +} + +func TestHandshakeAutoRekeyWrite(t *testing.T) { + checker := &syncChecker{ + called: make(chan int, 10), + waitCall: nil, + } + clientConf := &ClientConfig{HostKeyCallback: checker.Check} + clientConf.RekeyThreshold = 500 + trC, trS, err := handshakePair(clientConf, "addr", false) + if err != nil { + t.Fatalf("handshakePair: %v", err) + } + defer trC.Close() + defer trS.Close() + + input := make([]byte, 251) + input[0] = msgRequestSuccess + + done := make(chan int, 1) + const numPacket = 5 + go func() { + defer close(done) + j := 0 + for ; j < numPacket; j++ { + if p, err := trS.readPacket(); err != nil { + break + } else if !bytes.Equal(input, p) { + t.Errorf("got packet type %d, want %d", p[0], input[0]) + } + } + + if j != numPacket { + t.Errorf("got %d, want 5 messages", j) + } + }() + + <-checker.called + + for i := 0; i < numPacket; i++ { + p := make([]byte, len(input)) + copy(p, input) + if err := trC.writePacket(p); err != nil { + t.Errorf("writePacket: %v", err) + } + if i == 2 { + // Make sure the kex is in progress. 
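+			// With RekeyThreshold set to 500 bytes and 251-byte payloads, + // the second write crosses the threshold, so receiving on + // checker.called confirms the automatic rekey has actually begun.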
+ <-checker.called + } + + } + <-done +} + +type syncChecker struct { + waitCall chan int + called chan int +} + +func (c *syncChecker) Check(dialAddr string, addr net.Addr, key PublicKey) error { + c.called <- 1 + if c.waitCall != nil { + <-c.waitCall + } + return nil +} + +func TestHandshakeAutoRekeyRead(t *testing.T) { + sync := &syncChecker{ + called: make(chan int, 2), + waitCall: nil, + } + clientConf := &ClientConfig{ + HostKeyCallback: sync.Check, + } + clientConf.RekeyThreshold = 500 + + trC, trS, err := handshakePair(clientConf, "addr", false) + if err != nil { + t.Fatalf("handshakePair: %v", err) + } + defer trC.Close() + defer trS.Close() + + packet := make([]byte, 501) + packet[0] = msgRequestSuccess + if err := trS.writePacket(packet); err != nil { + t.Fatalf("writePacket: %v", err) + } + + // While we read out the packet, a key change will be + // initiated. + done := make(chan int, 1) + go func() { + defer close(done) + if _, err := trC.readPacket(); err != nil { + t.Fatalf("readPacket(client): %v", err) + } + + }() + + <-done + <-sync.called +} + +// errorKeyingTransport generates errors after a given number of +// read/write operations. +type errorKeyingTransport struct { + packetConn + readLeft, writeLeft int +} + +func (n *errorKeyingTransport) prepareKeyChange(*algorithms, *kexResult) error { + return nil +} + +func (n *errorKeyingTransport) getSessionID() []byte { + return nil +} + +func (n *errorKeyingTransport) writePacket(packet []byte) error { + if n.writeLeft == 0 { + n.Close() + return errors.New("barf") + } + + n.writeLeft-- + return n.packetConn.writePacket(packet) +} + +func (n *errorKeyingTransport) readPacket() ([]byte, error) { + if n.readLeft == 0 { + n.Close() + return nil, errors.New("barf") + } + + n.readLeft-- + return n.packetConn.readPacket() +} + +func TestHandshakeErrorHandlingRead(t *testing.T) { + for i := 0; i < 20; i++ { + testHandshakeErrorHandlingN(t, i, -1, false) + } +} + +func TestHandshakeErrorHandlingWrite(t *testing.T) { + for i := 0; i < 20; i++ { + testHandshakeErrorHandlingN(t, -1, i, false) + } +} + +func TestHandshakeErrorHandlingReadCoupled(t *testing.T) { + for i := 0; i < 20; i++ { + testHandshakeErrorHandlingN(t, i, -1, true) + } +} + +func TestHandshakeErrorHandlingWriteCoupled(t *testing.T) { + for i := 0; i < 20; i++ { + testHandshakeErrorHandlingN(t, -1, i, true) + } +} + +// testHandshakeErrorHandlingN runs handshakes, injecting errors. If +// handshakeTransport deadlocks, the go runtime will detect it and +// panic. 
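+// readLimit and writeLimit give the number of operations that succeed +// before an error is injected (-1 disables injection on that side); +// coupled selects whether each endpoint reads and writes from a single +// goroutine.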
+func testHandshakeErrorHandlingN(t *testing.T, readLimit, writeLimit int, coupled bool) { + msg := Marshal(&serviceRequestMsg{strings.Repeat("x", int(minRekeyThreshold)/4)}) + + a, b := memPipe() + defer a.Close() + defer b.Close() + + key := testSigners["ecdsa"] + serverConf := Config{RekeyThreshold: minRekeyThreshold} + serverConf.SetDefaults() + serverConn := newHandshakeTransport(&errorKeyingTransport{a, readLimit, writeLimit}, &serverConf, []byte{'a'}, []byte{'b'}) + serverConn.hostKeys = []Signer{key} + go serverConn.readLoop() + go serverConn.kexLoop() + + clientConf := Config{RekeyThreshold: 10 * minRekeyThreshold} + clientConf.SetDefaults() + clientConn := newHandshakeTransport(&errorKeyingTransport{b, -1, -1}, &clientConf, []byte{'a'}, []byte{'b'}) + clientConn.hostKeyAlgorithms = []string{key.PublicKey().Type()} + clientConn.hostKeyCallback = InsecureIgnoreHostKey() + go clientConn.readLoop() + go clientConn.kexLoop() + + var wg sync.WaitGroup + + for _, hs := range []packetConn{serverConn, clientConn} { + if !coupled { + wg.Add(2) + go func(c packetConn) { + for i := 0; ; i++ { + str := fmt.Sprintf("%08x", i) + strings.Repeat("x", int(minRekeyThreshold)/4-8) + err := c.writePacket(Marshal(&serviceRequestMsg{str})) + if err != nil { + break + } + } + wg.Done() + c.Close() + }(hs) + go func(c packetConn) { + for { + _, err := c.readPacket() + if err != nil { + break + } + } + wg.Done() + }(hs) + } else { + wg.Add(1) + go func(c packetConn) { + for { + _, err := c.readPacket() + if err != nil { + break + } + if err := c.writePacket(msg); err != nil { + break + } + + } + wg.Done() + }(hs) + } + } + wg.Wait() +} + +func TestDisconnect(t *testing.T) { + if runtime.GOOS == "plan9" { + t.Skip("see golang.org/issue/7237") + } + checker := &testChecker{} + trC, trS, err := handshakePair(&ClientConfig{HostKeyCallback: checker.Check}, "addr", false) + if err != nil { + t.Fatalf("handshakePair: %v", err) + } + + defer trC.Close() + defer trS.Close() + + trC.writePacket([]byte{msgRequestSuccess, 0, 0}) + errMsg := &disconnectMsg{ + Reason: 42, + Message: "such is life", + } + trC.writePacket(Marshal(errMsg)) + trC.writePacket([]byte{msgRequestSuccess, 0, 0}) + + packet, err := trS.readPacket() + if err != nil { + t.Fatalf("readPacket 1: %v", err) + } + if packet[0] != msgRequestSuccess { + t.Errorf("got packet %v, want packet type %d", packet, msgRequestSuccess) + } + + _, err = trS.readPacket() + if err == nil { + t.Errorf("readPacket 2 succeeded") + } else if !reflect.DeepEqual(err, errMsg) { + t.Errorf("got error %#v, want %#v", err, errMsg) + } + + _, err = trS.readPacket() + if err == nil { + t.Errorf("readPacket 3 succeeded") + } +} + +func TestHandshakeRekeyDefault(t *testing.T) { + clientConf := &ClientConfig{ + Config: Config{ + Ciphers: []string{"aes128-ctr"}, + }, + HostKeyCallback: InsecureIgnoreHostKey(), + } + trC, trS, err := handshakePair(clientConf, "addr", false) + if err != nil { + t.Fatalf("handshakePair: %v", err) + } + defer trC.Close() + defer trS.Close() + + trC.writePacket([]byte{msgRequestSuccess, 0, 0}) + trC.Close() + + rgb := (1024 + trC.readBytesLeft) >> 30 + wgb := (1024 + trC.writeBytesLeft) >> 30 + + if rgb != 64 { + t.Errorf("got rekey after %dG read, want 64G", rgb) + } + if wgb != 64 { + t.Errorf("got rekey after %dG write, want 64G", wgb) + } +} diff --git a/vendor/golang.org/x/crypto/ssh/kex_test.go b/vendor/golang.org/x/crypto/ssh/kex_test.go new file mode 100644 index 000000000000..12ca0acd31d0 --- /dev/null +++ 
b/vendor/golang.org/x/crypto/ssh/kex_test.go @@ -0,0 +1,50 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +// Key exchange tests. + +import ( + "crypto/rand" + "reflect" + "testing" +) + +func TestKexes(t *testing.T) { + type kexResultErr struct { + result *kexResult + err error + } + + for name, kex := range kexAlgoMap { + a, b := memPipe() + + s := make(chan kexResultErr, 1) + c := make(chan kexResultErr, 1) + var magics handshakeMagics + go func() { + r, e := kex.Client(a, rand.Reader, &magics) + a.Close() + c <- kexResultErr{r, e} + }() + go func() { + r, e := kex.Server(b, rand.Reader, &magics, testSigners["ecdsa"]) + b.Close() + s <- kexResultErr{r, e} + }() + + clientRes := <-c + serverRes := <-s + if clientRes.err != nil { + t.Errorf("client: %v", clientRes.err) + } + if serverRes.err != nil { + t.Errorf("server: %v", serverRes.err) + } + if !reflect.DeepEqual(clientRes.result, serverRes.result) { + t.Errorf("kex %q: mismatch %#v, %#v", name, clientRes.result, serverRes.result) + } + } +} diff --git a/vendor/golang.org/x/crypto/ssh/keys_test.go b/vendor/golang.org/x/crypto/ssh/keys_test.go new file mode 100644 index 000000000000..20ab954e2afa --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/keys_test.go @@ -0,0 +1,500 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "bytes" + "crypto/dsa" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "encoding/base64" + "fmt" + "reflect" + "strings" + "testing" + + "golang.org/x/crypto/ed25519" + "golang.org/x/crypto/ssh/testdata" +) + +func rawKey(pub PublicKey) interface{} { + switch k := pub.(type) { + case *rsaPublicKey: + return (*rsa.PublicKey)(k) + case *dsaPublicKey: + return (*dsa.PublicKey)(k) + case *ecdsaPublicKey: + return (*ecdsa.PublicKey)(k) + case ed25519PublicKey: + return (ed25519.PublicKey)(k) + case *Certificate: + return k + } + panic("unknown key type") +} + +func TestKeyMarshalParse(t *testing.T) { + for _, priv := range testSigners { + pub := priv.PublicKey() + roundtrip, err := ParsePublicKey(pub.Marshal()) + if err != nil { + t.Errorf("ParsePublicKey(%T): %v", pub, err) + } + + k1 := rawKey(pub) + k2 := rawKey(roundtrip) + + if !reflect.DeepEqual(k1, k2) { + t.Errorf("got %#v in roundtrip, want %#v", k2, k1) + } + } +} + +func TestUnsupportedCurves(t *testing.T) { + raw, err := ecdsa.GenerateKey(elliptic.P224(), rand.Reader) + if err != nil { + t.Fatalf("GenerateKey: %v", err) + } + + if _, err = NewSignerFromKey(raw); err == nil || !strings.Contains(err.Error(), "only P-256") { + t.Fatalf("NewPrivateKey should not succeed with P-224, got: %v", err) + } + + if _, err = NewPublicKey(&raw.PublicKey); err == nil || !strings.Contains(err.Error(), "only P-256") { + t.Fatalf("NewPublicKey should not succeed with P-224, got: %v", err) + } +} + +func TestNewPublicKey(t *testing.T) { + for _, k := range testSigners { + raw := rawKey(k.PublicKey()) + // Skip certificates, as NewPublicKey does not support them. 
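+		// (Certificates are parsed wrappers around a key plus a CA + // signature and have no raw crypto.PublicKey form for NewPublicKey + // to accept.)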
+ if _, ok := raw.(*Certificate); ok { + continue + } + pub, err := NewPublicKey(raw) + if err != nil { + t.Errorf("NewPublicKey(%#v): %v", raw, err) + } + if !reflect.DeepEqual(k.PublicKey(), pub) { + t.Errorf("NewPublicKey(%#v) = %#v, want %#v", raw, pub, k.PublicKey()) + } + } +} + +func TestKeySignVerify(t *testing.T) { + for _, priv := range testSigners { + pub := priv.PublicKey() + + data := []byte("sign me") + sig, err := priv.Sign(rand.Reader, data) + if err != nil { + t.Fatalf("Sign(%T): %v", priv, err) + } + + if err := pub.Verify(data, sig); err != nil { + t.Errorf("publicKey.Verify(%T): %v", priv, err) + } + sig.Blob[5]++ + if err := pub.Verify(data, sig); err == nil { + t.Errorf("publicKey.Verify on broken sig did not fail") + } + } +} + +func TestParseRSAPrivateKey(t *testing.T) { + key := testPrivateKeys["rsa"] + + rsa, ok := key.(*rsa.PrivateKey) + if !ok { + t.Fatalf("got %T, want *rsa.PrivateKey", rsa) + } + + if err := rsa.Validate(); err != nil { + t.Errorf("Validate: %v", err) + } +} + +func TestParseECPrivateKey(t *testing.T) { + key := testPrivateKeys["ecdsa"] + + ecKey, ok := key.(*ecdsa.PrivateKey) + if !ok { + t.Fatalf("got %T, want *ecdsa.PrivateKey", ecKey) + } + + if !validateECPublicKey(ecKey.Curve, ecKey.X, ecKey.Y) { + t.Fatalf("public key does not validate.") + } +} + +// See Issue https://github.com/golang/go/issues/6650. +func TestParseEncryptedPrivateKeysFails(t *testing.T) { + const wantSubstring = "encrypted" + for i, tt := range testdata.PEMEncryptedKeys { + _, err := ParsePrivateKey(tt.PEMBytes) + if err == nil { + t.Errorf("#%d key %s: ParsePrivateKey successfully parsed, expected an error", i, tt.Name) + continue + } + + if !strings.Contains(err.Error(), wantSubstring) { + t.Errorf("#%d key %s: got error %q, want substring %q", i, tt.Name, err, wantSubstring) + } + } +} + +// Parse encrypted private keys with passphrase +func TestParseEncryptedPrivateKeysWithPassphrase(t *testing.T) { + data := []byte("sign me") + for _, tt := range testdata.PEMEncryptedKeys { + s, err := ParsePrivateKeyWithPassphrase(tt.PEMBytes, []byte(tt.EncryptionKey)) + if err != nil { + t.Fatalf("ParsePrivateKeyWithPassphrase returned error: %s", err) + continue + } + sig, err := s.Sign(rand.Reader, data) + if err != nil { + t.Fatalf("dsa.Sign: %v", err) + } + if err := s.PublicKey().Verify(data, sig); err != nil { + t.Errorf("Verify failed: %v", err) + } + } + + tt := testdata.PEMEncryptedKeys[0] + _, err := ParsePrivateKeyWithPassphrase(tt.PEMBytes, []byte("incorrect")) + if err != x509.IncorrectPasswordError { + t.Fatalf("got %v want IncorrectPasswordError", err) + } +} + +func TestParseDSA(t *testing.T) { + // We actually exercise the ParsePrivateKey codepath here, as opposed to + // using the ParseRawPrivateKey+NewSignerFromKey path that testdata_test.go + // uses. + s, err := ParsePrivateKey(testdata.PEMBytes["dsa"]) + if err != nil { + t.Fatalf("ParsePrivateKey returned error: %s", err) + } + + data := []byte("sign me") + sig, err := s.Sign(rand.Reader, data) + if err != nil { + t.Fatalf("dsa.Sign: %v", err) + } + + if err := s.PublicKey().Verify(data, sig); err != nil { + t.Errorf("Verify failed: %v", err) + } +} + +// Tests for authorized_keys parsing. + +// getTestKey returns a public key, and its base64 encoding. 
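+// The base64 string is the middle field of an authorized_keys entry +// ("<keytype> <base64> [comment]"), which the tests below splice into +// synthetic lines.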
+func getTestKey() (PublicKey, string) { + k := testPublicKeys["rsa"] + + b := &bytes.Buffer{} + e := base64.NewEncoder(base64.StdEncoding, b) + e.Write(k.Marshal()) + e.Close() + + return k, b.String() +} + +func TestMarshalParsePublicKey(t *testing.T) { + pub, pubSerialized := getTestKey() + line := fmt.Sprintf("%s %s user@host", pub.Type(), pubSerialized) + + authKeys := MarshalAuthorizedKey(pub) + actualFields := strings.Fields(string(authKeys)) + if len(actualFields) == 0 { + t.Fatalf("failed authKeys: %v", authKeys) + } + + // drop the comment + expectedFields := strings.Fields(line)[0:2] + + if !reflect.DeepEqual(actualFields, expectedFields) { + t.Errorf("got %v, expected %v", actualFields, expectedFields) + } + + actPub, _, _, _, err := ParseAuthorizedKey([]byte(line)) + if err != nil { + t.Fatalf("cannot parse %v: %v", line, err) + } + if !reflect.DeepEqual(actPub, pub) { + t.Errorf("got %v, expected %v", actPub, pub) + } +} + +type authResult struct { + pubKey PublicKey + options []string + comments string + rest string + ok bool +} + +func testAuthorizedKeys(t *testing.T, authKeys []byte, expected []authResult) { + rest := authKeys + var values []authResult + for len(rest) > 0 { + var r authResult + var err error + r.pubKey, r.comments, r.options, rest, err = ParseAuthorizedKey(rest) + r.ok = (err == nil) + t.Log(err) + r.rest = string(rest) + values = append(values, r) + } + + if !reflect.DeepEqual(values, expected) { + t.Errorf("got %#v, expected %#v", values, expected) + } +} + +func TestAuthorizedKeyBasic(t *testing.T) { + pub, pubSerialized := getTestKey() + line := "ssh-rsa " + pubSerialized + " user@host" + testAuthorizedKeys(t, []byte(line), + []authResult{ + {pub, nil, "user@host", "", true}, + }) +} + +func TestAuth(t *testing.T) { + pub, pubSerialized := getTestKey() + authWithOptions := []string{ + `# comments to ignore before any keys...`, + ``, + `env="HOME=/home/root",no-port-forwarding ssh-rsa ` + pubSerialized + ` user@host`, + `# comments to ignore, along with a blank line`, + ``, + `env="HOME=/home/root2" ssh-rsa ` + pubSerialized + ` user2@host2`, + ``, + `# more comments, plus a invalid entry`, + `ssh-rsa data-that-will-not-parse user@host3`, + } + for _, eol := range []string{"\n", "\r\n"} { + authOptions := strings.Join(authWithOptions, eol) + rest2 := strings.Join(authWithOptions[3:], eol) + rest3 := strings.Join(authWithOptions[6:], eol) + testAuthorizedKeys(t, []byte(authOptions), []authResult{ + {pub, []string{`env="HOME=/home/root"`, "no-port-forwarding"}, "user@host", rest2, true}, + {pub, []string{`env="HOME=/home/root2"`}, "user2@host2", rest3, true}, + {nil, nil, "", "", false}, + }) + } +} + +func TestAuthWithQuotedSpaceInEnv(t *testing.T) { + pub, pubSerialized := getTestKey() + authWithQuotedSpaceInEnv := []byte(`env="HOME=/home/root dir",no-port-forwarding ssh-rsa ` + pubSerialized + ` user@host`) + testAuthorizedKeys(t, []byte(authWithQuotedSpaceInEnv), []authResult{ + {pub, []string{`env="HOME=/home/root dir"`, "no-port-forwarding"}, "user@host", "", true}, + }) +} + +func TestAuthWithQuotedCommaInEnv(t *testing.T) { + pub, pubSerialized := getTestKey() + authWithQuotedCommaInEnv := []byte(`env="HOME=/home/root,dir",no-port-forwarding ssh-rsa ` + pubSerialized + ` user@host`) + testAuthorizedKeys(t, []byte(authWithQuotedCommaInEnv), []authResult{ + {pub, []string{`env="HOME=/home/root,dir"`, "no-port-forwarding"}, "user@host", "", true}, + }) +} + +func TestAuthWithQuotedQuoteInEnv(t *testing.T) { + pub, pubSerialized := getTestKey() + 
authWithQuotedQuoteInEnv := []byte(`env="HOME=/home/\"root dir",no-port-forwarding` + "\t" + `ssh-rsa` + "\t" + pubSerialized + ` user@host`) + authWithDoubleQuotedQuote := []byte(`no-port-forwarding,env="HOME=/home/ \"root dir\"" ssh-rsa ` + pubSerialized + "\t" + `user@host`) + testAuthorizedKeys(t, []byte(authWithQuotedQuoteInEnv), []authResult{ + {pub, []string{`env="HOME=/home/\"root dir"`, "no-port-forwarding"}, "user@host", "", true}, + }) + + testAuthorizedKeys(t, []byte(authWithDoubleQuotedQuote), []authResult{ + {pub, []string{"no-port-forwarding", `env="HOME=/home/ \"root dir\""`}, "user@host", "", true}, + }) +} + +func TestAuthWithInvalidSpace(t *testing.T) { + _, pubSerialized := getTestKey() + authWithInvalidSpace := []byte(`env="HOME=/home/root dir", no-port-forwarding ssh-rsa ` + pubSerialized + ` user@host +#more to follow but still no valid keys`) + testAuthorizedKeys(t, []byte(authWithInvalidSpace), []authResult{ + {nil, nil, "", "", false}, + }) +} + +func TestAuthWithMissingQuote(t *testing.T) { + pub, pubSerialized := getTestKey() + authWithMissingQuote := []byte(`env="HOME=/home/root,no-port-forwarding ssh-rsa ` + pubSerialized + ` user@host +env="HOME=/home/root",shared-control ssh-rsa ` + pubSerialized + ` user@host`) + + testAuthorizedKeys(t, []byte(authWithMissingQuote), []authResult{ + {pub, []string{`env="HOME=/home/root"`, `shared-control`}, "user@host", "", true}, + }) +} + +func TestInvalidEntry(t *testing.T) { + authInvalid := []byte(`ssh-rsa`) + _, _, _, _, err := ParseAuthorizedKey(authInvalid) + if err == nil { + t.Errorf("got valid entry for %q", authInvalid) + } +} + +var knownHostsParseTests = []struct { + input string + err string + + marker string + comment string + hosts []string + rest string +}{ + { + "", + "EOF", + + "", "", nil, "", + }, + { + "# Just a comment", + "EOF", + + "", "", nil, "", + }, + { + " \t ", + "EOF", + + "", "", nil, "", + }, + { + "localhost ssh-rsa {RSAPUB}", + "", + + "", "", []string{"localhost"}, "", + }, + { + "localhost\tssh-rsa {RSAPUB}", + "", + + "", "", []string{"localhost"}, "", + }, + { + "localhost\tssh-rsa {RSAPUB}\tcomment comment", + "", + + "", "comment comment", []string{"localhost"}, "", + }, + { + "localhost\tssh-rsa {RSAPUB}\tcomment comment\n", + "", + + "", "comment comment", []string{"localhost"}, "", + }, + { + "localhost\tssh-rsa {RSAPUB}\tcomment comment\r\n", + "", + + "", "comment comment", []string{"localhost"}, "", + }, + { + "localhost\tssh-rsa {RSAPUB}\tcomment comment\r\nnext line", + "", + + "", "comment comment", []string{"localhost"}, "next line", + }, + { + "localhost,[host2:123]\tssh-rsa {RSAPUB}\tcomment comment", + "", + + "", "comment comment", []string{"localhost", "[host2:123]"}, "", + }, + { + "@marker \tlocalhost,[host2:123]\tssh-rsa {RSAPUB}", + "", + + "marker", "", []string{"localhost", "[host2:123]"}, "", + }, + { + "@marker \tlocalhost,[host2:123]\tssh-rsa aabbccdd", + "short read", + + "", "", nil, "", + }, +} + +func TestKnownHostsParsing(t *testing.T) { + rsaPub, rsaPubSerialized := getTestKey() + + for i, test := range knownHostsParseTests { + var expectedKey PublicKey + const rsaKeyToken = "{RSAPUB}" + + input := test.input + if strings.Contains(input, rsaKeyToken) { + expectedKey = rsaPub + input = strings.Replace(test.input, rsaKeyToken, rsaPubSerialized, -1) + } + + marker, hosts, pubKey, comment, rest, err := ParseKnownHosts([]byte(input)) + if err != nil { + if len(test.err) == 0 { + t.Errorf("#%d: unexpectedly failed with %q", i, err) + } else if 
!strings.Contains(err.Error(), test.err) { + t.Errorf("#%d: expected error containing %q, but got %q", i, test.err, err) + } + continue + } else if len(test.err) != 0 { + t.Errorf("#%d: succeeded but expected error including %q", i, test.err) + continue + } + + if !reflect.DeepEqual(expectedKey, pubKey) { + t.Errorf("#%d: expected key %#v, but got %#v", i, expectedKey, pubKey) + } + + if marker != test.marker { + t.Errorf("#%d: expected marker %q, but got %q", i, test.marker, marker) + } + + if comment != test.comment { + t.Errorf("#%d: expected comment %q, but got %q", i, test.comment, comment) + } + + if !reflect.DeepEqual(test.hosts, hosts) { + t.Errorf("#%d: expected hosts %#v, but got %#v", i, test.hosts, hosts) + } + + if rest := string(rest); rest != test.rest { + t.Errorf("#%d: expected remaining input to be %q, but got %q", i, test.rest, rest) + } + } +} + +func TestFingerprintLegacyMD5(t *testing.T) { + pub, _ := getTestKey() + fingerprint := FingerprintLegacyMD5(pub) + want := "fb:61:6d:1a:e3:f0:95:45:3c:a0:79:be:4a:93:63:66" // ssh-keygen -lf -E md5 rsa + if fingerprint != want { + t.Errorf("got fingerprint %q want %q", fingerprint, want) + } +} + +func TestFingerprintSHA256(t *testing.T) { + pub, _ := getTestKey() + fingerprint := FingerprintSHA256(pub) + want := "SHA256:Anr3LjZK8YVpjrxu79myrW9Hrb/wpcMNpVvTq/RcBm8" // ssh-keygen -lf rsa + if fingerprint != want { + t.Errorf("got fingerprint %q want %q", fingerprint, want) + } +} diff --git a/vendor/golang.org/x/crypto/ssh/mempipe_test.go b/vendor/golang.org/x/crypto/ssh/mempipe_test.go new file mode 100644 index 000000000000..8697cd6140a7 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/mempipe_test.go @@ -0,0 +1,110 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "io" + "sync" + "testing" +) + +// An in-memory packetConn. It is safe to call Close and writePacket +// from different goroutines. 
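+// Each endpoint holds a pointer to its peer: writePacket appends a
+// copy of the packet to the peer's pending queue and signals the
+// peer's condition variable, while readPacket blocks on its own
+// condition variable until a packet or EOF is available.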
+type memTransport struct { + eof bool + pending [][]byte + write *memTransport + sync.Mutex + *sync.Cond +} + +func (t *memTransport) readPacket() ([]byte, error) { + t.Lock() + defer t.Unlock() + for { + if len(t.pending) > 0 { + r := t.pending[0] + t.pending = t.pending[1:] + return r, nil + } + if t.eof { + return nil, io.EOF + } + t.Cond.Wait() + } +} + +func (t *memTransport) closeSelf() error { + t.Lock() + defer t.Unlock() + if t.eof { + return io.EOF + } + t.eof = true + t.Cond.Broadcast() + return nil +} + +func (t *memTransport) Close() error { + err := t.write.closeSelf() + t.closeSelf() + return err +} + +func (t *memTransport) writePacket(p []byte) error { + t.write.Lock() + defer t.write.Unlock() + if t.write.eof { + return io.EOF + } + c := make([]byte, len(p)) + copy(c, p) + t.write.pending = append(t.write.pending, c) + t.write.Cond.Signal() + return nil +} + +func memPipe() (a, b packetConn) { + t1 := memTransport{} + t2 := memTransport{} + t1.write = &t2 + t2.write = &t1 + t1.Cond = sync.NewCond(&t1.Mutex) + t2.Cond = sync.NewCond(&t2.Mutex) + return &t1, &t2 +} + +func TestMemPipe(t *testing.T) { + a, b := memPipe() + if err := a.writePacket([]byte{42}); err != nil { + t.Fatalf("writePacket: %v", err) + } + if err := a.Close(); err != nil { + t.Fatal("Close: ", err) + } + p, err := b.readPacket() + if err != nil { + t.Fatal("readPacket: ", err) + } + if len(p) != 1 || p[0] != 42 { + t.Fatalf("got %v, want {42}", p) + } + p, err = b.readPacket() + if err != io.EOF { + t.Fatalf("got %v, %v, want EOF", p, err) + } +} + +func TestDoubleClose(t *testing.T) { + a, _ := memPipe() + err := a.Close() + if err != nil { + t.Errorf("Close: %v", err) + } + err = a.Close() + if err != io.EOF { + t.Errorf("expect EOF on double close.") + } +} diff --git a/vendor/golang.org/x/crypto/ssh/messages_test.go b/vendor/golang.org/x/crypto/ssh/messages_test.go new file mode 100644 index 000000000000..e79076412ab4 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/messages_test.go @@ -0,0 +1,288 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ssh + +import ( + "bytes" + "math/big" + "math/rand" + "reflect" + "testing" + "testing/quick" +) + +var intLengthTests = []struct { + val, length int +}{ + {0, 4 + 0}, + {1, 4 + 1}, + {127, 4 + 1}, + {128, 4 + 2}, + {-1, 4 + 1}, +} + +func TestIntLength(t *testing.T) { + for _, test := range intLengthTests { + v := new(big.Int).SetInt64(int64(test.val)) + length := intLength(v) + if length != test.length { + t.Errorf("For %d, got length %d but expected %d", test.val, length, test.length) + } + } +} + +type msgAllTypes struct { + Bool bool `sshtype:"21"` + Array [16]byte + Uint64 uint64 + Uint32 uint32 + Uint8 uint8 + String string + Strings []string + Bytes []byte + Int *big.Int + Rest []byte `ssh:"rest"` +} + +func (t *msgAllTypes) Generate(rand *rand.Rand, size int) reflect.Value { + m := &msgAllTypes{} + m.Bool = rand.Intn(2) == 1 + randomBytes(m.Array[:], rand) + m.Uint64 = uint64(rand.Int63n(1<<63 - 1)) + m.Uint32 = uint32(rand.Intn((1 << 31) - 1)) + m.Uint8 = uint8(rand.Intn(1 << 8)) + m.String = string(m.Array[:]) + m.Strings = randomNameList(rand) + m.Bytes = m.Array[:] + m.Int = randomInt(rand) + m.Rest = m.Array[:] + return reflect.ValueOf(m) +} + +func TestMarshalUnmarshal(t *testing.T) { + rand := rand.New(rand.NewSource(0)) + iface := &msgAllTypes{} + ty := reflect.ValueOf(iface).Type() + + n := 100 + if testing.Short() { + n = 5 + } + for j := 0; j < n; j++ { + v, ok := quick.Value(ty, rand) + if !ok { + t.Errorf("failed to create value") + break + } + + m1 := v.Elem().Interface() + m2 := iface + + marshaled := Marshal(m1) + if err := Unmarshal(marshaled, m2); err != nil { + t.Errorf("Unmarshal %#v: %s", m1, err) + break + } + + if !reflect.DeepEqual(v.Interface(), m2) { + t.Errorf("got: %#v\nwant:%#v\n%x", m2, m1, marshaled) + break + } + } +} + +func TestUnmarshalEmptyPacket(t *testing.T) { + var b []byte + var m channelRequestSuccessMsg + if err := Unmarshal(b, &m); err == nil { + t.Fatalf("unmarshal of empty slice succeeded") + } +} + +func TestUnmarshalUnexpectedPacket(t *testing.T) { + type S struct { + I uint32 `sshtype:"43"` + S string + B bool + } + + s := S{11, "hello", true} + packet := Marshal(s) + packet[0] = 42 + roundtrip := S{} + err := Unmarshal(packet, &roundtrip) + if err == nil { + t.Fatal("expected error, not nil") + } +} + +func TestMarshalPtr(t *testing.T) { + s := struct { + S string + }{"hello"} + + m1 := Marshal(s) + m2 := Marshal(&s) + if !bytes.Equal(m1, m2) { + t.Errorf("got %q, want %q for marshaled pointer", m2, m1) + } +} + +func TestBareMarshalUnmarshal(t *testing.T) { + type S struct { + I uint32 + S string + B bool + } + + s := S{42, "hello", true} + packet := Marshal(s) + roundtrip := S{} + Unmarshal(packet, &roundtrip) + + if !reflect.DeepEqual(s, roundtrip) { + t.Errorf("got %#v, want %#v", roundtrip, s) + } +} + +func TestBareMarshal(t *testing.T) { + type S2 struct { + I uint32 + } + s := S2{42} + packet := Marshal(s) + i, rest, ok := parseUint32(packet) + if len(rest) > 0 || !ok { + t.Errorf("parseInt(%q): parse error", packet) + } + if i != s.I { + t.Errorf("got %d, want %d", i, s.I) + } +} + +func TestUnmarshalShortKexInitPacket(t *testing.T) { + // This used to panic. 
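+	// The trailing 0xffffffff is a name-list length that claims far
+	// more bytes than the packet contains; Unmarshal must report an
+	// error instead of reading past the end of the buffer.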
+ // Issue 11348 + packet := []byte{0x14, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0xff, 0xff, 0xff, 0xff} + kim := &kexInitMsg{} + if err := Unmarshal(packet, kim); err == nil { + t.Error("truncated packet unmarshaled without error") + } +} + +func TestMarshalMultiTag(t *testing.T) { + var res struct { + A uint32 `sshtype:"1|2"` + } + + good1 := struct { + A uint32 `sshtype:"1"` + }{ + 1, + } + good2 := struct { + A uint32 `sshtype:"2"` + }{ + 1, + } + + if e := Unmarshal(Marshal(good1), &res); e != nil { + t.Errorf("error unmarshaling multipart tag: %v", e) + } + + if e := Unmarshal(Marshal(good2), &res); e != nil { + t.Errorf("error unmarshaling multipart tag: %v", e) + } + + bad1 := struct { + A uint32 `sshtype:"3"` + }{ + 1, + } + if e := Unmarshal(Marshal(bad1), &res); e == nil { + t.Errorf("bad struct unmarshaled without error") + } +} + +func randomBytes(out []byte, rand *rand.Rand) { + for i := 0; i < len(out); i++ { + out[i] = byte(rand.Int31()) + } +} + +func randomNameList(rand *rand.Rand) []string { + ret := make([]string, rand.Int31()&15) + for i := range ret { + s := make([]byte, 1+(rand.Int31()&15)) + for j := range s { + s[j] = 'a' + uint8(rand.Int31()&15) + } + ret[i] = string(s) + } + return ret +} + +func randomInt(rand *rand.Rand) *big.Int { + return new(big.Int).SetInt64(int64(int32(rand.Uint32()))) +} + +func (*kexInitMsg) Generate(rand *rand.Rand, size int) reflect.Value { + ki := &kexInitMsg{} + randomBytes(ki.Cookie[:], rand) + ki.KexAlgos = randomNameList(rand) + ki.ServerHostKeyAlgos = randomNameList(rand) + ki.CiphersClientServer = randomNameList(rand) + ki.CiphersServerClient = randomNameList(rand) + ki.MACsClientServer = randomNameList(rand) + ki.MACsServerClient = randomNameList(rand) + ki.CompressionClientServer = randomNameList(rand) + ki.CompressionServerClient = randomNameList(rand) + ki.LanguagesClientServer = randomNameList(rand) + ki.LanguagesServerClient = randomNameList(rand) + if rand.Int31()&1 == 1 { + ki.FirstKexFollows = true + } + return reflect.ValueOf(ki) +} + +func (*kexDHInitMsg) Generate(rand *rand.Rand, size int) reflect.Value { + dhi := &kexDHInitMsg{} + dhi.X = randomInt(rand) + return reflect.ValueOf(dhi) +} + +var ( + _kexInitMsg = new(kexInitMsg).Generate(rand.New(rand.NewSource(0)), 10).Elem().Interface() + _kexDHInitMsg = new(kexDHInitMsg).Generate(rand.New(rand.NewSource(0)), 10).Elem().Interface() + + _kexInit = Marshal(_kexInitMsg) + _kexDHInit = Marshal(_kexDHInitMsg) +) + +func BenchmarkMarshalKexInitMsg(b *testing.B) { + for i := 0; i < b.N; i++ { + Marshal(_kexInitMsg) + } +} + +func BenchmarkUnmarshalKexInitMsg(b *testing.B) { + m := new(kexInitMsg) + for i := 0; i < b.N; i++ { + Unmarshal(_kexInit, m) + } +} + +func BenchmarkMarshalKexDHInitMsg(b *testing.B) { + for i := 0; i < b.N; i++ { + Marshal(_kexDHInitMsg) + } +} + +func BenchmarkUnmarshalKexDHInitMsg(b *testing.B) { + m := new(kexDHInitMsg) + for i := 0; i < b.N; i++ { + Unmarshal(_kexDHInit, m) + } +} diff --git a/vendor/golang.org/x/crypto/ssh/mux_test.go b/vendor/golang.org/x/crypto/ssh/mux_test.go new file mode 100644 index 000000000000..25d2181d6215 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/mux_test.go @@ -0,0 +1,505 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+
+package ssh
+
+import (
+	"io"
+	"io/ioutil"
+	"sync"
+	"testing"
+)
+
+func muxPair() (*mux, *mux) {
+	a, b := memPipe()
+
+	s := newMux(a)
+	c := newMux(b)
+
+	return s, c
+}
+
+// Returns both ends of a channel, and the mux for the 2nd
+// channel.
+func channelPair(t *testing.T) (*channel, *channel, *mux) {
+	c, s := muxPair()
+
+	res := make(chan *channel, 1)
+	go func() {
+		newCh, ok := <-s.incomingChannels
+		if !ok {
+			t.Fatalf("No incoming channel")
+		}
+		if newCh.ChannelType() != "chan" {
+			t.Fatalf("got type %q want chan", newCh.ChannelType())
+		}
+		ch, _, err := newCh.Accept()
+		if err != nil {
+			t.Fatalf("Accept %v", err)
+		}
+		res <- ch.(*channel)
+	}()
+
+	ch, err := c.openChannel("chan", nil)
+	if err != nil {
+		t.Fatalf("OpenChannel: %v", err)
+	}
+
+	return <-res, ch, c
+}
+
+// Test that stderr and stdout can be addressed from different
+// goroutines. This is intended for use with the race detector.
+func TestMuxChannelExtendedThreadSafety(t *testing.T) {
+	writer, reader, mux := channelPair(t)
+	defer writer.Close()
+	defer reader.Close()
+	defer mux.Close()
+
+	var wr, rd sync.WaitGroup
+	magic := "hello world"
+
+	wr.Add(2)
+	go func() {
+		io.WriteString(writer, magic)
+		wr.Done()
+	}()
+	go func() {
+		io.WriteString(writer.Stderr(), magic)
+		wr.Done()
+	}()
+
+	rd.Add(2)
+	go func() {
+		c, err := ioutil.ReadAll(reader)
+		if string(c) != magic {
+			t.Fatalf("stdout read got %q, want %q (error %s)", c, magic, err)
+		}
+		rd.Done()
+	}()
+	go func() {
+		c, err := ioutil.ReadAll(reader.Stderr())
+		if string(c) != magic {
+			t.Fatalf("stderr read got %q, want %q (error %s)", c, magic, err)
+		}
+		rd.Done()
+	}()
+
+	wr.Wait()
+	writer.CloseWrite()
+	rd.Wait()
+}
+
+func TestMuxReadWrite(t *testing.T) {
+	s, c, mux := channelPair(t)
+	defer s.Close()
+	defer c.Close()
+	defer mux.Close()
+
+	magic := "hello world"
+	magicExt := "hello stderr"
+	go func() {
+		_, err := s.Write([]byte(magic))
+		if err != nil {
+			t.Fatalf("Write: %v", err)
+		}
+		_, err = s.Extended(1).Write([]byte(magicExt))
+		if err != nil {
+			t.Fatalf("Write: %v", err)
+		}
+		err = s.Close()
+		if err != nil {
+			t.Fatalf("Close: %v", err)
+		}
+	}()
+
+	var buf [1024]byte
+	n, err := c.Read(buf[:])
+	if err != nil {
+		t.Fatalf("server Read: %v", err)
+	}
+	got := string(buf[:n])
+	if got != magic {
+		t.Fatalf("server: got %q want %q", got, magic)
+	}
+
+	n, err = c.Extended(1).Read(buf[:])
+	if err != nil {
+		t.Fatalf("server Read: %v", err)
+	}
+
+	got = string(buf[:n])
+	if got != magicExt {
+		t.Fatalf("server: got %q want %q", got, magicExt)
+	}
+}
+
+func TestMuxChannelOverflow(t *testing.T) {
+	reader, writer, mux := channelPair(t)
+	defer reader.Close()
+	defer writer.Close()
+	defer mux.Close()
+
+	wDone := make(chan int, 1)
+	go func() {
+		if _, err := writer.Write(make([]byte, channelWindowSize)); err != nil {
+			t.Errorf("could not fill window: %v", err)
+		}
+		writer.Write(make([]byte, 1))
+		wDone <- 1
+	}()
+	writer.remoteWin.waitWriterBlocked()
+
+	// Send 1 byte.
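+	// A channel-data packet is: type byte (msgChannelData), uint32
+	// recipient channel, then a length-prefixed string, so a 1-byte
+	// payload takes 1+4+4+1 bytes. Writing it on the underlying conn
+	// bypasses the window bookkeeping that is keeping the writer above
+	// blocked, deliberately overflowing the reader's window.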
+ packet := make([]byte, 1+4+4+1) + packet[0] = msgChannelData + marshalUint32(packet[1:], writer.remoteId) + marshalUint32(packet[5:], uint32(1)) + packet[9] = 42 + + if err := writer.mux.conn.writePacket(packet); err != nil { + t.Errorf("could not send packet") + } + if _, err := reader.SendRequest("hello", true, nil); err == nil { + t.Errorf("SendRequest succeeded.") + } + <-wDone +} + +func TestMuxChannelCloseWriteUnblock(t *testing.T) { + reader, writer, mux := channelPair(t) + defer reader.Close() + defer writer.Close() + defer mux.Close() + + wDone := make(chan int, 1) + go func() { + if _, err := writer.Write(make([]byte, channelWindowSize)); err != nil { + t.Errorf("could not fill window: %v", err) + } + if _, err := writer.Write(make([]byte, 1)); err != io.EOF { + t.Errorf("got %v, want EOF for unblock write", err) + } + wDone <- 1 + }() + + writer.remoteWin.waitWriterBlocked() + reader.Close() + <-wDone +} + +func TestMuxConnectionCloseWriteUnblock(t *testing.T) { + reader, writer, mux := channelPair(t) + defer reader.Close() + defer writer.Close() + defer mux.Close() + + wDone := make(chan int, 1) + go func() { + if _, err := writer.Write(make([]byte, channelWindowSize)); err != nil { + t.Errorf("could not fill window: %v", err) + } + if _, err := writer.Write(make([]byte, 1)); err != io.EOF { + t.Errorf("got %v, want EOF for unblock write", err) + } + wDone <- 1 + }() + + writer.remoteWin.waitWriterBlocked() + mux.Close() + <-wDone +} + +func TestMuxReject(t *testing.T) { + client, server := muxPair() + defer server.Close() + defer client.Close() + + go func() { + ch, ok := <-server.incomingChannels + if !ok { + t.Fatalf("Accept") + } + if ch.ChannelType() != "ch" || string(ch.ExtraData()) != "extra" { + t.Fatalf("unexpected channel: %q, %q", ch.ChannelType(), ch.ExtraData()) + } + ch.Reject(RejectionReason(42), "message") + }() + + ch, err := client.openChannel("ch", []byte("extra")) + if ch != nil { + t.Fatal("openChannel not rejected") + } + + ocf, ok := err.(*OpenChannelError) + if !ok { + t.Errorf("got %#v want *OpenChannelError", err) + } else if ocf.Reason != 42 || ocf.Message != "message" { + t.Errorf("got %#v, want {Reason: 42, Message: %q}", ocf, "message") + } + + want := "ssh: rejected: unknown reason 42 (message)" + if err.Error() != want { + t.Errorf("got %q, want %q", err.Error(), want) + } +} + +func TestMuxChannelRequest(t *testing.T) { + client, server, mux := channelPair(t) + defer server.Close() + defer client.Close() + defer mux.Close() + + var received int + var wg sync.WaitGroup + wg.Add(1) + go func() { + for r := range server.incomingRequests { + received++ + r.Reply(r.Type == "yes", nil) + } + wg.Done() + }() + _, err := client.SendRequest("yes", false, nil) + if err != nil { + t.Fatalf("SendRequest: %v", err) + } + ok, err := client.SendRequest("yes", true, nil) + if err != nil { + t.Fatalf("SendRequest: %v", err) + } + + if !ok { + t.Errorf("SendRequest(yes): %v", ok) + + } + + ok, err = client.SendRequest("no", true, nil) + if err != nil { + t.Fatalf("SendRequest: %v", err) + } + if ok { + t.Errorf("SendRequest(no): %v", ok) + + } + + client.Close() + wg.Wait() + + if received != 3 { + t.Errorf("got %d requests, want %d", received, 3) + } +} + +func TestMuxGlobalRequest(t *testing.T) { + clientMux, serverMux := muxPair() + defer serverMux.Close() + defer clientMux.Close() + + var seen bool + go func() { + for r := range serverMux.incomingRequests { + seen = seen || r.Type == "peek" + if r.WantReply { + err := r.Reply(r.Type == "yes", + 
append([]byte(r.Type), r.Payload...))
+				if err != nil {
+					t.Errorf("AckRequest: %v", err)
+				}
+			}
+		}
+	}()
+
+	_, _, err := clientMux.SendRequest("peek", false, nil)
+	if err != nil {
+		t.Errorf("SendRequest: %v", err)
+	}
+
+	ok, data, err := clientMux.SendRequest("yes", true, []byte("a"))
+	if !ok || string(data) != "yesa" || err != nil {
+		t.Errorf("SendRequest(\"yes\", true, \"a\"): %v %v %v",
+			ok, data, err)
+	}
+	if ok, data, err := clientMux.SendRequest("yes", true, []byte("a")); !ok || string(data) != "yesa" || err != nil {
+		t.Errorf("SendRequest(\"yes\", true, \"a\"): %v %v %v",
+			ok, data, err)
+	}
+
+	if ok, data, err := clientMux.SendRequest("no", true, []byte("a")); ok || string(data) != "noa" || err != nil {
+		t.Errorf("SendRequest(\"no\", true, \"a\"): %v %v %v",
+			ok, data, err)
+	}
+
+	if !seen {
+		t.Errorf("never saw 'peek' request")
+	}
+}
+
+func TestMuxGlobalRequestUnblock(t *testing.T) {
+	clientMux, serverMux := muxPair()
+	defer serverMux.Close()
+	defer clientMux.Close()
+
+	result := make(chan error, 1)
+	go func() {
+		_, _, err := clientMux.SendRequest("hello", true, nil)
+		result <- err
+	}()
+
+	<-serverMux.incomingRequests
+	serverMux.conn.Close()
+	err := <-result
+
+	if err != io.EOF {
+		t.Errorf("want EOF, got %v", err)
+	}
+}
+
+func TestMuxChannelRequestUnblock(t *testing.T) {
+	a, b, connB := channelPair(t)
+	defer a.Close()
+	defer b.Close()
+	defer connB.Close()
+
+	result := make(chan error, 1)
+	go func() {
+		_, err := a.SendRequest("hello", true, nil)
+		result <- err
+	}()
+
+	<-b.incomingRequests
+	connB.conn.Close()
+	err := <-result
+
+	if err != io.EOF {
+		t.Errorf("want EOF, got %v", err)
+	}
+}
+
+func TestMuxCloseChannel(t *testing.T) {
+	r, w, mux := channelPair(t)
+	defer mux.Close()
+	defer r.Close()
+	defer w.Close()
+
+	result := make(chan error, 1)
+	go func() {
+		var b [1024]byte
+		_, err := r.Read(b[:])
+		result <- err
+	}()
+	if err := w.Close(); err != nil {
+		t.Errorf("w.Close: %v", err)
+	}
+
+	if _, err := w.Write([]byte("hello")); err != io.EOF {
+		t.Errorf("got err %v, want io.EOF after Close", err)
+	}
+
+	if err := <-result; err != io.EOF {
+		t.Errorf("got %v (%T), want io.EOF", err, err)
+	}
+}
+
+func TestMuxCloseWriteChannel(t *testing.T) {
+	r, w, mux := channelPair(t)
+	defer mux.Close()
+
+	result := make(chan error, 1)
+	go func() {
+		var b [1024]byte
+		_, err := r.Read(b[:])
+		result <- err
+	}()
+	if err := w.CloseWrite(); err != nil {
+		t.Errorf("w.CloseWrite: %v", err)
+	}
+
+	if _, err := w.Write([]byte("hello")); err != io.EOF {
+		t.Errorf("got err %v, want io.EOF after CloseWrite", err)
+	}
+
+	if err := <-result; err != io.EOF {
+		t.Errorf("got %v (%T), want io.EOF", err, err)
+	}
+}
+
+func TestMuxInvalidRecord(t *testing.T) {
+	a, b := muxPair()
+	defer a.Close()
+	defer b.Close()
+
+	packet := make([]byte, 1+4+4+1)
+	packet[0] = msgChannelData
+	marshalUint32(packet[1:], 29348723 /* invalid channel id */)
+	marshalUint32(packet[5:], 1)
+	packet[9] = 42
+
+	a.conn.writePacket(packet)
+	go a.SendRequest("hello", false, nil)
+	// 'a' wrote an invalid packet, so 'b' has exited.
+	req, ok := <-b.incomingRequests
+	if ok {
+		t.Errorf("got request %#v after receiving invalid packet", req)
+	}
+}
+
+func TestZeroWindowAdjust(t *testing.T) {
+	a, b, mux := channelPair(t)
+	defer a.Close()
+	defer b.Close()
+	defer mux.Close()
+
+	go func() {
+		io.WriteString(a, "hello")
+		// bogus adjust.
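+		// A windowAdjustMsg with a zero increment grants no extra
+		// send window; the peer should ignore it rather than corrupt
+		// the stream, so the reader must still see "helloworld".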
+ a.sendMessage(windowAdjustMsg{}) + io.WriteString(a, "world") + a.Close() + }() + + want := "helloworld" + c, _ := ioutil.ReadAll(b) + if string(c) != want { + t.Errorf("got %q want %q", c, want) + } +} + +func TestMuxMaxPacketSize(t *testing.T) { + a, b, mux := channelPair(t) + defer a.Close() + defer b.Close() + defer mux.Close() + + large := make([]byte, a.maxRemotePayload+1) + packet := make([]byte, 1+4+4+1+len(large)) + packet[0] = msgChannelData + marshalUint32(packet[1:], a.remoteId) + marshalUint32(packet[5:], uint32(len(large))) + packet[9] = 42 + + if err := a.mux.conn.writePacket(packet); err != nil { + t.Errorf("could not send packet") + } + + go a.SendRequest("hello", false, nil) + + _, ok := <-b.incomingRequests + if ok { + t.Errorf("connection still alive after receiving large packet.") + } +} + +// Don't ship code with debug=true. +func TestDebug(t *testing.T) { + if debugMux { + t.Error("mux debug switched on") + } + if debugHandshake { + t.Error("handshake debug switched on") + } + if debugTransport { + t.Error("transport debug switched on") + } +} diff --git a/vendor/golang.org/x/crypto/ssh/session_test.go b/vendor/golang.org/x/crypto/ssh/session_test.go new file mode 100644 index 000000000000..7dce6dd699be --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/session_test.go @@ -0,0 +1,774 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +// Session tests. + +import ( + "bytes" + crypto_rand "crypto/rand" + "errors" + "io" + "io/ioutil" + "math/rand" + "net" + "testing" + + "golang.org/x/crypto/ssh/terminal" +) + +type serverType func(Channel, <-chan *Request, *testing.T) + +// dial constructs a new test server and returns a *ClientConn. +func dial(handler serverType, t *testing.T) *Client { + c1, c2, err := netPipe() + if err != nil { + t.Fatalf("netPipe: %v", err) + } + + go func() { + defer c1.Close() + conf := ServerConfig{ + NoClientAuth: true, + } + conf.AddHostKey(testSigners["rsa"]) + + _, chans, reqs, err := NewServerConn(c1, &conf) + if err != nil { + t.Fatalf("Unable to handshake: %v", err) + } + go DiscardRequests(reqs) + + for newCh := range chans { + if newCh.ChannelType() != "session" { + newCh.Reject(UnknownChannelType, "unknown channel type") + continue + } + + ch, inReqs, err := newCh.Accept() + if err != nil { + t.Errorf("Accept: %v", err) + continue + } + go func() { + handler(ch, inReqs, t) + }() + } + }() + + config := &ClientConfig{ + User: "testuser", + HostKeyCallback: InsecureIgnoreHostKey(), + } + + conn, chans, reqs, err := NewClientConn(c2, "", config) + if err != nil { + t.Fatalf("unable to dial remote side: %v", err) + } + + return NewClient(conn, chans, reqs) +} + +// Test a simple string is returned to session.Stdout. 
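+// The server side is shellHandler, whose terminal prompt is the
+// literal "golang": no real shell runs, the prompt itself is what
+// arrives on stdout.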
+func TestSessionShell(t *testing.T) { + conn := dial(shellHandler, t) + defer conn.Close() + session, err := conn.NewSession() + if err != nil { + t.Fatalf("Unable to request new session: %v", err) + } + defer session.Close() + stdout := new(bytes.Buffer) + session.Stdout = stdout + if err := session.Shell(); err != nil { + t.Fatalf("Unable to execute command: %s", err) + } + if err := session.Wait(); err != nil { + t.Fatalf("Remote command did not exit cleanly: %v", err) + } + actual := stdout.String() + if actual != "golang" { + t.Fatalf("Remote shell did not return expected string: expected=golang, actual=%s", actual) + } +} + +// TODO(dfc) add support for Std{in,err}Pipe when the Server supports it. + +// Test a simple string is returned via StdoutPipe. +func TestSessionStdoutPipe(t *testing.T) { + conn := dial(shellHandler, t) + defer conn.Close() + session, err := conn.NewSession() + if err != nil { + t.Fatalf("Unable to request new session: %v", err) + } + defer session.Close() + stdout, err := session.StdoutPipe() + if err != nil { + t.Fatalf("Unable to request StdoutPipe(): %v", err) + } + var buf bytes.Buffer + if err := session.Shell(); err != nil { + t.Fatalf("Unable to execute command: %v", err) + } + done := make(chan bool, 1) + go func() { + if _, err := io.Copy(&buf, stdout); err != nil { + t.Errorf("Copy of stdout failed: %v", err) + } + done <- true + }() + if err := session.Wait(); err != nil { + t.Fatalf("Remote command did not exit cleanly: %v", err) + } + <-done + actual := buf.String() + if actual != "golang" { + t.Fatalf("Remote shell did not return expected string: expected=golang, actual=%s", actual) + } +} + +// Test that a simple string is returned via the Output helper, +// and that stderr is discarded. +func TestSessionOutput(t *testing.T) { + conn := dial(fixedOutputHandler, t) + defer conn.Close() + session, err := conn.NewSession() + if err != nil { + t.Fatalf("Unable to request new session: %v", err) + } + defer session.Close() + + buf, err := session.Output("") // cmd is ignored by fixedOutputHandler + if err != nil { + t.Error("Remote command did not exit cleanly:", err) + } + w := "this-is-stdout." + g := string(buf) + if g != w { + t.Error("Remote command did not return expected string:") + t.Logf("want %q", w) + t.Logf("got %q", g) + } +} + +// Test that both stdout and stderr are returned +// via the CombinedOutput helper. +func TestSessionCombinedOutput(t *testing.T) { + conn := dial(fixedOutputHandler, t) + defer conn.Close() + session, err := conn.NewSession() + if err != nil { + t.Fatalf("Unable to request new session: %v", err) + } + defer session.Close() + + buf, err := session.CombinedOutput("") // cmd is ignored by fixedOutputHandler + if err != nil { + t.Error("Remote command did not exit cleanly:", err) + } + const stdout = "this-is-stdout." + const stderr = "this-is-stderr." + g := string(buf) + if g != stdout+stderr && g != stderr+stdout { + t.Error("Remote command did not return expected string:") + t.Logf("want %q, or %q", stdout+stderr, stderr+stdout) + t.Logf("got %q", g) + } +} + +// Test non-0 exit status is returned correctly. 
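+// The status arrives as an "exit-status" channel request carrying a
+// uint32 (RFC 4254, section 6.10) and is surfaced by Wait as an
+// *ExitError.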
+func TestExitStatusNonZero(t *testing.T) { + conn := dial(exitStatusNonZeroHandler, t) + defer conn.Close() + session, err := conn.NewSession() + if err != nil { + t.Fatalf("Unable to request new session: %v", err) + } + defer session.Close() + if err := session.Shell(); err != nil { + t.Fatalf("Unable to execute command: %v", err) + } + err = session.Wait() + if err == nil { + t.Fatalf("expected command to fail but it didn't") + } + e, ok := err.(*ExitError) + if !ok { + t.Fatalf("expected *ExitError but got %T", err) + } + if e.ExitStatus() != 15 { + t.Fatalf("expected command to exit with 15 but got %v", e.ExitStatus()) + } +} + +// Test 0 exit status is returned correctly. +func TestExitStatusZero(t *testing.T) { + conn := dial(exitStatusZeroHandler, t) + defer conn.Close() + session, err := conn.NewSession() + if err != nil { + t.Fatalf("Unable to request new session: %v", err) + } + defer session.Close() + + if err := session.Shell(); err != nil { + t.Fatalf("Unable to execute command: %v", err) + } + err = session.Wait() + if err != nil { + t.Fatalf("expected nil but got %v", err) + } +} + +// Test exit signal and status are both returned correctly. +func TestExitSignalAndStatus(t *testing.T) { + conn := dial(exitSignalAndStatusHandler, t) + defer conn.Close() + session, err := conn.NewSession() + if err != nil { + t.Fatalf("Unable to request new session: %v", err) + } + defer session.Close() + if err := session.Shell(); err != nil { + t.Fatalf("Unable to execute command: %v", err) + } + err = session.Wait() + if err == nil { + t.Fatalf("expected command to fail but it didn't") + } + e, ok := err.(*ExitError) + if !ok { + t.Fatalf("expected *ExitError but got %T", err) + } + if e.Signal() != "TERM" || e.ExitStatus() != 15 { + t.Fatalf("expected command to exit with signal TERM and status 15 but got signal %s and status %v", e.Signal(), e.ExitStatus()) + } +} + +// Test exit signal and status are both returned correctly. +func TestKnownExitSignalOnly(t *testing.T) { + conn := dial(exitSignalHandler, t) + defer conn.Close() + session, err := conn.NewSession() + if err != nil { + t.Fatalf("Unable to request new session: %v", err) + } + defer session.Close() + if err := session.Shell(); err != nil { + t.Fatalf("Unable to execute command: %v", err) + } + err = session.Wait() + if err == nil { + t.Fatalf("expected command to fail but it didn't") + } + e, ok := err.(*ExitError) + if !ok { + t.Fatalf("expected *ExitError but got %T", err) + } + if e.Signal() != "TERM" || e.ExitStatus() != 143 { + t.Fatalf("expected command to exit with signal TERM and status 143 but got signal %s and status %v", e.Signal(), e.ExitStatus()) + } +} + +// Test exit signal and status are both returned correctly. 
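+// Here the handler sends only "SYS", with no exit status; a signal the
+// package does not recognize presumably has no 128+signum mapping, so
+// Wait is expected to report the bare 128.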
+func TestUnknownExitSignal(t *testing.T) { + conn := dial(exitSignalUnknownHandler, t) + defer conn.Close() + session, err := conn.NewSession() + if err != nil { + t.Fatalf("Unable to request new session: %v", err) + } + defer session.Close() + if err := session.Shell(); err != nil { + t.Fatalf("Unable to execute command: %v", err) + } + err = session.Wait() + if err == nil { + t.Fatalf("expected command to fail but it didn't") + } + e, ok := err.(*ExitError) + if !ok { + t.Fatalf("expected *ExitError but got %T", err) + } + if e.Signal() != "SYS" || e.ExitStatus() != 128 { + t.Fatalf("expected command to exit with signal SYS and status 128 but got signal %s and status %v", e.Signal(), e.ExitStatus()) + } +} + +func TestExitWithoutStatusOrSignal(t *testing.T) { + conn := dial(exitWithoutSignalOrStatus, t) + defer conn.Close() + session, err := conn.NewSession() + if err != nil { + t.Fatalf("Unable to request new session: %v", err) + } + defer session.Close() + if err := session.Shell(); err != nil { + t.Fatalf("Unable to execute command: %v", err) + } + err = session.Wait() + if err == nil { + t.Fatalf("expected command to fail but it didn't") + } + if _, ok := err.(*ExitMissingError); !ok { + t.Fatalf("got %T want *ExitMissingError", err) + } +} + +// windowTestBytes is the number of bytes that we'll send to the SSH server. +const windowTestBytes = 16000 * 200 + +// TestServerWindow writes random data to the server. The server is expected to echo +// the same data back, which is compared against the original. +func TestServerWindow(t *testing.T) { + origBuf := bytes.NewBuffer(make([]byte, 0, windowTestBytes)) + io.CopyN(origBuf, crypto_rand.Reader, windowTestBytes) + origBytes := origBuf.Bytes() + + conn := dial(echoHandler, t) + defer conn.Close() + session, err := conn.NewSession() + if err != nil { + t.Fatal(err) + } + defer session.Close() + result := make(chan []byte) + + go func() { + defer close(result) + echoedBuf := bytes.NewBuffer(make([]byte, 0, windowTestBytes)) + serverStdout, err := session.StdoutPipe() + if err != nil { + t.Errorf("StdoutPipe failed: %v", err) + return + } + n, err := copyNRandomly("stdout", echoedBuf, serverStdout, windowTestBytes) + if err != nil && err != io.EOF { + t.Errorf("Read only %d bytes from server, expected %d: %v", n, windowTestBytes, err) + } + result <- echoedBuf.Bytes() + }() + + serverStdin, err := session.StdinPipe() + if err != nil { + t.Fatalf("StdinPipe failed: %v", err) + } + written, err := copyNRandomly("stdin", serverStdin, origBuf, windowTestBytes) + if err != nil { + t.Fatalf("failed to copy origBuf to serverStdin: %v", err) + } + if written != windowTestBytes { + t.Fatalf("Wrote only %d of %d bytes to server", written, windowTestBytes) + } + + echoedBytes := <-result + + if !bytes.Equal(origBytes, echoedBytes) { + t.Fatalf("Echoed buffer differed from original, orig %d, echoed %d", len(origBytes), len(echoedBytes)) + } +} + +// Verify the client can handle a keepalive packet from the server. 
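+// channelKeepaliveSender sends "keepalive@openssh.com" with wantReply
+// set; the client must answer the unrecognized request (even if only
+// negatively) rather than stall, and the session must still exit
+// cleanly.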
+func TestClientHandlesKeepalives(t *testing.T) { + conn := dial(channelKeepaliveSender, t) + defer conn.Close() + session, err := conn.NewSession() + if err != nil { + t.Fatal(err) + } + defer session.Close() + if err := session.Shell(); err != nil { + t.Fatalf("Unable to execute command: %v", err) + } + err = session.Wait() + if err != nil { + t.Fatalf("expected nil but got: %v", err) + } +} + +type exitStatusMsg struct { + Status uint32 +} + +type exitSignalMsg struct { + Signal string + CoreDumped bool + Errmsg string + Lang string +} + +func handleTerminalRequests(in <-chan *Request) { + for req := range in { + ok := false + switch req.Type { + case "shell": + ok = true + if len(req.Payload) > 0 { + // We don't accept any commands, only the default shell. + ok = false + } + case "env": + ok = true + } + req.Reply(ok, nil) + } +} + +func newServerShell(ch Channel, in <-chan *Request, prompt string) *terminal.Terminal { + term := terminal.NewTerminal(ch, prompt) + go handleTerminalRequests(in) + return term +} + +func exitStatusZeroHandler(ch Channel, in <-chan *Request, t *testing.T) { + defer ch.Close() + // this string is returned to stdout + shell := newServerShell(ch, in, "> ") + readLine(shell, t) + sendStatus(0, ch, t) +} + +func exitStatusNonZeroHandler(ch Channel, in <-chan *Request, t *testing.T) { + defer ch.Close() + shell := newServerShell(ch, in, "> ") + readLine(shell, t) + sendStatus(15, ch, t) +} + +func exitSignalAndStatusHandler(ch Channel, in <-chan *Request, t *testing.T) { + defer ch.Close() + shell := newServerShell(ch, in, "> ") + readLine(shell, t) + sendStatus(15, ch, t) + sendSignal("TERM", ch, t) +} + +func exitSignalHandler(ch Channel, in <-chan *Request, t *testing.T) { + defer ch.Close() + shell := newServerShell(ch, in, "> ") + readLine(shell, t) + sendSignal("TERM", ch, t) +} + +func exitSignalUnknownHandler(ch Channel, in <-chan *Request, t *testing.T) { + defer ch.Close() + shell := newServerShell(ch, in, "> ") + readLine(shell, t) + sendSignal("SYS", ch, t) +} + +func exitWithoutSignalOrStatus(ch Channel, in <-chan *Request, t *testing.T) { + defer ch.Close() + shell := newServerShell(ch, in, "> ") + readLine(shell, t) +} + +func shellHandler(ch Channel, in <-chan *Request, t *testing.T) { + defer ch.Close() + // this string is returned to stdout + shell := newServerShell(ch, in, "golang") + readLine(shell, t) + sendStatus(0, ch, t) +} + +// Ignores the command, writes fixed strings to stderr and stdout. +// Strings are "this-is-stdout." and "this-is-stderr.". 
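+// Stdout and stderr travel as separate stream writes, so a caller may
+// observe them in either order; that is why TestSessionCombinedOutput
+// accepts both concatenations.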
+func fixedOutputHandler(ch Channel, in <-chan *Request, t *testing.T) { + defer ch.Close() + _, err := ch.Read(nil) + + req, ok := <-in + if !ok { + t.Fatalf("error: expected channel request, got: %#v", err) + return + } + + // ignore request, always send some text + req.Reply(true, nil) + + _, err = io.WriteString(ch, "this-is-stdout.") + if err != nil { + t.Fatalf("error writing on server: %v", err) + } + _, err = io.WriteString(ch.Stderr(), "this-is-stderr.") + if err != nil { + t.Fatalf("error writing on server: %v", err) + } + sendStatus(0, ch, t) +} + +func readLine(shell *terminal.Terminal, t *testing.T) { + if _, err := shell.ReadLine(); err != nil && err != io.EOF { + t.Errorf("unable to read line: %v", err) + } +} + +func sendStatus(status uint32, ch Channel, t *testing.T) { + msg := exitStatusMsg{ + Status: status, + } + if _, err := ch.SendRequest("exit-status", false, Marshal(&msg)); err != nil { + t.Errorf("unable to send status: %v", err) + } +} + +func sendSignal(signal string, ch Channel, t *testing.T) { + sig := exitSignalMsg{ + Signal: signal, + CoreDumped: false, + Errmsg: "Process terminated", + Lang: "en-GB-oed", + } + if _, err := ch.SendRequest("exit-signal", false, Marshal(&sig)); err != nil { + t.Errorf("unable to send signal: %v", err) + } +} + +func discardHandler(ch Channel, t *testing.T) { + defer ch.Close() + io.Copy(ioutil.Discard, ch) +} + +func echoHandler(ch Channel, in <-chan *Request, t *testing.T) { + defer ch.Close() + if n, err := copyNRandomly("echohandler", ch, ch, windowTestBytes); err != nil { + t.Errorf("short write, wrote %d, expected %d: %v ", n, windowTestBytes, err) + } +} + +// copyNRandomly copies n bytes from src to dst. It uses a variable, and random, +// buffer size to exercise more code paths. 
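+// Mismatched read and write sizes also mean the SSH flow-control
+// window boundaries are crossed at varying offsets.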
+func copyNRandomly(title string, dst io.Writer, src io.Reader, n int) (int, error) { + var ( + buf = make([]byte, 32*1024) + written int + remaining = n + ) + for remaining > 0 { + l := rand.Intn(1 << 15) + if remaining < l { + l = remaining + } + nr, er := src.Read(buf[:l]) + nw, ew := dst.Write(buf[:nr]) + remaining -= nw + written += nw + if ew != nil { + return written, ew + } + if nr != nw { + return written, io.ErrShortWrite + } + if er != nil && er != io.EOF { + return written, er + } + } + return written, nil +} + +func channelKeepaliveSender(ch Channel, in <-chan *Request, t *testing.T) { + defer ch.Close() + shell := newServerShell(ch, in, "> ") + readLine(shell, t) + if _, err := ch.SendRequest("keepalive@openssh.com", true, nil); err != nil { + t.Errorf("unable to send channel keepalive request: %v", err) + } + sendStatus(0, ch, t) +} + +func TestClientWriteEOF(t *testing.T) { + conn := dial(simpleEchoHandler, t) + defer conn.Close() + + session, err := conn.NewSession() + if err != nil { + t.Fatal(err) + } + defer session.Close() + stdin, err := session.StdinPipe() + if err != nil { + t.Fatalf("StdinPipe failed: %v", err) + } + stdout, err := session.StdoutPipe() + if err != nil { + t.Fatalf("StdoutPipe failed: %v", err) + } + + data := []byte(`0000`) + _, err = stdin.Write(data) + if err != nil { + t.Fatalf("Write failed: %v", err) + } + stdin.Close() + + res, err := ioutil.ReadAll(stdout) + if err != nil { + t.Fatalf("Read failed: %v", err) + } + + if !bytes.Equal(data, res) { + t.Fatalf("Read differed from write, wrote: %v, read: %v", data, res) + } +} + +func simpleEchoHandler(ch Channel, in <-chan *Request, t *testing.T) { + defer ch.Close() + data, err := ioutil.ReadAll(ch) + if err != nil { + t.Errorf("handler read error: %v", err) + } + _, err = ch.Write(data) + if err != nil { + t.Errorf("handler write error: %v", err) + } +} + +func TestSessionID(t *testing.T) { + c1, c2, err := netPipe() + if err != nil { + t.Fatalf("netPipe: %v", err) + } + defer c1.Close() + defer c2.Close() + + serverID := make(chan []byte, 1) + clientID := make(chan []byte, 1) + + serverConf := &ServerConfig{ + NoClientAuth: true, + } + serverConf.AddHostKey(testSigners["ecdsa"]) + clientConf := &ClientConfig{ + HostKeyCallback: InsecureIgnoreHostKey(), + User: "user", + } + + go func() { + conn, chans, reqs, err := NewServerConn(c1, serverConf) + if err != nil { + t.Fatalf("server handshake: %v", err) + } + serverID <- conn.SessionID() + go DiscardRequests(reqs) + for ch := range chans { + ch.Reject(Prohibited, "") + } + }() + + go func() { + conn, chans, reqs, err := NewClientConn(c2, "", clientConf) + if err != nil { + t.Fatalf("client handshake: %v", err) + } + clientID <- conn.SessionID() + go DiscardRequests(reqs) + for ch := range chans { + ch.Reject(Prohibited, "") + } + }() + + s := <-serverID + c := <-clientID + if bytes.Compare(s, c) != 0 { + t.Errorf("server session ID (%x) != client session ID (%x)", s, c) + } else if len(s) == 0 { + t.Errorf("client and server SessionID were empty.") + } +} + +type noReadConn struct { + readSeen bool + net.Conn +} + +func (c *noReadConn) Close() error { + return nil +} + +func (c *noReadConn) Read(b []byte) (int, error) { + c.readSeen = true + return 0, errors.New("noReadConn error") +} + +func TestInvalidServerConfiguration(t *testing.T) { + c1, c2, err := netPipe() + if err != nil { + t.Fatalf("netPipe: %v", err) + } + defer c1.Close() + defer c2.Close() + + serveConn := noReadConn{Conn: c1} + serverConf := &ServerConfig{} + + 
NewServerConn(&serveConn, serverConf) + if serveConn.readSeen { + t.Fatalf("NewServerConn attempted to Read() from Conn while configuration is missing host key") + } + + serverConf.AddHostKey(testSigners["ecdsa"]) + + NewServerConn(&serveConn, serverConf) + if serveConn.readSeen { + t.Fatalf("NewServerConn attempted to Read() from Conn while configuration is missing authentication method") + } +} + +func TestHostKeyAlgorithms(t *testing.T) { + serverConf := &ServerConfig{ + NoClientAuth: true, + } + serverConf.AddHostKey(testSigners["rsa"]) + serverConf.AddHostKey(testSigners["ecdsa"]) + + connect := func(clientConf *ClientConfig, want string) { + var alg string + clientConf.HostKeyCallback = func(h string, a net.Addr, key PublicKey) error { + alg = key.Type() + return nil + } + c1, c2, err := netPipe() + if err != nil { + t.Fatalf("netPipe: %v", err) + } + defer c1.Close() + defer c2.Close() + + go NewServerConn(c1, serverConf) + _, _, _, err = NewClientConn(c2, "", clientConf) + if err != nil { + t.Fatalf("NewClientConn: %v", err) + } + if alg != want { + t.Errorf("selected key algorithm %s, want %s", alg, want) + } + } + + // By default, we get the preferred algorithm, which is ECDSA 256. + + clientConf := &ClientConfig{ + HostKeyCallback: InsecureIgnoreHostKey(), + } + connect(clientConf, KeyAlgoECDSA256) + + // Client asks for RSA explicitly. + clientConf.HostKeyAlgorithms = []string{KeyAlgoRSA} + connect(clientConf, KeyAlgoRSA) + + c1, c2, err := netPipe() + if err != nil { + t.Fatalf("netPipe: %v", err) + } + defer c1.Close() + defer c2.Close() + + go NewServerConn(c1, serverConf) + clientConf.HostKeyAlgorithms = []string{"nonexistent-hostkey-algo"} + _, _, _, err = NewClientConn(c2, "", clientConf) + if err == nil { + t.Fatal("succeeded connecting with unknown hostkey algorithm") + } +} diff --git a/vendor/golang.org/x/crypto/ssh/tcpip_test.go b/vendor/golang.org/x/crypto/ssh/tcpip_test.go new file mode 100644 index 000000000000..f1265cb4964e --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/tcpip_test.go @@ -0,0 +1,20 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "testing" +) + +func TestAutoPortListenBroken(t *testing.T) { + broken := "SSH-2.0-OpenSSH_5.9hh11" + works := "SSH-2.0-OpenSSH_6.1" + if !isBrokenOpenSSHVersion(broken) { + t.Errorf("version %q not marked as broken", broken) + } + if isBrokenOpenSSHVersion(works) { + t.Errorf("version %q marked as broken", works) + } +} diff --git a/vendor/golang.org/x/crypto/ssh/testdata_test.go b/vendor/golang.org/x/crypto/ssh/testdata_test.go new file mode 100644 index 000000000000..2da8c79dc64a --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/testdata_test.go @@ -0,0 +1,63 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// IMPLEMENTATION NOTE: To avoid a package loop, this file is in three places: +// ssh/, ssh/agent, and ssh/test/. It should be kept in sync across all three +// instances. 
+ +package ssh + +import ( + "crypto/rand" + "fmt" + + "golang.org/x/crypto/ssh/testdata" +) + +var ( + testPrivateKeys map[string]interface{} + testSigners map[string]Signer + testPublicKeys map[string]PublicKey +) + +func init() { + var err error + + n := len(testdata.PEMBytes) + testPrivateKeys = make(map[string]interface{}, n) + testSigners = make(map[string]Signer, n) + testPublicKeys = make(map[string]PublicKey, n) + for t, k := range testdata.PEMBytes { + testPrivateKeys[t], err = ParseRawPrivateKey(k) + if err != nil { + panic(fmt.Sprintf("Unable to parse test key %s: %v", t, err)) + } + testSigners[t], err = NewSignerFromKey(testPrivateKeys[t]) + if err != nil { + panic(fmt.Sprintf("Unable to create signer for test key %s: %v", t, err)) + } + testPublicKeys[t] = testSigners[t].PublicKey() + } + + // Create a cert and sign it for use in tests. + testCert := &Certificate{ + Nonce: []byte{}, // To pass reflect.DeepEqual after marshal & parse, this must be non-nil + ValidPrincipals: []string{"gopher1", "gopher2"}, // increases test coverage + ValidAfter: 0, // unix epoch + ValidBefore: CertTimeInfinity, // The end of currently representable time. + Reserved: []byte{}, // To pass reflect.DeepEqual after marshal & parse, this must be non-nil + Key: testPublicKeys["ecdsa"], + SignatureKey: testPublicKeys["rsa"], + Permissions: Permissions{ + CriticalOptions: map[string]string{}, + Extensions: map[string]string{}, + }, + } + testCert.SignCert(rand.Reader, testSigners["rsa"]) + testPrivateKeys["cert"] = testPrivateKeys["ecdsa"] + testSigners["cert"], err = NewCertSigner(testCert, testSigners["ecdsa"]) + if err != nil { + panic(fmt.Sprintf("Unable to create certificate signer: %v", err)) + } +} diff --git a/vendor/golang.org/x/crypto/ssh/transport_test.go b/vendor/golang.org/x/crypto/ssh/transport_test.go new file mode 100644 index 000000000000..92d83abf93fb --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/transport_test.go @@ -0,0 +1,109 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ssh + +import ( + "bytes" + "crypto/rand" + "encoding/binary" + "strings" + "testing" +) + +func TestReadVersion(t *testing.T) { + longversion := strings.Repeat("SSH-2.0-bla", 50)[:253] + cases := map[string]string{ + "SSH-2.0-bla\r\n": "SSH-2.0-bla", + "SSH-2.0-bla\n": "SSH-2.0-bla", + longversion + "\r\n": longversion, + } + + for in, want := range cases { + result, err := readVersion(bytes.NewBufferString(in)) + if err != nil { + t.Errorf("readVersion(%q): %s", in, err) + } + got := string(result) + if got != want { + t.Errorf("got %q, want %q", got, want) + } + } +} + +func TestReadVersionError(t *testing.T) { + longversion := strings.Repeat("SSH-2.0-bla", 50)[:253] + cases := []string{ + longversion + "too-long\r\n", + } + for _, in := range cases { + if _, err := readVersion(bytes.NewBufferString(in)); err == nil { + t.Errorf("readVersion(%q) should have failed", in) + } + } +} + +func TestExchangeVersionsBasic(t *testing.T) { + v := "SSH-2.0-bla" + buf := bytes.NewBufferString(v + "\r\n") + them, err := exchangeVersions(buf, []byte("xyz")) + if err != nil { + t.Errorf("exchangeVersions: %v", err) + } + + if want := "SSH-2.0-bla"; string(them) != want { + t.Errorf("got %q want %q for our version", them, want) + } +} + +func TestExchangeVersions(t *testing.T) { + cases := []string{ + "not\x000allowed", + "not allowed\n", + } + for _, c := range cases { + buf := bytes.NewBufferString("SSH-2.0-bla\r\n") + if _, err := exchangeVersions(buf, []byte(c)); err == nil { + t.Errorf("exchangeVersions(%q): should have failed", c) + } + } +} + +type closerBuffer struct { + bytes.Buffer +} + +func (b *closerBuffer) Close() error { + return nil +} + +func TestTransportMaxPacketWrite(t *testing.T) { + buf := &closerBuffer{} + tr := newTransport(buf, rand.Reader, true) + huge := make([]byte, maxPacket+1) + err := tr.writePacket(huge) + if err == nil { + t.Errorf("transport accepted write for a huge packet.") + } +} + +func TestTransportMaxPacketReader(t *testing.T) { + var header [5]byte + huge := make([]byte, maxPacket+128) + binary.BigEndian.PutUint32(header[0:], uint32(len(huge))) + // padding. + header[4] = 0 + + buf := &closerBuffer{} + buf.Write(header[:]) + buf.Write(huge) + + tr := newTransport(buf, rand.Reader, true) + _, err := tr.readPacket() + if err == nil { + t.Errorf("transport succeeded reading huge packet.") + } else if !strings.Contains(err.Error(), "large") { + t.Errorf("got %q, should mention %q", err.Error(), "large") + } +} diff --git a/vendor/golang.org/x/net/.gitattributes b/vendor/golang.org/x/net/.gitattributes new file mode 100644 index 000000000000..d2f212e5da80 --- /dev/null +++ b/vendor/golang.org/x/net/.gitattributes @@ -0,0 +1,10 @@ +# Treat all files in this repo as binary, with no git magic updating +# line endings. Windows users contributing to Go will need to use a +# modern version of git and editors capable of LF line endings. +# +# We'll prevent accidental CRLF line endings from entering the repo +# via the git-review gofmt checks. +# +# See golang.org/issue/9281 + +* -text diff --git a/vendor/golang.org/x/net/.gitignore b/vendor/golang.org/x/net/.gitignore new file mode 100644 index 000000000000..8339fd61d3f4 --- /dev/null +++ b/vendor/golang.org/x/net/.gitignore @@ -0,0 +1,2 @@ +# Add no patterns to .hgignore except for files generated by the build. 
+last-change diff --git a/vendor/golang.org/x/net/AUTHORS b/vendor/golang.org/x/net/AUTHORS new file mode 100644 index 000000000000..15167cd746c5 --- /dev/null +++ b/vendor/golang.org/x/net/AUTHORS @@ -0,0 +1,3 @@ +# This source code refers to The Go Authors for copyright purposes. +# The master list of authors is in the main Go distribution, +# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/net/CONTRIBUTING.md b/vendor/golang.org/x/net/CONTRIBUTING.md new file mode 100644 index 000000000000..88dff59bc7d2 --- /dev/null +++ b/vendor/golang.org/x/net/CONTRIBUTING.md @@ -0,0 +1,31 @@ +# Contributing to Go + +Go is an open source project. + +It is the work of hundreds of contributors. We appreciate your help! + + +## Filing issues + +When [filing an issue](https://golang.org/issue/new), make sure to answer these five questions: + +1. What version of Go are you using (`go version`)? +2. What operating system and processor architecture are you using? +3. What did you do? +4. What did you expect to see? +5. What did you see instead? + +General questions should go to the [golang-nuts mailing list](https://groups.google.com/group/golang-nuts) instead of the issue tracker. +The gophers there will answer or ask you to file an issue if you've tripped over a bug. + +## Contributing code + +Please read the [Contribution Guidelines](https://golang.org/doc/contribute.html) +before sending patches. + +**We do not accept GitHub pull requests** +(we use [Gerrit](https://code.google.com/p/gerrit/) instead for code review). + +Unless otherwise noted, the Go source files are distributed under +the BSD-style license found in the LICENSE file. + diff --git a/vendor/golang.org/x/net/CONTRIBUTORS b/vendor/golang.org/x/net/CONTRIBUTORS new file mode 100644 index 000000000000..1c4577e96806 --- /dev/null +++ b/vendor/golang.org/x/net/CONTRIBUTORS @@ -0,0 +1,3 @@ +# This source code was written by the Go contributors. +# The master list of contributors is in the main Go distribution, +# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/net/README b/vendor/golang.org/x/net/README new file mode 100644 index 000000000000..6b13d8e50502 --- /dev/null +++ b/vendor/golang.org/x/net/README @@ -0,0 +1,3 @@ +This repository holds supplementary Go networking libraries. + +To submit changes to this repository, see http://golang.org/doc/contribute.html. 
diff --git a/vendor/golang.org/x/net/codereview.cfg b/vendor/golang.org/x/net/codereview.cfg new file mode 100644 index 000000000000..3f8b14b64e83 --- /dev/null +++ b/vendor/golang.org/x/net/codereview.cfg @@ -0,0 +1 @@ +issuerepo: golang/go diff --git a/vendor/golang.org/x/net/context/BUILD b/vendor/golang.org/x/net/context/BUILD index 3be45e84d8e1..98e5bd65dd74 100644 --- a/vendor/golang.org/x/net/context/BUILD +++ b/vendor/golang.org/x/net/context/BUILD @@ -1,9 +1,4 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "go_default_library", @@ -13,6 +8,21 @@ go_library( "pre_go17.go", ], importpath = "golang.org/x/net/context", + visibility = ["//visibility:public"], +) + +go_test( + name = "go_default_test", + srcs = ["context_test.go"], + importpath = "golang.org/x/net/context", + library = ":go_default_library", +) + +go_test( + name = "go_default_xtest", + srcs = ["withtimeout_test.go"], + importpath = "golang.org/x/net/context_test", + deps = [":go_default_library"], ) filegroup( @@ -24,9 +34,7 @@ filegroup( filegroup( name = "all-srcs", - srcs = [ - ":package-srcs", - "//vendor/golang.org/x/net/context/ctxhttp:all-srcs", - ], + srcs = [":package-srcs"], tags = ["automanaged"], + visibility = ["//visibility:public"], ) diff --git a/vendor/golang.org/x/net/context/context.go b/vendor/golang.org/x/net/context/context.go index ea1a7cd53a12..134654cf7e2b 100644 --- a/vendor/golang.org/x/net/context/context.go +++ b/vendor/golang.org/x/net/context/context.go @@ -34,7 +34,7 @@ // // See http://blog.golang.org/context for example code for a server that uses // Contexts. -package context +package context // import "golang.org/x/net/context" import "time" diff --git a/vendor/golang.org/x/net/context/context_test.go b/vendor/golang.org/x/net/context/context_test.go new file mode 100644 index 000000000000..62844131bd0a --- /dev/null +++ b/vendor/golang.org/x/net/context/context_test.go @@ -0,0 +1,583 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.7 + +package context + +import ( + "fmt" + "math/rand" + "runtime" + "strings" + "sync" + "testing" + "time" +) + +// otherContext is a Context that's not one of the types defined in context.go. +// This lets us test code paths that differ based on the underlying type of the +// Context. 
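+// Wrapping a Context this way hides the concrete *cancelCtx/*timerCtx
+// types from the package's internal type switch, so cancelation
+// propagation has to fall back to its generic path.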
+type otherContext struct { + Context +} + +func TestBackground(t *testing.T) { + c := Background() + if c == nil { + t.Fatalf("Background returned nil") + } + select { + case x := <-c.Done(): + t.Errorf("<-c.Done() == %v want nothing (it should block)", x) + default: + } + if got, want := fmt.Sprint(c), "context.Background"; got != want { + t.Errorf("Background().String() = %q want %q", got, want) + } +} + +func TestTODO(t *testing.T) { + c := TODO() + if c == nil { + t.Fatalf("TODO returned nil") + } + select { + case x := <-c.Done(): + t.Errorf("<-c.Done() == %v want nothing (it should block)", x) + default: + } + if got, want := fmt.Sprint(c), "context.TODO"; got != want { + t.Errorf("TODO().String() = %q want %q", got, want) + } +} + +func TestWithCancel(t *testing.T) { + c1, cancel := WithCancel(Background()) + + if got, want := fmt.Sprint(c1), "context.Background.WithCancel"; got != want { + t.Errorf("c1.String() = %q want %q", got, want) + } + + o := otherContext{c1} + c2, _ := WithCancel(o) + contexts := []Context{c1, o, c2} + + for i, c := range contexts { + if d := c.Done(); d == nil { + t.Errorf("c[%d].Done() == %v want non-nil", i, d) + } + if e := c.Err(); e != nil { + t.Errorf("c[%d].Err() == %v want nil", i, e) + } + + select { + case x := <-c.Done(): + t.Errorf("<-c.Done() == %v want nothing (it should block)", x) + default: + } + } + + cancel() + time.Sleep(100 * time.Millisecond) // let cancelation propagate + + for i, c := range contexts { + select { + case <-c.Done(): + default: + t.Errorf("<-c[%d].Done() blocked, but shouldn't have", i) + } + if e := c.Err(); e != Canceled { + t.Errorf("c[%d].Err() == %v want %v", i, e, Canceled) + } + } +} + +func TestParentFinishesChild(t *testing.T) { + // Context tree: + // parent -> cancelChild + // parent -> valueChild -> timerChild + parent, cancel := WithCancel(Background()) + cancelChild, stop := WithCancel(parent) + defer stop() + valueChild := WithValue(parent, "key", "value") + timerChild, stop := WithTimeout(valueChild, 10000*time.Hour) + defer stop() + + select { + case x := <-parent.Done(): + t.Errorf("<-parent.Done() == %v want nothing (it should block)", x) + case x := <-cancelChild.Done(): + t.Errorf("<-cancelChild.Done() == %v want nothing (it should block)", x) + case x := <-timerChild.Done(): + t.Errorf("<-timerChild.Done() == %v want nothing (it should block)", x) + case x := <-valueChild.Done(): + t.Errorf("<-valueChild.Done() == %v want nothing (it should block)", x) + default: + } + + // The parent's children should contain the two cancelable children. + pc := parent.(*cancelCtx) + cc := cancelChild.(*cancelCtx) + tc := timerChild.(*timerCtx) + pc.mu.Lock() + if len(pc.children) != 2 || !pc.children[cc] || !pc.children[tc] { + t.Errorf("bad linkage: pc.children = %v, want %v and %v", + pc.children, cc, tc) + } + pc.mu.Unlock() + + if p, ok := parentCancelCtx(cc.Context); !ok || p != pc { + t.Errorf("bad linkage: parentCancelCtx(cancelChild.Context) = %v, %v want %v, true", p, ok, pc) + } + if p, ok := parentCancelCtx(tc.Context); !ok || p != pc { + t.Errorf("bad linkage: parentCancelCtx(timerChild.Context) = %v, %v want %v, true", p, ok, pc) + } + + cancel() + + pc.mu.Lock() + if len(pc.children) != 0 { + t.Errorf("pc.cancel didn't clear pc.children = %v", pc.children) + } + pc.mu.Unlock() + + // parent and children should all be finished. 
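The cancellation tests above (and TestParentFinishesChild, which continues below) exercise the package's core guarantee: canceling a Context closes its Done channel and the Done channels of every derived Context. A minimal standalone sketch of that propagation, outside the test harness (names are illustrative):

    package main

    import (
    	"fmt"

    	"golang.org/x/net/context"
    )

    func main() {
    	parent, cancel := context.WithCancel(context.Background())
    	child, childCancel := context.WithCancel(parent)
    	defer childCancel()

    	cancel() // canceling the parent propagates to every derived context

    	<-child.Done()           // closed by the parent's cancel
    	fmt.Println(child.Err()) // context canceled
    }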
+ check := func(ctx Context, name string) { + select { + case <-ctx.Done(): + default: + t.Errorf("<-%s.Done() blocked, but shouldn't have", name) + } + if e := ctx.Err(); e != Canceled { + t.Errorf("%s.Err() == %v want %v", name, e, Canceled) + } + } + check(parent, "parent") + check(cancelChild, "cancelChild") + check(valueChild, "valueChild") + check(timerChild, "timerChild") + + // WithCancel should return a canceled context on a canceled parent. + precanceledChild := WithValue(parent, "key", "value") + select { + case <-precanceledChild.Done(): + default: + t.Errorf("<-precanceledChild.Done() blocked, but shouldn't have") + } + if e := precanceledChild.Err(); e != Canceled { + t.Errorf("precanceledChild.Err() == %v want %v", e, Canceled) + } +} + +func TestChildFinishesFirst(t *testing.T) { + cancelable, stop := WithCancel(Background()) + defer stop() + for _, parent := range []Context{Background(), cancelable} { + child, cancel := WithCancel(parent) + + select { + case x := <-parent.Done(): + t.Errorf("<-parent.Done() == %v want nothing (it should block)", x) + case x := <-child.Done(): + t.Errorf("<-child.Done() == %v want nothing (it should block)", x) + default: + } + + cc := child.(*cancelCtx) + pc, pcok := parent.(*cancelCtx) // pcok == false when parent == Background() + if p, ok := parentCancelCtx(cc.Context); ok != pcok || (ok && pc != p) { + t.Errorf("bad linkage: parentCancelCtx(cc.Context) = %v, %v want %v, %v", p, ok, pc, pcok) + } + + if pcok { + pc.mu.Lock() + if len(pc.children) != 1 || !pc.children[cc] { + t.Errorf("bad linkage: pc.children = %v, cc = %v", pc.children, cc) + } + pc.mu.Unlock() + } + + cancel() + + if pcok { + pc.mu.Lock() + if len(pc.children) != 0 { + t.Errorf("child's cancel didn't remove self from pc.children = %v", pc.children) + } + pc.mu.Unlock() + } + + // child should be finished. + select { + case <-child.Done(): + default: + t.Errorf("<-child.Done() blocked, but shouldn't have") + } + if e := child.Err(); e != Canceled { + t.Errorf("child.Err() == %v want %v", e, Canceled) + } + + // parent should not be finished. 
+ select { + case x := <-parent.Done(): + t.Errorf("<-parent.Done() == %v want nothing (it should block)", x) + default: + } + if e := parent.Err(); e != nil { + t.Errorf("parent.Err() == %v want nil", e) + } + } +} + +func testDeadline(c Context, wait time.Duration, t *testing.T) { + select { + case <-time.After(wait): + t.Fatalf("context should have timed out") + case <-c.Done(): + } + if e := c.Err(); e != DeadlineExceeded { + t.Errorf("c.Err() == %v want %v", e, DeadlineExceeded) + } +} + +func TestDeadline(t *testing.T) { + t.Parallel() + const timeUnit = 500 * time.Millisecond + c, _ := WithDeadline(Background(), time.Now().Add(1*timeUnit)) + if got, prefix := fmt.Sprint(c), "context.Background.WithDeadline("; !strings.HasPrefix(got, prefix) { + t.Errorf("c.String() = %q want prefix %q", got, prefix) + } + testDeadline(c, 2*timeUnit, t) + + c, _ = WithDeadline(Background(), time.Now().Add(1*timeUnit)) + o := otherContext{c} + testDeadline(o, 2*timeUnit, t) + + c, _ = WithDeadline(Background(), time.Now().Add(1*timeUnit)) + o = otherContext{c} + c, _ = WithDeadline(o, time.Now().Add(3*timeUnit)) + testDeadline(c, 2*timeUnit, t) +} + +func TestTimeout(t *testing.T) { + t.Parallel() + const timeUnit = 500 * time.Millisecond + c, _ := WithTimeout(Background(), 1*timeUnit) + if got, prefix := fmt.Sprint(c), "context.Background.WithDeadline("; !strings.HasPrefix(got, prefix) { + t.Errorf("c.String() = %q want prefix %q", got, prefix) + } + testDeadline(c, 2*timeUnit, t) + + c, _ = WithTimeout(Background(), 1*timeUnit) + o := otherContext{c} + testDeadline(o, 2*timeUnit, t) + + c, _ = WithTimeout(Background(), 1*timeUnit) + o = otherContext{c} + c, _ = WithTimeout(o, 3*timeUnit) + testDeadline(c, 2*timeUnit, t) +} + +func TestCanceledTimeout(t *testing.T) { + t.Parallel() + const timeUnit = 500 * time.Millisecond + c, _ := WithTimeout(Background(), 2*timeUnit) + o := otherContext{c} + c, cancel := WithTimeout(o, 4*timeUnit) + cancel() + time.Sleep(1 * timeUnit) // let cancelation propagate + select { + case <-c.Done(): + default: + t.Errorf("<-c.Done() blocked, but shouldn't have") + } + if e := c.Err(); e != Canceled { + t.Errorf("c.Err() == %v want %v", e, Canceled) + } +} + +type key1 int +type key2 int + +var k1 = key1(1) +var k2 = key2(1) // same int as k1, different type +var k3 = key2(3) // same type as k2, different int + +func TestValues(t *testing.T) { + check := func(c Context, nm, v1, v2, v3 string) { + if v, ok := c.Value(k1).(string); ok == (len(v1) == 0) || v != v1 { + t.Errorf(`%s.Value(k1).(string) = %q, %t want %q, %t`, nm, v, ok, v1, len(v1) != 0) + } + if v, ok := c.Value(k2).(string); ok == (len(v2) == 0) || v != v2 { + t.Errorf(`%s.Value(k2).(string) = %q, %t want %q, %t`, nm, v, ok, v2, len(v2) != 0) + } + if v, ok := c.Value(k3).(string); ok == (len(v3) == 0) || v != v3 { + t.Errorf(`%s.Value(k3).(string) = %q, %t want %q, %t`, nm, v, ok, v3, len(v3) != 0) + } + } + + c0 := Background() + check(c0, "c0", "", "", "") + + c1 := WithValue(Background(), k1, "c1k1") + check(c1, "c1", "c1k1", "", "") + + if got, want := fmt.Sprint(c1), `context.Background.WithValue(1, "c1k1")`; got != want { + t.Errorf("c.String() = %q want %q", got, want) + } + + c2 := WithValue(c1, k2, "c2k2") + check(c2, "c2", "c1k1", "c2k2", "") + + c3 := WithValue(c2, k3, "c3k3") + check(c3, "c2", "c1k1", "c2k2", "c3k3") + + c4 := WithValue(c3, k1, nil) + check(c4, "c4", "", "c2k2", "c3k3") + + o0 := otherContext{Background()} + check(o0, "o0", "", "", "") + + o1 := 
otherContext{WithValue(Background(), k1, "c1k1")} + check(o1, "o1", "c1k1", "", "") + + o2 := WithValue(o1, k2, "o2k2") + check(o2, "o2", "c1k1", "o2k2", "") + + o3 := otherContext{c4} + check(o3, "o3", "", "c2k2", "c3k3") + + o4 := WithValue(o3, k3, nil) + check(o4, "o4", "", "c2k2", "") +} + +func TestAllocs(t *testing.T) { + bg := Background() + for _, test := range []struct { + desc string + f func() + limit float64 + gccgoLimit float64 + }{ + { + desc: "Background()", + f: func() { Background() }, + limit: 0, + gccgoLimit: 0, + }, + { + desc: fmt.Sprintf("WithValue(bg, %v, nil)", k1), + f: func() { + c := WithValue(bg, k1, nil) + c.Value(k1) + }, + limit: 3, + gccgoLimit: 3, + }, + { + desc: "WithTimeout(bg, 15*time.Millisecond)", + f: func() { + c, _ := WithTimeout(bg, 15*time.Millisecond) + <-c.Done() + }, + limit: 8, + gccgoLimit: 16, + }, + { + desc: "WithCancel(bg)", + f: func() { + c, cancel := WithCancel(bg) + cancel() + <-c.Done() + }, + limit: 5, + gccgoLimit: 8, + }, + { + desc: "WithTimeout(bg, 100*time.Millisecond)", + f: func() { + c, cancel := WithTimeout(bg, 100*time.Millisecond) + cancel() + <-c.Done() + }, + limit: 8, + gccgoLimit: 25, + }, + } { + limit := test.limit + if runtime.Compiler == "gccgo" { + // gccgo does not yet do escape analysis. + // TODO(iant): Remove this when gccgo does do escape analysis. + limit = test.gccgoLimit + } + if n := testing.AllocsPerRun(100, test.f); n > limit { + t.Errorf("%s allocs = %f want %d", test.desc, n, int(limit)) + } + } +} + +func TestSimultaneousCancels(t *testing.T) { + root, cancel := WithCancel(Background()) + m := map[Context]CancelFunc{root: cancel} + q := []Context{root} + // Create a tree of contexts. + for len(q) != 0 && len(m) < 100 { + parent := q[0] + q = q[1:] + for i := 0; i < 4; i++ { + ctx, cancel := WithCancel(parent) + m[ctx] = cancel + q = append(q, ctx) + } + } + // Start all the cancels in a random order. + var wg sync.WaitGroup + wg.Add(len(m)) + for _, cancel := range m { + go func(cancel CancelFunc) { + cancel() + wg.Done() + }(cancel) + } + // Wait on all the contexts in a random order. + for ctx := range m { + select { + case <-ctx.Done(): + case <-time.After(1 * time.Second): + buf := make([]byte, 10<<10) + n := runtime.Stack(buf, true) + t.Fatalf("timed out waiting for <-ctx.Done(); stacks:\n%s", buf[:n]) + } + } + // Wait for all the cancel functions to return. + done := make(chan struct{}) + go func() { + wg.Wait() + close(done) + }() + select { + case <-done: + case <-time.After(1 * time.Second): + buf := make([]byte, 10<<10) + n := runtime.Stack(buf, true) + t.Fatalf("timed out waiting for cancel functions; stacks:\n%s", buf[:n]) + } +} + +func TestInterlockedCancels(t *testing.T) { + parent, cancelParent := WithCancel(Background()) + child, cancelChild := WithCancel(parent) + go func() { + parent.Done() + cancelChild() + }() + cancelParent() + select { + case <-child.Done(): + case <-time.After(1 * time.Second): + buf := make([]byte, 10<<10) + n := runtime.Stack(buf, true) + t.Fatalf("timed out waiting for child.Done(); stacks:\n%s", buf[:n]) + } +} + +func TestLayersCancel(t *testing.T) { + testLayers(t, time.Now().UnixNano(), false) +} + +func TestLayersTimeout(t *testing.T) { + testLayers(t, time.Now().UnixNano(), true) +} + +func testLayers(t *testing.T, seed int64, testTimeout bool) { + rand.Seed(seed) + errorf := func(format string, a ...interface{}) { + t.Errorf(fmt.Sprintf("seed=%d: %s", seed, format), a...) 
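TestAllocs, shown above, drives testing.AllocsPerRun with per-compiler allocation budgets. For reference, a stripped-down sketch of the same pattern against this package; the key type is hypothetical, and the budget of three allocations mirrors the WithValue case in the table above:

    package context_test

    import (
    	"testing"

    	"golang.org/x/net/context"
    )

    type ctxKey int

    const someKey ctxKey = 0

    // Fail if the WithValue/Value round trip averages more than three
    // allocations per run.
    func TestValueAllocs(t *testing.T) {
    	bg := context.Background()
    	n := testing.AllocsPerRun(100, func() {
    		c := context.WithValue(bg, someKey, "v")
    		_ = c.Value(someKey)
    	})
    	if n > 3 {
    		t.Errorf("allocs per run = %v, want <= 3", n)
    	}
    }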
+ } + const ( + timeout = 200 * time.Millisecond + minLayers = 30 + ) + type value int + var ( + vals []*value + cancels []CancelFunc + numTimers int + ctx = Background() + ) + for i := 0; i < minLayers || numTimers == 0 || len(cancels) == 0 || len(vals) == 0; i++ { + switch rand.Intn(3) { + case 0: + v := new(value) + ctx = WithValue(ctx, v, v) + vals = append(vals, v) + case 1: + var cancel CancelFunc + ctx, cancel = WithCancel(ctx) + cancels = append(cancels, cancel) + case 2: + var cancel CancelFunc + ctx, cancel = WithTimeout(ctx, timeout) + cancels = append(cancels, cancel) + numTimers++ + } + } + checkValues := func(when string) { + for _, key := range vals { + if val := ctx.Value(key).(*value); key != val { + errorf("%s: ctx.Value(%p) = %p want %p", when, key, val, key) + } + } + } + select { + case <-ctx.Done(): + errorf("ctx should not be canceled yet") + default: + } + if s, prefix := fmt.Sprint(ctx), "context.Background."; !strings.HasPrefix(s, prefix) { + t.Errorf("ctx.String() = %q want prefix %q", s, prefix) + } + t.Log(ctx) + checkValues("before cancel") + if testTimeout { + select { + case <-ctx.Done(): + case <-time.After(timeout + 100*time.Millisecond): + errorf("ctx should have timed out") + } + checkValues("after timeout") + } else { + cancel := cancels[rand.Intn(len(cancels))] + cancel() + select { + case <-ctx.Done(): + default: + errorf("ctx should be canceled") + } + checkValues("after cancel") + } +} + +func TestCancelRemoves(t *testing.T) { + checkChildren := func(when string, ctx Context, want int) { + if got := len(ctx.(*cancelCtx).children); got != want { + t.Errorf("%s: context has %d children, want %d", when, got, want) + } + } + + ctx, _ := WithCancel(Background()) + checkChildren("after creation", ctx, 0) + _, cancel := WithCancel(ctx) + checkChildren("with WithCancel child ", ctx, 1) + cancel() + checkChildren("after cancelling WithCancel child", ctx, 0) + + ctx, _ = WithCancel(Background()) + checkChildren("after creation", ctx, 0) + _, cancel = WithTimeout(ctx, 60*time.Minute) + checkChildren("with WithTimeout child ", ctx, 1) + cancel() + checkChildren("after cancelling WithTimeout child", ctx, 0) +} diff --git a/vendor/golang.org/x/net/context/ctxhttp/BUILD b/vendor/golang.org/x/net/context/ctxhttp/BUILD deleted file mode 100644 index 3faa69d0ebec..000000000000 --- a/vendor/golang.org/x/net/context/ctxhttp/BUILD +++ /dev/null @@ -1,29 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "ctxhttp.go", - "ctxhttp_pre17.go", - ], - importpath = "golang.org/x/net/context/ctxhttp", - deps = ["//vendor/golang.org/x/net/context:go_default_library"], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], -) diff --git a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go deleted file mode 100644 index 22eabffe5f73..000000000000 --- a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.7 - -// Package ctxhttp provides helper functions for performing context-aware HTTP requests. 
-package ctxhttp - -import ( - "io" - "net/http" - "net/url" - "strings" - - "golang.org/x/net/context" -) - -// Do sends an HTTP request with the provided http.Client and returns -// an HTTP response. -// -// If the client is nil, http.DefaultClient is used. -// -// The provided ctx must be non-nil. If it is canceled or times out, -// ctx.Err() will be returned. -func Do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) { - if client == nil { - client = http.DefaultClient - } - resp, err := client.Do(req.WithContext(ctx)) - // If we got an error, and the context has been canceled, - // the context's error is probably more useful. - if err != nil { - select { - case <-ctx.Done(): - err = ctx.Err() - default: - } - } - return resp, err -} - -// Get issues a GET request via the Do function. -func Get(ctx context.Context, client *http.Client, url string) (*http.Response, error) { - req, err := http.NewRequest("GET", url, nil) - if err != nil { - return nil, err - } - return Do(ctx, client, req) -} - -// Head issues a HEAD request via the Do function. -func Head(ctx context.Context, client *http.Client, url string) (*http.Response, error) { - req, err := http.NewRequest("HEAD", url, nil) - if err != nil { - return nil, err - } - return Do(ctx, client, req) -} - -// Post issues a POST request via the Do function. -func Post(ctx context.Context, client *http.Client, url string, bodyType string, body io.Reader) (*http.Response, error) { - req, err := http.NewRequest("POST", url, body) - if err != nil { - return nil, err - } - req.Header.Set("Content-Type", bodyType) - return Do(ctx, client, req) -} - -// PostForm issues a POST request via the Do function. -func PostForm(ctx context.Context, client *http.Client, url string, data url.Values) (*http.Response, error) { - return Post(ctx, client, url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode())) -} diff --git a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17.go b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17.go deleted file mode 100644 index 7564b203276a..000000000000 --- a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17.go +++ /dev/null @@ -1,147 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !go1.7 - -package ctxhttp - -import ( - "io" - "net/http" - "net/url" - "strings" - - "golang.org/x/net/context" -) - -func nop() {} - -var ( - testHookContextDoneBeforeHeaders = nop - testHookDoReturned = nop - testHookDidBodyClose = nop -) - -// Do sends an HTTP request with the provided http.Client and returns an HTTP response. -// If the client is nil, http.DefaultClient is used. -// If the context is canceled or times out, ctx.Err() will be returned. -func Do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) { - if client == nil { - client = http.DefaultClient - } - - // TODO(djd): Respect any existing value of req.Cancel. - cancel := make(chan struct{}) - req.Cancel = cancel - - type responseAndError struct { - resp *http.Response - err error - } - result := make(chan responseAndError, 1) - - // Make local copies of test hooks closed over by goroutines below. - // Prevents data races in tests. 
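The Do/Get/Head/Post helpers being deleted here attach a Context to an *http.Request; the go1.7 variant above simply calls req.WithContext and prefers ctx.Err() when the context expired. For reference, a minimal sketch of how a caller used these helpers (the URL is a placeholder):

    package main

    import (
    	"fmt"
    	"io/ioutil"
    	"time"

    	"golang.org/x/net/context"
    	"golang.org/x/net/context/ctxhttp"
    )

    func main() {
    	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
    	defer cancel()

    	// A nil client means http.DefaultClient, per the Do docs above.
    	resp, err := ctxhttp.Get(ctx, nil, "https://example.com/")
    	if err != nil {
    		fmt.Println("request failed or context expired:", err)
    		return
    	}
    	defer resp.Body.Close()
    	body, _ := ioutil.ReadAll(resp.Body)
    	fmt.Println(len(body), "bytes")
    }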
- testHookDoReturned := testHookDoReturned - testHookDidBodyClose := testHookDidBodyClose - - go func() { - resp, err := client.Do(req) - testHookDoReturned() - result <- responseAndError{resp, err} - }() - - var resp *http.Response - - select { - case <-ctx.Done(): - testHookContextDoneBeforeHeaders() - close(cancel) - // Clean up after the goroutine calling client.Do: - go func() { - if r := <-result; r.resp != nil { - testHookDidBodyClose() - r.resp.Body.Close() - } - }() - return nil, ctx.Err() - case r := <-result: - var err error - resp, err = r.resp, r.err - if err != nil { - return resp, err - } - } - - c := make(chan struct{}) - go func() { - select { - case <-ctx.Done(): - close(cancel) - case <-c: - // The response's Body is closed. - } - }() - resp.Body = ¬ifyingReader{resp.Body, c} - - return resp, nil -} - -// Get issues a GET request via the Do function. -func Get(ctx context.Context, client *http.Client, url string) (*http.Response, error) { - req, err := http.NewRequest("GET", url, nil) - if err != nil { - return nil, err - } - return Do(ctx, client, req) -} - -// Head issues a HEAD request via the Do function. -func Head(ctx context.Context, client *http.Client, url string) (*http.Response, error) { - req, err := http.NewRequest("HEAD", url, nil) - if err != nil { - return nil, err - } - return Do(ctx, client, req) -} - -// Post issues a POST request via the Do function. -func Post(ctx context.Context, client *http.Client, url string, bodyType string, body io.Reader) (*http.Response, error) { - req, err := http.NewRequest("POST", url, body) - if err != nil { - return nil, err - } - req.Header.Set("Content-Type", bodyType) - return Do(ctx, client, req) -} - -// PostForm issues a POST request via the Do function. -func PostForm(ctx context.Context, client *http.Client, url string, data url.Values) (*http.Response, error) { - return Post(ctx, client, url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode())) -} - -// notifyingReader is an io.ReadCloser that closes the notify channel after -// Close is called or a Read fails on the underlying ReadCloser. -type notifyingReader struct { - io.ReadCloser - notify chan<- struct{} -} - -func (r *notifyingReader) Read(p []byte) (int, error) { - n, err := r.ReadCloser.Read(p) - if err != nil && r.notify != nil { - close(r.notify) - r.notify = nil - } - return n, err -} - -func (r *notifyingReader) Close() error { - err := r.ReadCloser.Close() - if r.notify != nil { - close(r.notify) - r.notify = nil - } - return err -} diff --git a/vendor/golang.org/x/net/context/withtimeout_test.go b/vendor/golang.org/x/net/context/withtimeout_test.go new file mode 100644 index 000000000000..a6754dc36892 --- /dev/null +++ b/vendor/golang.org/x/net/context/withtimeout_test.go @@ -0,0 +1,26 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package context_test + +import ( + "fmt" + "time" + + "golang.org/x/net/context" +) + +func ExampleWithTimeout() { + // Pass a context with a timeout to tell a blocking function that it + // should abandon its work after the timeout elapses. 
+ ctx, _ := context.WithTimeout(context.Background(), 100*time.Millisecond) + select { + case <-time.After(200 * time.Millisecond): + fmt.Println("overslept") + case <-ctx.Done(): + fmt.Println(ctx.Err()) // prints "context deadline exceeded" + } + // Output: + // context deadline exceeded +} diff --git a/vendor/golang.org/x/net/http2/.gitignore b/vendor/golang.org/x/net/http2/.gitignore deleted file mode 100644 index 190f12234adf..000000000000 --- a/vendor/golang.org/x/net/http2/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -*~ -h2i/h2i diff --git a/vendor/golang.org/x/net/http2/BUILD b/vendor/golang.org/x/net/http2/BUILD deleted file mode 100644 index 4b156fbf8fbd..000000000000 --- a/vendor/golang.org/x/net/http2/BUILD +++ /dev/null @@ -1,57 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "client_conn_pool.go", - "configure_transport.go", - "errors.go", - "fixed_buffer.go", - "flow.go", - "frame.go", - "go16.go", - "go17.go", - "go17_not18.go", - "go18.go", - "gotrack.go", - "headermap.go", - "http2.go", - "not_go16.go", - "not_go17.go", - "not_go18.go", - "pipe.go", - "server.go", - "transport.go", - "write.go", - "writesched.go", - "writesched_priority.go", - "writesched_random.go", - ], - importpath = "golang.org/x/net/http2", - deps = [ - "//vendor/golang.org/x/net/http2/hpack:go_default_library", - "//vendor/golang.org/x/net/idna:go_default_library", - "//vendor/golang.org/x/net/lex/httplex:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [ - ":package-srcs", - "//vendor/golang.org/x/net/http2/hpack:all-srcs", - ], - tags = ["automanaged"], -) diff --git a/vendor/golang.org/x/net/http2/Dockerfile b/vendor/golang.org/x/net/http2/Dockerfile deleted file mode 100644 index 53fc52579744..000000000000 --- a/vendor/golang.org/x/net/http2/Dockerfile +++ /dev/null @@ -1,51 +0,0 @@ -# -# This Dockerfile builds a recent curl with HTTP/2 client support, using -# a recent nghttp2 build. -# -# See the Makefile for how to tag it. If Docker and that image is found, the -# Go tests use this curl binary for integration tests. 
-# - -FROM ubuntu:trusty - -RUN apt-get update && \ - apt-get upgrade -y && \ - apt-get install -y git-core build-essential wget - -RUN apt-get install -y --no-install-recommends \ - autotools-dev libtool pkg-config zlib1g-dev \ - libcunit1-dev libssl-dev libxml2-dev libevent-dev \ - automake autoconf - -# The list of packages nghttp2 recommends for h2load: -RUN apt-get install -y --no-install-recommends make binutils \ - autoconf automake autotools-dev \ - libtool pkg-config zlib1g-dev libcunit1-dev libssl-dev libxml2-dev \ - libev-dev libevent-dev libjansson-dev libjemalloc-dev \ - cython python3.4-dev python-setuptools - -# Note: setting NGHTTP2_VER before the git clone, so an old git clone isn't cached: -ENV NGHTTP2_VER 895da9a -RUN cd /root && git clone https://github.com/tatsuhiro-t/nghttp2.git - -WORKDIR /root/nghttp2 -RUN git reset --hard $NGHTTP2_VER -RUN autoreconf -i -RUN automake -RUN autoconf -RUN ./configure -RUN make -RUN make install - -WORKDIR /root -RUN wget http://curl.haxx.se/download/curl-7.45.0.tar.gz -RUN tar -zxvf curl-7.45.0.tar.gz -WORKDIR /root/curl-7.45.0 -RUN ./configure --with-ssl --with-nghttp2=/usr/local -RUN make -RUN make install -RUN ldconfig - -CMD ["-h"] -ENTRYPOINT ["/usr/local/bin/curl"] - diff --git a/vendor/golang.org/x/net/http2/Makefile b/vendor/golang.org/x/net/http2/Makefile deleted file mode 100644 index 55fd826f77bd..000000000000 --- a/vendor/golang.org/x/net/http2/Makefile +++ /dev/null @@ -1,3 +0,0 @@ -curlimage: - docker build -t gohttp2/curl . - diff --git a/vendor/golang.org/x/net/http2/README b/vendor/golang.org/x/net/http2/README deleted file mode 100644 index 360d5aa37905..000000000000 --- a/vendor/golang.org/x/net/http2/README +++ /dev/null @@ -1,20 +0,0 @@ -This is a work-in-progress HTTP/2 implementation for Go. - -It will eventually live in the Go standard library and won't require -any changes to your code to use. It will just be automatic. - -Status: - -* The server support is pretty good. A few things are missing - but are being worked on. -* The client work has just started but shares a lot of code - and is coming along much quicker. - -Docs are at https://godoc.org/golang.org/x/net/http2 - -Demo test server at https://http2.golang.org/ - -Help & bug reports welcome! - -Contributing: https://golang.org/doc/contribute.html -Bugs: https://golang.org/issue/new?title=x/net/http2:+ diff --git a/vendor/golang.org/x/net/http2/client_conn_pool.go b/vendor/golang.org/x/net/http2/client_conn_pool.go deleted file mode 100644 index b13941258a35..000000000000 --- a/vendor/golang.org/x/net/http2/client_conn_pool.go +++ /dev/null @@ -1,256 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Transport code's client connection pooling. - -package http2 - -import ( - "crypto/tls" - "net/http" - "sync" -) - -// ClientConnPool manages a pool of HTTP/2 client connections. -type ClientConnPool interface { - GetClientConn(req *http.Request, addr string) (*ClientConn, error) - MarkDead(*ClientConn) -} - -// clientConnPoolIdleCloser is the interface implemented by ClientConnPool -// implementations which can close their idle connections. -type clientConnPoolIdleCloser interface { - ClientConnPool - closeIdleConnections() -} - -var ( - _ clientConnPoolIdleCloser = (*clientConnPool)(nil) - _ clientConnPoolIdleCloser = noDialClientConnPool{} -) - -// TODO: use singleflight for dialing and addConnCalls?
-type clientConnPool struct { - t *Transport - - mu sync.Mutex // TODO: maybe switch to RWMutex - // TODO: add support for sharing conns based on cert names - // (e.g. share conn for googleapis.com and appspot.com) - conns map[string][]*ClientConn // key is host:port - dialing map[string]*dialCall // currently in-flight dials - keys map[*ClientConn][]string - addConnCalls map[string]*addConnCall // in-flight addConnIfNeeded calls -} - -func (p *clientConnPool) GetClientConn(req *http.Request, addr string) (*ClientConn, error) { - return p.getClientConn(req, addr, dialOnMiss) -} - -const ( - dialOnMiss = true - noDialOnMiss = false -) - -func (p *clientConnPool) getClientConn(req *http.Request, addr string, dialOnMiss bool) (*ClientConn, error) { - if isConnectionCloseRequest(req) && dialOnMiss { - // It gets its own connection. - const singleUse = true - cc, err := p.t.dialClientConn(addr, singleUse) - if err != nil { - return nil, err - } - return cc, nil - } - p.mu.Lock() - for _, cc := range p.conns[addr] { - if cc.CanTakeNewRequest() { - p.mu.Unlock() - return cc, nil - } - } - if !dialOnMiss { - p.mu.Unlock() - return nil, ErrNoCachedConn - } - call := p.getStartDialLocked(addr) - p.mu.Unlock() - <-call.done - return call.res, call.err -} - -// dialCall is an in-flight Transport dial call to a host. -type dialCall struct { - p *clientConnPool - done chan struct{} // closed when done - res *ClientConn // valid after done is closed - err error // valid after done is closed -} - -// requires p.mu is held. -func (p *clientConnPool) getStartDialLocked(addr string) *dialCall { - if call, ok := p.dialing[addr]; ok { - // A dial is already in-flight. Don't start another. - return call - } - call := &dialCall{p: p, done: make(chan struct{})} - if p.dialing == nil { - p.dialing = make(map[string]*dialCall) - } - p.dialing[addr] = call - go call.dial(addr) - return call -} - -// run in its own goroutine. -func (c *dialCall) dial(addr string) { - const singleUse = false // shared conn - c.res, c.err = c.p.t.dialClientConn(addr, singleUse) - close(c.done) - - c.p.mu.Lock() - delete(c.p.dialing, addr) - if c.err == nil { - c.p.addConnLocked(addr, c.res) - } - c.p.mu.Unlock() -} - -// addConnIfNeeded makes a NewClientConn out of c if a connection for key doesn't -// already exist. It coalesces concurrent calls with the same key. -// This is used by the http1 Transport code when it creates a new connection. Because -// the http1 Transport doesn't de-dup TCP dials to outbound hosts (because it doesn't know -// the protocol), it can get into a situation where it has multiple TLS connections. -// This code decides which ones live or die. -// The return value, used, reports whether c was used. -// c is never closed.
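getClientConn and dialCall above implement the coalescing that the singleflight TODO refers to: the first caller for an address starts the dial, and every later caller blocks on the same done channel and shares the result. A standalone sketch of that idea, with illustrative names and a plain net.Conn standing in for *ClientConn:

    package clientpool

    import (
    	"net"
    	"sync"
    )

    type call struct {
    	done chan struct{} // closed when the dial finishes
    	conn net.Conn      // valid after done is closed
    	err  error         // valid after done is closed
    }

    type pool struct {
    	mu      sync.Mutex
    	dialing map[string]*call // one in-flight dial per address
    }

    // get returns a connection for addr, starting at most one dial per
    // address no matter how many goroutines call it concurrently.
    func (p *pool) get(addr string) (net.Conn, error) {
    	p.mu.Lock()
    	c, ok := p.dialing[addr]
    	if !ok {
    		if p.dialing == nil {
    			p.dialing = make(map[string]*call)
    		}
    		c = &call{done: make(chan struct{})}
    		p.dialing[addr] = c
    		go func() {
    			c.conn, c.err = net.Dial("tcp", addr)
    			p.mu.Lock()
    			delete(p.dialing, addr)
    			p.mu.Unlock()
    			close(c.done)
    		}()
    	}
    	p.mu.Unlock()
    	<-c.done // every waiter shares the single dial's result
    	return c.conn, c.err
    }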
-func (p *clientConnPool) addConnIfNeeded(key string, t *Transport, c *tls.Conn) (used bool, err error) { - p.mu.Lock() - for _, cc := range p.conns[key] { - if cc.CanTakeNewRequest() { - p.mu.Unlock() - return false, nil - } - } - call, dup := p.addConnCalls[key] - if !dup { - if p.addConnCalls == nil { - p.addConnCalls = make(map[string]*addConnCall) - } - call = &addConnCall{ - p: p, - done: make(chan struct{}), - } - p.addConnCalls[key] = call - go call.run(t, key, c) - } - p.mu.Unlock() - - <-call.done - if call.err != nil { - return false, call.err - } - return !dup, nil -} - -type addConnCall struct { - p *clientConnPool - done chan struct{} // closed when done - err error -} - -func (c *addConnCall) run(t *Transport, key string, tc *tls.Conn) { - cc, err := t.NewClientConn(tc) - - p := c.p - p.mu.Lock() - if err != nil { - c.err = err - } else { - p.addConnLocked(key, cc) - } - delete(p.addConnCalls, key) - p.mu.Unlock() - close(c.done) -} - -func (p *clientConnPool) addConn(key string, cc *ClientConn) { - p.mu.Lock() - p.addConnLocked(key, cc) - p.mu.Unlock() -} - -// p.mu must be held -func (p *clientConnPool) addConnLocked(key string, cc *ClientConn) { - for _, v := range p.conns[key] { - if v == cc { - return - } - } - if p.conns == nil { - p.conns = make(map[string][]*ClientConn) - } - if p.keys == nil { - p.keys = make(map[*ClientConn][]string) - } - p.conns[key] = append(p.conns[key], cc) - p.keys[cc] = append(p.keys[cc], key) -} - -func (p *clientConnPool) MarkDead(cc *ClientConn) { - p.mu.Lock() - defer p.mu.Unlock() - for _, key := range p.keys[cc] { - vv, ok := p.conns[key] - if !ok { - continue - } - newList := filterOutClientConn(vv, cc) - if len(newList) > 0 { - p.conns[key] = newList - } else { - delete(p.conns, key) - } - } - delete(p.keys, cc) -} - -func (p *clientConnPool) closeIdleConnections() { - p.mu.Lock() - defer p.mu.Unlock() - // TODO: don't close a cc if it was just added to the pool - // milliseconds ago and has never been used. There's currently - // a small race window with the HTTP/1 Transport's integration - // where it can add an idle conn just before using it, and - // somebody else can concurrently call CloseIdleConns and - // break some caller's RoundTrip. - for _, vv := range p.conns { - for _, cc := range vv { - cc.closeIfIdle() - } - } -} - -func filterOutClientConn(in []*ClientConn, exclude *ClientConn) []*ClientConn { - out := in[:0] - for _, v := range in { - if v != exclude { - out = append(out, v) - } - } - // If we filtered it out, zero out the last item to prevent - // the GC from seeing it. - if len(in) != len(out) { - in[len(in)-1] = nil - } - return out -} - -// noDialClientConnPool is an implementation of http2.ClientConnPool -// which never dials. We let the HTTP/1.1 client dial and use its TLS -// connection instead. -type noDialClientConnPool struct{ *clientConnPool } - -func (p noDialClientConnPool) GetClientConn(req *http.Request, addr string) (*ClientConn, error) { - return p.getClientConn(req, addr, noDialOnMiss) -} diff --git a/vendor/golang.org/x/net/http2/configure_transport.go b/vendor/golang.org/x/net/http2/configure_transport.go deleted file mode 100644 index 4f720f530b00..000000000000 --- a/vendor/golang.org/x/net/http2/configure_transport.go +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build go1.6 - -package http2 - -import ( - "crypto/tls" - "fmt" - "net/http" -) - -func configureTransport(t1 *http.Transport) (*Transport, error) { - connPool := new(clientConnPool) - t2 := &Transport{ - ConnPool: noDialClientConnPool{connPool}, - t1: t1, - } - connPool.t = t2 - if err := registerHTTPSProtocol(t1, noDialH2RoundTripper{t2}); err != nil { - return nil, err - } - if t1.TLSClientConfig == nil { - t1.TLSClientConfig = new(tls.Config) - } - if !strSliceContains(t1.TLSClientConfig.NextProtos, "h2") { - t1.TLSClientConfig.NextProtos = append([]string{"h2"}, t1.TLSClientConfig.NextProtos...) - } - if !strSliceContains(t1.TLSClientConfig.NextProtos, "http/1.1") { - t1.TLSClientConfig.NextProtos = append(t1.TLSClientConfig.NextProtos, "http/1.1") - } - upgradeFn := func(authority string, c *tls.Conn) http.RoundTripper { - addr := authorityAddr("https", authority) - if used, err := connPool.addConnIfNeeded(addr, t2, c); err != nil { - go c.Close() - return erringRoundTripper{err} - } else if !used { - // Turns out we don't need this c. - // For example, two goroutines made requests to the same host - // at the same time, both kicking off TCP dials. (since protocol - // was unknown) - go c.Close() - } - return t2 - } - if m := t1.TLSNextProto; len(m) == 0 { - t1.TLSNextProto = map[string]func(string, *tls.Conn) http.RoundTripper{ - "h2": upgradeFn, - } - } else { - m["h2"] = upgradeFn - } - return t2, nil -} - -// registerHTTPSProtocol calls Transport.RegisterProtocol but -// converting panics into errors. -func registerHTTPSProtocol(t *http.Transport, rt http.RoundTripper) (err error) { - defer func() { - if e := recover(); e != nil { - err = fmt.Errorf("%v", e) - } - }() - t.RegisterProtocol("https", rt) - return nil -} - -// noDialH2RoundTripper is a RoundTripper which only tries to complete the request -// if there's already a cached connection to the host. -type noDialH2RoundTripper struct{ t *Transport } - -func (rt noDialH2RoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { - res, err := rt.t.RoundTrip(req) - if err == ErrNoCachedConn { - return nil, http.ErrSkipAltProtocol - } - return res, err -} diff --git a/vendor/golang.org/x/net/http2/errors.go b/vendor/golang.org/x/net/http2/errors.go deleted file mode 100644 index 20fd7626a56d..000000000000 --- a/vendor/golang.org/x/net/http2/errors.go +++ /dev/null @@ -1,130 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package http2 - -import ( - "errors" - "fmt" -) - -// An ErrCode is an unsigned 32-bit error code as defined in the HTTP/2 spec.
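configureTransport above is the unexported half of an API that the x/net/http2 package exports as ConfigureTransport: it registers the "h2" TLSNextProto hook on an *http.Transport and adds "h2" and "http/1.1" to the TLS NextProtos list for ALPN. A minimal usage sketch, assuming the golang.org/x/net/http2 package:

    package main

    import (
    	"fmt"
    	"net/http"

    	"golang.org/x/net/http2"
    )

    func main() {
    	t1 := &http.Transport{}

    	// Wire t1 so that TLS connections negotiating "h2" via ALPN are
    	// handed to the HTTP/2 transport, as configureTransport does above.
    	if err := http2.ConfigureTransport(t1); err != nil {
    		fmt.Println("http2 setup failed:", err)
    		return
    	}

    	client := &http.Client{Transport: t1}
    	_ = client // use client.Get(...) as usual; HTTP/2 is chosen per connection
    }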
-type ErrCode uint32 - -const ( - ErrCodeNo ErrCode = 0x0 - ErrCodeProtocol ErrCode = 0x1 - ErrCodeInternal ErrCode = 0x2 - ErrCodeFlowControl ErrCode = 0x3 - ErrCodeSettingsTimeout ErrCode = 0x4 - ErrCodeStreamClosed ErrCode = 0x5 - ErrCodeFrameSize ErrCode = 0x6 - ErrCodeRefusedStream ErrCode = 0x7 - ErrCodeCancel ErrCode = 0x8 - ErrCodeCompression ErrCode = 0x9 - ErrCodeConnect ErrCode = 0xa - ErrCodeEnhanceYourCalm ErrCode = 0xb - ErrCodeInadequateSecurity ErrCode = 0xc - ErrCodeHTTP11Required ErrCode = 0xd -) - -var errCodeName = map[ErrCode]string{ - ErrCodeNo: "NO_ERROR", - ErrCodeProtocol: "PROTOCOL_ERROR", - ErrCodeInternal: "INTERNAL_ERROR", - ErrCodeFlowControl: "FLOW_CONTROL_ERROR", - ErrCodeSettingsTimeout: "SETTINGS_TIMEOUT", - ErrCodeStreamClosed: "STREAM_CLOSED", - ErrCodeFrameSize: "FRAME_SIZE_ERROR", - ErrCodeRefusedStream: "REFUSED_STREAM", - ErrCodeCancel: "CANCEL", - ErrCodeCompression: "COMPRESSION_ERROR", - ErrCodeConnect: "CONNECT_ERROR", - ErrCodeEnhanceYourCalm: "ENHANCE_YOUR_CALM", - ErrCodeInadequateSecurity: "INADEQUATE_SECURITY", - ErrCodeHTTP11Required: "HTTP_1_1_REQUIRED", -} - -func (e ErrCode) String() string { - if s, ok := errCodeName[e]; ok { - return s - } - return fmt.Sprintf("unknown error code 0x%x", uint32(e)) -} - -// ConnectionError is an error that results in the termination of the -// entire connection. -type ConnectionError ErrCode - -func (e ConnectionError) Error() string { return fmt.Sprintf("connection error: %s", ErrCode(e)) } - -// StreamError is an error that only affects one stream within an -// HTTP/2 connection. -type StreamError struct { - StreamID uint32 - Code ErrCode - Cause error // optional additional detail -} - -func streamError(id uint32, code ErrCode) StreamError { - return StreamError{StreamID: id, Code: code} -} - -func (e StreamError) Error() string { - if e.Cause != nil { - return fmt.Sprintf("stream error: stream ID %d; %v; %v", e.StreamID, e.Code, e.Cause) - } - return fmt.Sprintf("stream error: stream ID %d; %v", e.StreamID, e.Code) -} - -// 6.9.1 The Flow Control Window -// "If a sender receives a WINDOW_UPDATE that causes a flow control -// window to exceed this maximum it MUST terminate either the stream -// or the connection, as appropriate. For streams, [...]; for the -// connection, a GOAWAY frame with a FLOW_CONTROL_ERROR code." -type goAwayFlowError struct{} - -func (goAwayFlowError) Error() string { return "connection exceeded flow control window size" } - -// connErrorReason wraps a ConnectionError with an informative error about why it occurs. - -// Errors of this type are only returned by the frame parser functions -// and converted into ConnectionError(ErrCodeProtocol). 
-type connError struct { - Code ErrCode - Reason string -} - -func (e connError) Error() string { - return fmt.Sprintf("http2: connection error: %v: %v", e.Code, e.Reason) -} - -type pseudoHeaderError string - -func (e pseudoHeaderError) Error() string { - return fmt.Sprintf("invalid pseudo-header %q", string(e)) -} - -type duplicatePseudoHeaderError string - -func (e duplicatePseudoHeaderError) Error() string { - return fmt.Sprintf("duplicate pseudo-header %q", string(e)) -} - -type headerFieldNameError string - -func (e headerFieldNameError) Error() string { - return fmt.Sprintf("invalid header field name %q", string(e)) -} - -type headerFieldValueError string - -func (e headerFieldValueError) Error() string { - return fmt.Sprintf("invalid header field value %q", string(e)) -} - -var ( - errMixPseudoHeaderTypes = errors.New("mix of request and response pseudo headers") - errPseudoAfterRegular = errors.New("pseudo header field after regular") -) diff --git a/vendor/golang.org/x/net/http2/fixed_buffer.go b/vendor/golang.org/x/net/http2/fixed_buffer.go deleted file mode 100644 index 47da0f0bf71f..000000000000 --- a/vendor/golang.org/x/net/http2/fixed_buffer.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package http2 - -import ( - "errors" -) - -// fixedBuffer is an io.ReadWriter backed by a fixed size buffer. -// It never allocates, but moves old data as new data is written. -type fixedBuffer struct { - buf []byte - r, w int -} - -var ( - errReadEmpty = errors.New("read from empty fixedBuffer") - errWriteFull = errors.New("write on full fixedBuffer") -) - -// Read copies bytes from the buffer into p. -// It is an error to read when no data is available. -func (b *fixedBuffer) Read(p []byte) (n int, err error) { - if b.r == b.w { - return 0, errReadEmpty - } - n = copy(p, b.buf[b.r:b.w]) - b.r += n - if b.r == b.w { - b.r = 0 - b.w = 0 - } - return n, nil -} - -// Len returns the number of bytes of the unread portion of the buffer. -func (b *fixedBuffer) Len() int { - return b.w - b.r -} - -// Write copies bytes from p into the buffer. -// It is an error to write more data than the buffer can hold. -func (b *fixedBuffer) Write(p []byte) (n int, err error) { - // Slide existing data to beginning. - if b.r > 0 && len(p) > len(b.buf)-b.w { - copy(b.buf, b.buf[b.r:b.w]) - b.w -= b.r - b.r = 0 - } - - // Write new data. - n = copy(b.buf[b.w:], p) - b.w += n - if n < len(p) { - err = errWriteFull - } - return n, err -} diff --git a/vendor/golang.org/x/net/http2/flow.go b/vendor/golang.org/x/net/http2/flow.go deleted file mode 100644 index 957de25420d9..000000000000 --- a/vendor/golang.org/x/net/http2/flow.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Flow control - -package http2 - -// flow is the flow control window's size. -type flow struct { - // n is the number of DATA bytes we're allowed to send. - // A flow is kept both on a conn and a per-stream. - n int32 - - // conn points to the shared connection-level flow that is - // shared by all streams on that conn. It is nil for the flow - // that's on the conn directly. 
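fixedBuffer, shown in full above, never grows: Write fails with errWriteFull once the fixed slice is full, and Read resets both cursors when the buffer drains. A hypothetical in-package test sketching that contract (fixedBuffer and errWriteFull are unexported, so this only compiles inside package http2):

    package http2

    import "testing"

    func TestFixedBufferSketch(t *testing.T) {
    	b := fixedBuffer{buf: make([]byte, 4)}

    	// Only 4 of 6 bytes fit; the overflow is reported, not buffered.
    	if n, err := b.Write([]byte("abcdef")); n != 4 || err != errWriteFull {
    		t.Fatalf("Write = %d, %v; want 4, errWriteFull", n, err)
    	}

    	// Reading everything resets r and w to zero for reuse.
    	p := make([]byte, 8)
    	if n, err := b.Read(p); n != 4 || err != nil || string(p[:4]) != "abcd" {
    		t.Fatalf("Read = %d, %v, %q", n, err, p[:4])
    	}
    	if b.Len() != 0 {
    		t.Fatalf("Len = %d; want 0", b.Len())
    	}
    }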
- conn *flow -} - -func (f *flow) setConnFlow(cf *flow) { f.conn = cf } - -func (f *flow) available() int32 { - n := f.n - if f.conn != nil && f.conn.n < n { - n = f.conn.n - } - return n -} - -func (f *flow) take(n int32) { - if n > f.available() { - panic("internal error: took too much") - } - f.n -= n - if f.conn != nil { - f.conn.n -= n - } -} - -// add adds n bytes (positive or negative) to the flow control window. -// It returns false if the sum would exceed 2^31-1. -func (f *flow) add(n int32) bool { - remain := (1<<31 - 1) - f.n - if n > remain { - return false - } - f.n += n - return true -} diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go deleted file mode 100644 index 358833fedfe4..000000000000 --- a/vendor/golang.org/x/net/http2/frame.go +++ /dev/null @@ -1,1544 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package http2 - -import ( - "bytes" - "encoding/binary" - "errors" - "fmt" - "io" - "log" - "strings" - "sync" - - "golang.org/x/net/http2/hpack" - "golang.org/x/net/lex/httplex" -) - -const frameHeaderLen = 9 - -var padZeros = make([]byte, 255) // zeros for padding - -// A FrameType is a registered frame type as defined in -// http://http2.github.io/http2-spec/#rfc.section.11.2 -type FrameType uint8 - -const ( - FrameData FrameType = 0x0 - FrameHeaders FrameType = 0x1 - FramePriority FrameType = 0x2 - FrameRSTStream FrameType = 0x3 - FrameSettings FrameType = 0x4 - FramePushPromise FrameType = 0x5 - FramePing FrameType = 0x6 - FrameGoAway FrameType = 0x7 - FrameWindowUpdate FrameType = 0x8 - FrameContinuation FrameType = 0x9 -) - -var frameName = map[FrameType]string{ - FrameData: "DATA", - FrameHeaders: "HEADERS", - FramePriority: "PRIORITY", - FrameRSTStream: "RST_STREAM", - FrameSettings: "SETTINGS", - FramePushPromise: "PUSH_PROMISE", - FramePing: "PING", - FrameGoAway: "GOAWAY", - FrameWindowUpdate: "WINDOW_UPDATE", - FrameContinuation: "CONTINUATION", -} - -func (t FrameType) String() string { - if s, ok := frameName[t]; ok { - return s - } - return fmt.Sprintf("UNKNOWN_FRAME_TYPE_%d", uint8(t)) -} - -// Flags is a bitmask of HTTP/2 flags. -// The meaning of flags varies depending on the frame type. -type Flags uint8 - -// Has reports whether f contains all (0 or more) flags in v. -func (f Flags) Has(v Flags) bool { - return (f & v) == v -} - -// Frame-specific FrameHeader flag bits. 
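The flow type above tracks two windows at once: available() is effectively min(stream window, connection window), and take() debits both. A small worked example of that arithmetic (window sizes are illustrative):

    package main

    import "fmt"

    func main() {
    	// Connection window: 100 bytes; one stream's window: 1000 bytes.
    	conn, stream := int32(100), int32(1000)

    	// available() is the minimum: only 100 bytes may be sent now,
    	// even though the stream window alone would allow 1000.
    	avail := stream
    	if conn < avail {
    		avail = conn
    	}
    	fmt.Println(avail) // 100

    	// take(40) debits both windows, as flow.take does above.
    	conn -= 40
    	stream -= 40
    	fmt.Println(conn, stream) // 60 960
    }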
-const ( - // Data Frame - FlagDataEndStream Flags = 0x1 - FlagDataPadded Flags = 0x8 - - // Headers Frame - FlagHeadersEndStream Flags = 0x1 - FlagHeadersEndHeaders Flags = 0x4 - FlagHeadersPadded Flags = 0x8 - FlagHeadersPriority Flags = 0x20 - - // Settings Frame - FlagSettingsAck Flags = 0x1 - - // Ping Frame - FlagPingAck Flags = 0x1 - - // Continuation Frame - FlagContinuationEndHeaders Flags = 0x4 - - FlagPushPromiseEndHeaders Flags = 0x4 - FlagPushPromisePadded Flags = 0x8 -) - -var flagName = map[FrameType]map[Flags]string{ - FrameData: { - FlagDataEndStream: "END_STREAM", - FlagDataPadded: "PADDED", - }, - FrameHeaders: { - FlagHeadersEndStream: "END_STREAM", - FlagHeadersEndHeaders: "END_HEADERS", - FlagHeadersPadded: "PADDED", - FlagHeadersPriority: "PRIORITY", - }, - FrameSettings: { - FlagSettingsAck: "ACK", - }, - FramePing: { - FlagPingAck: "ACK", - }, - FrameContinuation: { - FlagContinuationEndHeaders: "END_HEADERS", - }, - FramePushPromise: { - FlagPushPromiseEndHeaders: "END_HEADERS", - FlagPushPromisePadded: "PADDED", - }, -} - -// a frameParser parses a frame given its FrameHeader and payload -// bytes. The length of payload will always equal fh.Length (which -// might be 0). -type frameParser func(fh FrameHeader, payload []byte) (Frame, error) - -var frameParsers = map[FrameType]frameParser{ - FrameData: parseDataFrame, - FrameHeaders: parseHeadersFrame, - FramePriority: parsePriorityFrame, - FrameRSTStream: parseRSTStreamFrame, - FrameSettings: parseSettingsFrame, - FramePushPromise: parsePushPromise, - FramePing: parsePingFrame, - FrameGoAway: parseGoAwayFrame, - FrameWindowUpdate: parseWindowUpdateFrame, - FrameContinuation: parseContinuationFrame, -} - -func typeFrameParser(t FrameType) frameParser { - if f := frameParsers[t]; f != nil { - return f - } - return parseUnknownFrame -} - -// A FrameHeader is the 9 byte header of all HTTP/2 frames. -// -// See http://http2.github.io/http2-spec/#FrameHeader -type FrameHeader struct { - valid bool // caller can access []byte fields in the Frame - - // Type is the 1 byte frame type. There are ten standard frame - // types, but extension frame types may be written by WriteRawFrame - // and will be returned by ReadFrame (as UnknownFrame). - Type FrameType - - // Flags are the 1 byte of 8 potential bit flags per frame. - // They are specific to the frame type. - Flags Flags - - // Length is the length of the frame, not including the 9 byte header. - // The maximum size is one byte less than 16MB (uint24), but only - // frames up to 16KB are allowed without peer agreement. - Length uint32 - - // StreamID is which stream this frame is for. Certain frames - // are not stream-specific, in which case this field is 0. - StreamID uint32 -} - -// Header returns h. It exists so FrameHeaders can be embedded in other -// specific frame types and implement the Frame interface. 
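The flagName table above maps each frame type's bits to names, and the Has method from the previous hunk is a standard all-bits-set test: (f & v) == v. A self-contained sketch with two of the HEADERS flags:

    package main

    import "fmt"

    type Flags uint8

    const (
    	FlagHeadersEndStream  Flags = 0x1
    	FlagHeadersEndHeaders Flags = 0x4
    )

    // Has mirrors the http2 method above: true only if every bit in v is set in f.
    func (f Flags) Has(v Flags) bool { return (f & v) == v }

    func main() {
    	f := FlagHeadersEndStream | FlagHeadersEndHeaders // 0x5
    	fmt.Println(f.Has(FlagHeadersEndHeaders))                        // true
    	fmt.Println(f.Has(FlagHeadersEndStream | FlagHeadersEndHeaders)) // true
    	fmt.Println(f.Has(0x8))                                          // false (PADDED not set)
    }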
-func (h FrameHeader) Header() FrameHeader { return h } - -func (h FrameHeader) String() string { - var buf bytes.Buffer - buf.WriteString("[FrameHeader ") - h.writeDebug(&buf) - buf.WriteByte(']') - return buf.String() -} - -func (h FrameHeader) writeDebug(buf *bytes.Buffer) { - buf.WriteString(h.Type.String()) - if h.Flags != 0 { - buf.WriteString(" flags=") - set := 0 - for i := uint8(0); i < 8; i++ { - if h.Flags&(1<<i) == 0 { - continue - } - set++ - if set > 1 { - buf.WriteByte('|') - } - name := flagName[h.Type][Flags(1<<i)] - if name != "" { - buf.WriteString(name) - } else { - fmt.Fprintf(buf, "0x%x", 1<<i) - } - } - } - if h.StreamID != 0 { - fmt.Fprintf(buf, " stream=%d", h.StreamID) - } - fmt.Fprintf(buf, " len=%d", h.Length) -} - -func (f *Framer) startWrite(ftype FrameType, flags Flags, streamID uint32) { - // Write the FrameHeader. - f.wbuf = append(f.wbuf[:0], - 0, // 3 bytes of length, filled in in endWrite - 0, - 0, - byte(ftype), - byte(flags), - byte(streamID>>24), - byte(streamID>>16), - byte(streamID>>8), - byte(streamID)) -} - -func (f *Framer) endWrite() error { - // Now that we know the final size, fill in the FrameHeader in - // the space previously reserved for it. Abuse append. - length := len(f.wbuf) - frameHeaderLen - if length >= (1 << 24) { - return ErrFrameTooLarge - } - _ = append(f.wbuf[:0], - byte(length>>16), - byte(length>>8), - byte(length)) - if f.logWrites { - f.logWrite() - } - - n, err := f.w.Write(f.wbuf) - if err == nil && n != len(f.wbuf) { - err = io.ErrShortWrite - } - return err -} - -func (f *Framer) logWrite() { - if f.debugFramer == nil { - f.debugFramerBuf = new(bytes.Buffer) - f.debugFramer = NewFramer(nil, f.debugFramerBuf) - f.debugFramer.logReads = false // we log it ourselves, saying "wrote" below - // Let us read anything, even if we accidentally wrote it - // in the wrong order: - f.debugFramer.AllowIllegalReads = true - } - f.debugFramerBuf.Write(f.wbuf) - fr, err := f.debugFramer.ReadFrame() - if err != nil { - f.debugWriteLoggerf("http2: Framer %p: failed to decode just-written frame", f) - return - } - f.debugWriteLoggerf("http2: Framer %p: wrote %v", f, summarizeFrame(fr)) -} - -func (f *Framer) writeByte(v byte) { f.wbuf = append(f.wbuf, v) } -func (f *Framer) writeBytes(v []byte) { f.wbuf = append(f.wbuf, v...) } -func (f *Framer) writeUint16(v uint16) { f.wbuf = append(f.wbuf, byte(v>>8), byte(v)) } -func (f *Framer) writeUint32(v uint32) { - f.wbuf = append(f.wbuf, byte(v>>24), byte(v>>16), byte(v>>8), byte(v)) -} - -const ( - minMaxFrameSize = 1 << 14 - maxFrameSize = 1<<24 - 1 -) - -// NewFramer returns a Framer that writes frames to w and reads them from r. -func NewFramer(w io.Writer, r io.Reader) *Framer { - fr := &Framer{ - w: w, - r: r, - logReads: logFrameReads, - logWrites: logFrameWrites, - debugReadLoggerf: log.Printf, - debugWriteLoggerf: log.Printf, - } - fr.getReadBuf = func(size uint32) []byte { - if cap(fr.readBuf) >= int(size) { - return fr.readBuf[:size] - } - fr.readBuf = make([]byte, size) - return fr.readBuf - } - fr.SetMaxReadFrameSize(maxFrameSize) - return fr -} - -// SetMaxReadFrameSize sets the maximum size of a frame -// that will be read by a subsequent call to ReadFrame. -// It is the caller's responsibility to advertise this -// limit with a SETTINGS frame. -func (fr *Framer) SetMaxReadFrameSize(v uint32) { - if v > maxFrameSize { - v = maxFrameSize - } - fr.maxReadSize = v -} - -// ErrorDetail returns a more detailed error of the last error -// returned by Framer.ReadFrame. For instance, if ReadFrame -// returns a StreamError with code PROTOCOL_ERROR, ErrorDetail -// will say exactly what was invalid. ErrorDetail is not guaranteed -// to return a non-nil value and like the rest of the http2 package, -// its return value is not protected by an API compatibility promise. -// ErrorDetail is reset after the next call to ReadFrame.
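startWrite and endWrite above split the 9-byte frame header across two steps because the 24-bit length is only known after the payload has been buffered. A standalone sketch of the header layout they produce (values are illustrative):

    package main

    import (
    	"encoding/binary"
    	"fmt"
    )

    // encodeHeader builds the 9-byte HTTP/2 frame header described above:
    // 24-bit length, 8-bit type, 8-bit flags, 1 reserved bit + 31-bit stream ID.
    func encodeHeader(length uint32, ftype, flags byte, streamID uint32) [9]byte {
    	var h [9]byte
    	h[0], h[1], h[2] = byte(length>>16), byte(length>>8), byte(length)
    	h[3] = ftype
    	h[4] = flags
    	binary.BigEndian.PutUint32(h[5:], streamID&(1<<31-1)) // clear the reserved bit
    	return h
    }

    func main() {
    	h := encodeHeader(5, 0x0 /* DATA */, 0x1 /* END_STREAM */, 1)
    	fmt.Printf("% x\n", h) // 00 00 05 00 01 00 00 00 01
    }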
-func (fr *Framer) ErrorDetail() error { - return fr.errDetail -} - -// ErrFrameTooLarge is returned from Framer.ReadFrame when the peer -// sends a frame that is larger than declared with SetMaxReadFrameSize. -var ErrFrameTooLarge = errors.New("http2: frame too large") - -// terminalReadFrameError reports whether err is an unrecoverable -// error from ReadFrame and no other frames should be read. -func terminalReadFrameError(err error) bool { - if _, ok := err.(StreamError); ok { - return false - } - return err != nil -} - -// ReadFrame reads a single frame. The returned Frame is only valid -// until the next call to ReadFrame. -// -// If the frame is larger than previously set with SetMaxReadFrameSize, the -// returned error is ErrFrameTooLarge. Other errors may be of type -// ConnectionError, StreamError, or anything else from the underlying -// reader. -func (fr *Framer) ReadFrame() (Frame, error) { - fr.errDetail = nil - if fr.lastFrame != nil { - fr.lastFrame.invalidate() - } - fh, err := readFrameHeader(fr.headerBuf[:], fr.r) - if err != nil { - return nil, err - } - if fh.Length > fr.maxReadSize { - return nil, ErrFrameTooLarge - } - payload := fr.getReadBuf(fh.Length) - if _, err := io.ReadFull(fr.r, payload); err != nil { - return nil, err - } - f, err := typeFrameParser(fh.Type)(fh, payload) - if err != nil { - if ce, ok := err.(connError); ok { - return nil, fr.connError(ce.Code, ce.Reason) - } - return nil, err - } - if err := fr.checkFrameOrder(f); err != nil { - return nil, err - } - if fr.logReads { - fr.debugReadLoggerf("http2: Framer %p: read %v", fr, summarizeFrame(f)) - } - if fh.Type == FrameHeaders && fr.ReadMetaHeaders != nil { - return fr.readMetaFrame(f.(*HeadersFrame)) - } - return f, nil -} - -// connError returns ConnectionError(code) but first -// stashes away a public reason so the caller can optionally relay it -// to the peer before hanging up on them. This might help others debug -// their implementations. -func (fr *Framer) connError(code ErrCode, reason string) error { - fr.errDetail = errors.New(reason) - return ConnectionError(code) -} - -// checkFrameOrder reports an error if f is an invalid frame to return -// next from ReadFrame. Mostly it checks whether HEADERS and -// CONTINUATION frames are contiguous. -func (fr *Framer) checkFrameOrder(f Frame) error { - last := fr.lastFrame - fr.lastFrame = f - if fr.AllowIllegalReads { - return nil - } - - fh := f.Header() - if fr.lastHeaderStream != 0 { - if fh.Type != FrameContinuation { - return fr.connError(ErrCodeProtocol, - fmt.Sprintf("got %s for stream %d; expected CONTINUATION following %s for stream %d", - fh.Type, fh.StreamID, - last.Header().Type, fr.lastHeaderStream)) - } - if fh.StreamID != fr.lastHeaderStream { - return fr.connError(ErrCodeProtocol, - fmt.Sprintf("got CONTINUATION for stream %d; expected stream %d", - fh.StreamID, fr.lastHeaderStream)) - } - } else if fh.Type == FrameContinuation { - return fr.connError(ErrCodeProtocol, fmt.Sprintf("unexpected CONTINUATION for stream %d", fh.StreamID)) - } - - switch fh.Type { - case FrameHeaders, FrameContinuation: - if fh.Flags.Has(FlagHeadersEndHeaders) { - fr.lastHeaderStream = 0 - } else { - fr.lastHeaderStream = fh.StreamID - } - } - - return nil -} - -// A DataFrame conveys arbitrary, variable-length sequences of octets -// associated with a stream.
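ReadFrame above returns one parsed frame per call and invalidates it on the next call, so callers process frames in a loop. A minimal read-loop sketch against the exported Framer API (the endpoint address is hypothetical, and a real HTTP/2 session would also need the connection preface and SETTINGS exchange):

    package main

    import (
    	"log"
    	"net"

    	"golang.org/x/net/http2"
    )

    func main() {
    	conn, err := net.Dial("tcp", "127.0.0.1:8443")
    	if err != nil {
    		log.Fatal(err)
    	}
    	defer conn.Close()

    	fr := http2.NewFramer(conn, conn) // writes to conn, reads from conn
    	fr.SetMaxReadFrameSize(1 << 20)   // caller must advertise this via SETTINGS

    	for {
    		f, err := fr.ReadFrame()
    		if err != nil {
    			log.Printf("read: %v (detail: %v)", err, fr.ErrorDetail())
    			return
    		}
    		switch f := f.(type) {
    		case *http2.SettingsFrame:
    			log.Printf("SETTINGS ack=%v", f.IsAck())
    		case *http2.DataFrame:
    			log.Printf("DATA %d bytes on stream %d", len(f.Data()), f.Header().StreamID)
    		default:
    			log.Printf("frame: %v", f.Header())
    		}
    	}
    }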
-// See http://http2.github.io/http2-spec/#rfc.section.6.1 -type DataFrame struct { - FrameHeader - data []byte -} - -func (f *DataFrame) StreamEnded() bool { - return f.FrameHeader.Flags.Has(FlagDataEndStream) -} - -// Data returns the frame's data octets, not including any padding -// size byte or padding suffix bytes. -// The caller must not retain the returned memory past the next -// call to ReadFrame. -func (f *DataFrame) Data() []byte { - f.checkValid() - return f.data -} - -func parseDataFrame(fh FrameHeader, payload []byte) (Frame, error) { - if fh.StreamID == 0 { - // DATA frames MUST be associated with a stream. If a - // DATA frame is received whose stream identifier - // field is 0x0, the recipient MUST respond with a - // connection error (Section 5.4.1) of type - // PROTOCOL_ERROR. - return nil, connError{ErrCodeProtocol, "DATA frame with stream ID 0"} - } - f := &DataFrame{ - FrameHeader: fh, - } - var padSize byte - if fh.Flags.Has(FlagDataPadded) { - var err error - payload, padSize, err = readByte(payload) - if err != nil { - return nil, err - } - } - if int(padSize) > len(payload) { - // If the length of the padding is greater than the - // length of the frame payload, the recipient MUST - // treat this as a connection error. - // Filed: https://github.com/http2/http2-spec/issues/610 - return nil, connError{ErrCodeProtocol, "pad size larger than data payload"} - } - f.data = payload[:len(payload)-int(padSize)] - return f, nil -} - -var ( - errStreamID = errors.New("invalid stream ID") - errDepStreamID = errors.New("invalid dependent stream ID") - errPadLength = errors.New("pad length too large") -) - -func validStreamIDOrZero(streamID uint32) bool { - return streamID&(1<<31) == 0 -} - -func validStreamID(streamID uint32) bool { - return streamID != 0 && streamID&(1<<31) == 0 -} - -// WriteData writes a DATA frame. -// -// It will perform exactly one Write to the underlying Writer. -// It is the caller's responsibility not to violate the maximum frame size -// and to not call other Write methods concurrently. -func (f *Framer) WriteData(streamID uint32, endStream bool, data []byte) error { - return f.WriteDataPadded(streamID, endStream, data, nil) -} - -// WriteDataPadded writes a DATA frame with optional padding. -// -// If pad is nil, the padding bit is not sent. -// The length of pad must not exceed 255 bytes. -// -// It will perform exactly one Write to the underlying Writer. -// It is the caller's responsibility not to violate the maximum frame size -// and to not call other Write methods concurrently. -func (f *Framer) WriteDataPadded(streamID uint32, endStream bool, data, pad []byte) error { - if !validStreamID(streamID) && !f.AllowIllegalWrites { - return errStreamID - } - if len(pad) > 255 { - return errPadLength - } - var flags Flags - if endStream { - flags |= FlagDataEndStream - } - if pad != nil { - flags |= FlagDataPadded - } - f.startWrite(FrameData, flags, streamID) - if pad != nil { - f.wbuf = append(f.wbuf, byte(len(pad))) - } - f.wbuf = append(f.wbuf, data...) - f.wbuf = append(f.wbuf, pad...) - return f.endWrite() -} - -// A SettingsFrame conveys configuration parameters that affect how -// endpoints communicate, such as preferences and constraints on peer -// behavior.
-// A SettingsFrame conveys configuration parameters that affect how
-// endpoints communicate, such as preferences and constraints on peer
-// behavior.
-//
-// See http://http2.github.io/http2-spec/#SETTINGS
-type SettingsFrame struct {
-	FrameHeader
-	p []byte
-}
-
-func parseSettingsFrame(fh FrameHeader, p []byte) (Frame, error) {
-	if fh.Flags.Has(FlagSettingsAck) && fh.Length > 0 {
-		// When this (ACK 0x1) bit is set, the payload of the
-		// SETTINGS frame MUST be empty. Receipt of a
-		// SETTINGS frame with the ACK flag set and a length
-		// field value other than 0 MUST be treated as a
-		// connection error (Section 5.4.1) of type
-		// FRAME_SIZE_ERROR.
-		return nil, ConnectionError(ErrCodeFrameSize)
-	}
-	if fh.StreamID != 0 {
-		// SETTINGS frames always apply to a connection,
-		// never a single stream. The stream identifier for a
-		// SETTINGS frame MUST be zero (0x0). If an endpoint
-		// receives a SETTINGS frame whose stream identifier
-		// field is anything other than 0x0, the endpoint MUST
-		// respond with a connection error (Section 5.4.1) of
-		// type PROTOCOL_ERROR.
-		return nil, ConnectionError(ErrCodeProtocol)
-	}
-	if len(p)%6 != 0 {
-		// Expecting a whole number of 6-byte settings.
-		return nil, ConnectionError(ErrCodeFrameSize)
-	}
-	f := &SettingsFrame{FrameHeader: fh, p: p}
-	if v, ok := f.Value(SettingInitialWindowSize); ok && v > (1<<31)-1 {
-		// Values above the maximum flow control window size of 2^31 - 1 MUST
-		// be treated as a connection error (Section 5.4.1) of type
-		// FLOW_CONTROL_ERROR.
-		return nil, ConnectionError(ErrCodeFlowControl)
-	}
-	return f, nil
-}
-
-func (f *SettingsFrame) IsAck() bool {
-	return f.FrameHeader.Flags.Has(FlagSettingsAck)
-}
-
-func (f *SettingsFrame) Value(s SettingID) (v uint32, ok bool) {
-	f.checkValid()
-	buf := f.p
-	for len(buf) > 0 {
-		settingID := SettingID(binary.BigEndian.Uint16(buf[:2]))
-		if settingID == s {
-			return binary.BigEndian.Uint32(buf[2:6]), true
-		}
-		buf = buf[6:]
-	}
-	return 0, false
-}
-
-// ForeachSetting runs fn for each setting.
-// It stops and returns the first error.
-func (f *SettingsFrame) ForeachSetting(fn func(Setting) error) error {
-	f.checkValid()
-	buf := f.p
-	for len(buf) > 0 {
-		if err := fn(Setting{
-			SettingID(binary.BigEndian.Uint16(buf[:2])),
-			binary.BigEndian.Uint32(buf[2:6]),
-		}); err != nil {
-			return err
-		}
-		buf = buf[6:]
-	}
-	return nil
-}
-
-// WriteSettings writes a SETTINGS frame with zero or more settings
-// specified and the ACK bit not set.
-//
-// It will perform exactly one Write to the underlying Writer.
-// It is the caller's responsibility to not call other Write methods concurrently.
-func (f *Framer) WriteSettings(settings ...Setting) error {
-	f.startWrite(FrameSettings, 0, 0)
-	for _, s := range settings {
-		f.writeUint16(uint16(s.ID))
-		f.writeUint32(s.Val)
-	}
-	return f.endWrite()
-}
-
-// WriteSettingsAck writes an empty SETTINGS frame with the ACK bit set.
-//
-// It will perform exactly one Write to the underlying Writer.
-// It is the caller's responsibility to not call other Write methods concurrently.
-func (f *Framer) WriteSettingsAck() error {
-	f.startWrite(FrameSettings, FlagSettingsAck, 0)
-	return f.endWrite()
-}
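NOTE (editor): a minimal sketch, not part of this diff, round-tripping a SETTINGS frame and walking its 6-byte entries with ForeachSetting; the setting values are illustrative.

package main

import (
	"bytes"
	"fmt"

	"golang.org/x/net/http2"
)

func main() {
	var wire bytes.Buffer
	fr := http2.NewFramer(&wire, &wire)
	fr.WriteSettings(
		http2.Setting{ID: http2.SettingMaxConcurrentStreams, Val: 100},
		http2.Setting{ID: http2.SettingInitialWindowSize, Val: 1 << 20},
	)
	f, err := fr.ReadFrame()
	if err != nil {
		panic(err)
	}
	sf := f.(*http2.SettingsFrame)
	sf.ForeachSetting(func(s http2.Setting) error {
		fmt.Printf("%v = %d\n", s.ID, s.Val) // entries come back in wire order
		return nil
	})
}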
-// A PingFrame is a mechanism for measuring a minimal round trip time
-// from the sender, as well as determining whether an idle connection
-// is still functional.
-// See http://http2.github.io/http2-spec/#rfc.section.6.7
-type PingFrame struct {
-	FrameHeader
-	Data [8]byte
-}
-
-func (f *PingFrame) IsAck() bool { return f.Flags.Has(FlagPingAck) }
-
-func parsePingFrame(fh FrameHeader, payload []byte) (Frame, error) {
-	if len(payload) != 8 {
-		return nil, ConnectionError(ErrCodeFrameSize)
-	}
-	if fh.StreamID != 0 {
-		return nil, ConnectionError(ErrCodeProtocol)
-	}
-	f := &PingFrame{FrameHeader: fh}
-	copy(f.Data[:], payload)
-	return f, nil
-}
-
-func (f *Framer) WritePing(ack bool, data [8]byte) error {
-	var flags Flags
-	if ack {
-		flags = FlagPingAck
-	}
-	f.startWrite(FramePing, flags, 0)
-	f.writeBytes(data[:])
-	return f.endWrite()
-}
-
-// A GoAwayFrame informs the remote peer to stop creating streams on this connection.
-// See http://http2.github.io/http2-spec/#rfc.section.6.8
-type GoAwayFrame struct {
-	FrameHeader
-	LastStreamID uint32
-	ErrCode      ErrCode
-	debugData    []byte
-}
-
-// DebugData returns any debug data in the GOAWAY frame. Its contents
-// are not defined.
-// The caller must not retain the returned memory past the next
-// call to ReadFrame.
-func (f *GoAwayFrame) DebugData() []byte {
-	f.checkValid()
-	return f.debugData
-}
-
-func parseGoAwayFrame(fh FrameHeader, p []byte) (Frame, error) {
-	if fh.StreamID != 0 {
-		return nil, ConnectionError(ErrCodeProtocol)
-	}
-	if len(p) < 8 {
-		return nil, ConnectionError(ErrCodeFrameSize)
-	}
-	return &GoAwayFrame{
-		FrameHeader:  fh,
-		LastStreamID: binary.BigEndian.Uint32(p[:4]) & (1<<31 - 1),
-		ErrCode:      ErrCode(binary.BigEndian.Uint32(p[4:8])),
-		debugData:    p[8:],
-	}, nil
-}
-
-func (f *Framer) WriteGoAway(maxStreamID uint32, code ErrCode, debugData []byte) error {
-	f.startWrite(FrameGoAway, 0, 0)
-	f.writeUint32(maxStreamID & (1<<31 - 1))
-	f.writeUint32(uint32(code))
-	f.writeBytes(debugData)
-	return f.endWrite()
-}
-
-// An UnknownFrame is the frame type returned when the frame type is unknown
-// or no specific frame type parser exists.
-type UnknownFrame struct {
-	FrameHeader
-	p []byte
-}
-
-// Payload returns the frame's payload (after the header). It is not
-// valid to call this method after a subsequent call to
-// Framer.ReadFrame, nor is it valid to retain the returned slice.
-// The memory is owned by the Framer and is invalidated when the next
-// frame is read.
-func (f *UnknownFrame) Payload() []byte {
-	f.checkValid()
-	return f.p
-}
-
-func parseUnknownFrame(fh FrameHeader, p []byte) (Frame, error) {
-	return &UnknownFrame{fh, p}, nil
-}
-
-// A WindowUpdateFrame is used to implement flow control.
-// See http://http2.github.io/http2-spec/#rfc.section.6.9
-type WindowUpdateFrame struct {
-	FrameHeader
-	Increment uint32 // never read with high bit set
-}
-
-func parseWindowUpdateFrame(fh FrameHeader, p []byte) (Frame, error) {
-	if len(p) != 4 {
-		return nil, ConnectionError(ErrCodeFrameSize)
-	}
-	inc := binary.BigEndian.Uint32(p[:4]) & 0x7fffffff // mask off high reserved bit
-	if inc == 0 {
-		// A receiver MUST treat the receipt of a
-		// WINDOW_UPDATE frame with a flow control window
-		// increment of 0 as a stream error (Section 5.4.2) of
-		// type PROTOCOL_ERROR; errors on the connection flow
-		// control window MUST be treated as a connection
-		// error (Section 5.4.1).
-		if fh.StreamID == 0 {
-			return nil, ConnectionError(ErrCodeProtocol)
-		}
-		return nil, streamError(fh.StreamID, ErrCodeProtocol)
-	}
-	return &WindowUpdateFrame{
-		FrameHeader: fh,
-		Increment:   inc,
-	}, nil
-}
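NOTE (editor): a tiny sketch, not part of this diff, showing the one wrinkle in WINDOW_UPDATE: the stream ID selects which flow-control window the credit applies to, zero for the connection and non-zero for a stream. IDs and increments are illustrative.

package main

import (
	"bytes"

	"golang.org/x/net/http2"
)

func main() {
	var wire bytes.Buffer
	fr := http2.NewFramer(&wire, nil)
	fr.WriteWindowUpdate(0, 1<<16) // connection-level credit
	fr.WriteWindowUpdate(3, 1024)  // credit for stream 3 only
}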
-// WriteWindowUpdate writes a WINDOW_UPDATE frame.
-// The increment value must be between 1 and 2,147,483,647, inclusive.
-// If the Stream ID is zero, the window update applies to the
-// connection as a whole.
-func (f *Framer) WriteWindowUpdate(streamID, incr uint32) error {
-	// "The legal range for the increment to the flow control window is 1 to 2^31-1 (2,147,483,647) octets."
-	if (incr < 1 || incr > 2147483647) && !f.AllowIllegalWrites {
-		return errors.New("illegal window increment value")
-	}
-	f.startWrite(FrameWindowUpdate, 0, streamID)
-	f.writeUint32(incr)
-	return f.endWrite()
-}
-
-// A HeadersFrame is used to open a stream and additionally carries a
-// header block fragment.
-type HeadersFrame struct {
-	FrameHeader
-
-	// Priority is set if FlagHeadersPriority is set in the FrameHeader.
-	Priority PriorityParam
-
-	headerFragBuf []byte // not owned
-}
-
-func (f *HeadersFrame) HeaderBlockFragment() []byte {
-	f.checkValid()
-	return f.headerFragBuf
-}
-
-func (f *HeadersFrame) HeadersEnded() bool {
-	return f.FrameHeader.Flags.Has(FlagHeadersEndHeaders)
-}
-
-func (f *HeadersFrame) StreamEnded() bool {
-	return f.FrameHeader.Flags.Has(FlagHeadersEndStream)
-}
-
-func (f *HeadersFrame) HasPriority() bool {
-	return f.FrameHeader.Flags.Has(FlagHeadersPriority)
-}
-
-func parseHeadersFrame(fh FrameHeader, p []byte) (_ Frame, err error) {
-	hf := &HeadersFrame{
-		FrameHeader: fh,
-	}
-	if fh.StreamID == 0 {
-		// HEADERS frames MUST be associated with a stream. If a HEADERS frame
-		// is received whose stream identifier field is 0x0, the recipient MUST
-		// respond with a connection error (Section 5.4.1) of type
-		// PROTOCOL_ERROR.
-		return nil, connError{ErrCodeProtocol, "HEADERS frame with stream ID 0"}
-	}
-	var padLength uint8
-	if fh.Flags.Has(FlagHeadersPadded) {
-		if p, padLength, err = readByte(p); err != nil {
-			return
-		}
-	}
-	if fh.Flags.Has(FlagHeadersPriority) {
-		var v uint32
-		p, v, err = readUint32(p)
-		if err != nil {
-			return nil, err
-		}
-		hf.Priority.StreamDep = v & 0x7fffffff
-		hf.Priority.Exclusive = (v != hf.Priority.StreamDep) // high bit was set
-		p, hf.Priority.Weight, err = readByte(p)
-		if err != nil {
-			return nil, err
-		}
-	}
-	if len(p)-int(padLength) <= 0 {
-		return nil, streamError(fh.StreamID, ErrCodeProtocol)
-	}
-	hf.headerFragBuf = p[:len(p)-int(padLength)]
-	return hf, nil
-}
-
-// HeadersFrameParam are the parameters for writing a HEADERS frame.
-type HeadersFrameParam struct {
-	// StreamID is the required Stream ID to initiate.
-	StreamID uint32
-	// BlockFragment is part (or all) of a Header Block.
-	BlockFragment []byte
-
-	// EndStream indicates that the header block is the last that
-	// the endpoint will send for the identified stream. Setting
-	// this flag causes the stream to enter one of "half closed"
-	// states.
-	EndStream bool
-
-	// EndHeaders indicates that this frame contains an entire
-	// header block and is not followed by any
-	// CONTINUATION frames.
-	EndHeaders bool
-
-	// PadLength is the optional number of bytes of zeros to add
-	// to this frame.
-	PadLength uint8
-
-	// Priority, if non-zero, includes stream priority information
-	// in the HEADERS frame.
-	Priority PriorityParam
-}
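NOTE (editor): HeadersFrameParam carries an opaque, already-encoded header block, so writing HEADERS normally pairs with the hpack Encoder deleted later in this same diff. A minimal sketch, not part of the diff; the header fields are illustrative.

package main

import (
	"bytes"

	"golang.org/x/net/http2"
	"golang.org/x/net/http2/hpack"
)

func main() {
	// Encode the header block first; the Framer does no HPACK itself.
	var hbuf bytes.Buffer
	enc := hpack.NewEncoder(&hbuf)
	enc.WriteField(hpack.HeaderField{Name: ":method", Value: "GET"})
	enc.WriteField(hpack.HeaderField{Name: ":path", Value: "/"})
	enc.WriteField(hpack.HeaderField{Name: ":scheme", Value: "https"})

	var wire bytes.Buffer
	fr := http2.NewFramer(&wire, nil)
	fr.WriteHeaders(http2.HeadersFrameParam{
		StreamID:      1,
		BlockFragment: hbuf.Bytes(),
		EndStream:     true, // request with no body
		EndHeaders:    true, // no CONTINUATION frames follow
	})
}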
-// WriteHeaders writes a single HEADERS frame.
-//
-// This is a low-level header writing method. Encoding headers and
-// splitting them into any necessary CONTINUATION frames is handled
-// elsewhere.
-//
-// It will perform exactly one Write to the underlying Writer.
-// It is the caller's responsibility to not call other Write methods concurrently.
-func (f *Framer) WriteHeaders(p HeadersFrameParam) error {
-	if !validStreamID(p.StreamID) && !f.AllowIllegalWrites {
-		return errStreamID
-	}
-	var flags Flags
-	if p.PadLength != 0 {
-		flags |= FlagHeadersPadded
-	}
-	if p.EndStream {
-		flags |= FlagHeadersEndStream
-	}
-	if p.EndHeaders {
-		flags |= FlagHeadersEndHeaders
-	}
-	if !p.Priority.IsZero() {
-		flags |= FlagHeadersPriority
-	}
-	f.startWrite(FrameHeaders, flags, p.StreamID)
-	if p.PadLength != 0 {
-		f.writeByte(p.PadLength)
-	}
-	if !p.Priority.IsZero() {
-		v := p.Priority.StreamDep
-		if !validStreamIDOrZero(v) && !f.AllowIllegalWrites {
-			return errDepStreamID
-		}
-		if p.Priority.Exclusive {
-			v |= 1 << 31
-		}
-		f.writeUint32(v)
-		f.writeByte(p.Priority.Weight)
-	}
-	f.wbuf = append(f.wbuf, p.BlockFragment...)
-	f.wbuf = append(f.wbuf, padZeros[:p.PadLength]...)
-	return f.endWrite()
-}
-
-// A PriorityFrame specifies the sender-advised priority of a stream.
-// See http://http2.github.io/http2-spec/#rfc.section.6.3
-type PriorityFrame struct {
-	FrameHeader
-	PriorityParam
-}
-
-// PriorityParam are the stream prioritization parameters.
-type PriorityParam struct {
-	// StreamDep is a 31-bit stream identifier for the
-	// stream that this stream depends on. Zero means no
-	// dependency.
-	StreamDep uint32
-
-	// Exclusive is whether the dependency is exclusive.
-	Exclusive bool
-
-	// Weight is the stream's zero-indexed weight. It should be
-	// set together with StreamDep, or neither should be set. Per
-	// the spec, "Add one to the value to obtain a weight between
-	// 1 and 256."
-	Weight uint8
-}
-
-func (p PriorityParam) IsZero() bool {
-	return p == PriorityParam{}
-}
-
-func parsePriorityFrame(fh FrameHeader, payload []byte) (Frame, error) {
-	if fh.StreamID == 0 {
-		return nil, connError{ErrCodeProtocol, "PRIORITY frame with stream ID 0"}
-	}
-	if len(payload) != 5 {
-		return nil, connError{ErrCodeFrameSize, fmt.Sprintf("PRIORITY frame payload size was %d; want 5", len(payload))}
-	}
-	v := binary.BigEndian.Uint32(payload[:4])
-	streamID := v & 0x7fffffff // mask off high bit
-	return &PriorityFrame{
-		FrameHeader: fh,
-		PriorityParam: PriorityParam{
-			Weight:    payload[4],
-			StreamDep: streamID,
-			Exclusive: streamID != v, // was high bit set?
-		},
-	}, nil
-}
-
-// WritePriority writes a PRIORITY frame.
-//
-// It will perform exactly one Write to the underlying Writer.
-// It is the caller's responsibility to not call other Write methods concurrently.
-func (f *Framer) WritePriority(streamID uint32, p PriorityParam) error {
-	if !validStreamID(streamID) && !f.AllowIllegalWrites {
-		return errStreamID
-	}
-	if !validStreamIDOrZero(p.StreamDep) {
-		return errDepStreamID
-	}
-	f.startWrite(FramePriority, 0, streamID)
-	v := p.StreamDep
-	if p.Exclusive {
-		v |= 1 << 31
-	}
-	f.writeUint32(v)
-	f.writeByte(p.Weight)
-	return f.endWrite()
-}
-
-// A RSTStreamFrame allows for abnormal termination of a stream.
-// See http://http2.github.io/http2-spec/#rfc.section.6.4
-type RSTStreamFrame struct {
-	FrameHeader
-	ErrCode ErrCode
-}
-
-func parseRSTStreamFrame(fh FrameHeader, p []byte) (Frame, error) {
-	if len(p) != 4 {
-		return nil, ConnectionError(ErrCodeFrameSize)
-	}
-	if fh.StreamID == 0 {
-		return nil, ConnectionError(ErrCodeProtocol)
-	}
-	return &RSTStreamFrame{fh, ErrCode(binary.BigEndian.Uint32(p[:4]))}, nil
-}
-// WriteRSTStream writes a RST_STREAM frame.
-//
-// It will perform exactly one Write to the underlying Writer.
-// It is the caller's responsibility to not call other Write methods concurrently.
-func (f *Framer) WriteRSTStream(streamID uint32, code ErrCode) error {
-	if !validStreamID(streamID) && !f.AllowIllegalWrites {
-		return errStreamID
-	}
-	f.startWrite(FrameRSTStream, 0, streamID)
-	f.writeUint32(uint32(code))
-	return f.endWrite()
-}
-
-// A ContinuationFrame is used to continue a sequence of header block fragments.
-// See http://http2.github.io/http2-spec/#rfc.section.6.10
-type ContinuationFrame struct {
-	FrameHeader
-	headerFragBuf []byte
-}
-
-func parseContinuationFrame(fh FrameHeader, p []byte) (Frame, error) {
-	if fh.StreamID == 0 {
-		return nil, connError{ErrCodeProtocol, "CONTINUATION frame with stream ID 0"}
-	}
-	return &ContinuationFrame{fh, p}, nil
-}
-
-func (f *ContinuationFrame) HeaderBlockFragment() []byte {
-	f.checkValid()
-	return f.headerFragBuf
-}
-
-func (f *ContinuationFrame) HeadersEnded() bool {
-	return f.FrameHeader.Flags.Has(FlagContinuationEndHeaders)
-}
-
-// WriteContinuation writes a CONTINUATION frame.
-//
-// It will perform exactly one Write to the underlying Writer.
-// It is the caller's responsibility to not call other Write methods concurrently.
-func (f *Framer) WriteContinuation(streamID uint32, endHeaders bool, headerBlockFragment []byte) error {
-	if !validStreamID(streamID) && !f.AllowIllegalWrites {
-		return errStreamID
-	}
-	var flags Flags
-	if endHeaders {
-		flags |= FlagContinuationEndHeaders
-	}
-	f.startWrite(FrameContinuation, flags, streamID)
-	f.wbuf = append(f.wbuf, headerBlockFragment...)
-	return f.endWrite()
-}
-
-// A PushPromiseFrame is used to initiate a server stream.
-// See http://http2.github.io/http2-spec/#rfc.section.6.6
-type PushPromiseFrame struct {
-	FrameHeader
-	PromiseID     uint32
-	headerFragBuf []byte // not owned
-}
-
-func (f *PushPromiseFrame) HeaderBlockFragment() []byte {
-	f.checkValid()
-	return f.headerFragBuf
-}
-
-func (f *PushPromiseFrame) HeadersEnded() bool {
-	return f.FrameHeader.Flags.Has(FlagPushPromiseEndHeaders)
-}
-
-func parsePushPromise(fh FrameHeader, p []byte) (_ Frame, err error) {
-	pp := &PushPromiseFrame{
-		FrameHeader: fh,
-	}
-	if pp.StreamID == 0 {
-		// PUSH_PROMISE frames MUST be associated with an existing,
-		// peer-initiated stream. The stream identifier of a
-		// PUSH_PROMISE frame indicates the stream it is associated
-		// with. If the stream identifier field specifies the value
-		// 0x0, a recipient MUST respond with a connection error
-		// (Section 5.4.1) of type PROTOCOL_ERROR.
-		return nil, ConnectionError(ErrCodeProtocol)
-	}
-	// The PUSH_PROMISE frame includes optional padding.
-	// Padding fields and flags are identical to those defined for DATA frames.
-	var padLength uint8
-	if fh.Flags.Has(FlagPushPromisePadded) {
-		if p, padLength, err = readByte(p); err != nil {
-			return
-		}
-	}
-
-	p, pp.PromiseID, err = readUint32(p)
-	if err != nil {
-		return
-	}
-	pp.PromiseID = pp.PromiseID & (1<<31 - 1)
-
-	if int(padLength) > len(p) {
-		// Like the DATA frame, error out if padding is longer than the body.
-		return nil, ConnectionError(ErrCodeProtocol)
-	}
-	pp.headerFragBuf = p[:len(p)-int(padLength)]
-	return pp, nil
-}
-
-// PushPromiseParam are the parameters for writing a PUSH_PROMISE frame.
-type PushPromiseParam struct {
-	// StreamID is the required Stream ID to initiate.
-	StreamID uint32
-
-	// PromiseID is the required Stream ID that this
-	// PUSH_PROMISE frame reserves.
-	PromiseID uint32
-
-	// BlockFragment is part (or all) of a Header Block.
-	BlockFragment []byte
-
-	// EndHeaders indicates that this frame contains an entire
-	// header block and is not followed by any
-	// CONTINUATION frames.
-	EndHeaders bool
-
-	// PadLength is the optional number of bytes of zeros to add
-	// to this frame.
-	PadLength uint8
-}
-
-// WritePushPromise writes a single PUSH_PROMISE frame.
-//
-// As with HEADERS frames, this is the low-level call for writing
-// individual frames. Continuation frames are handled elsewhere.
-//
-// It will perform exactly one Write to the underlying Writer.
-// It is the caller's responsibility to not call other Write methods concurrently.
-func (f *Framer) WritePushPromise(p PushPromiseParam) error {
-	if !validStreamID(p.StreamID) && !f.AllowIllegalWrites {
-		return errStreamID
-	}
-	var flags Flags
-	if p.PadLength != 0 {
-		flags |= FlagPushPromisePadded
-	}
-	if p.EndHeaders {
-		flags |= FlagPushPromiseEndHeaders
-	}
-	f.startWrite(FramePushPromise, flags, p.StreamID)
-	if p.PadLength != 0 {
-		f.writeByte(p.PadLength)
-	}
-	if !validStreamID(p.PromiseID) && !f.AllowIllegalWrites {
-		return errStreamID
-	}
-	f.writeUint32(p.PromiseID)
-	f.wbuf = append(f.wbuf, p.BlockFragment...)
-	f.wbuf = append(f.wbuf, padZeros[:p.PadLength]...)
-	return f.endWrite()
-}
-
-// WriteRawFrame writes a raw frame. This can be used to write
-// extension frames unknown to this package.
-func (f *Framer) WriteRawFrame(t FrameType, flags Flags, streamID uint32, payload []byte) error {
-	f.startWrite(t, flags, streamID)
-	f.writeBytes(payload)
-	return f.endWrite()
-}
-
-func readByte(p []byte) (remain []byte, b byte, err error) {
-	if len(p) == 0 {
-		return nil, 0, io.ErrUnexpectedEOF
-	}
-	return p[1:], p[0], nil
-}
-
-func readUint32(p []byte) (remain []byte, v uint32, err error) {
-	if len(p) < 4 {
-		return nil, 0, io.ErrUnexpectedEOF
-	}
-	return p[4:], binary.BigEndian.Uint32(p[:4]), nil
-}
-
-type streamEnder interface {
-	StreamEnded() bool
-}
-
-type headersEnder interface {
-	HeadersEnded() bool
-}
-
-type headersOrContinuation interface {
-	headersEnder
-	HeaderBlockFragment() []byte
-}
-
-// A MetaHeadersFrame is the representation of one HEADERS frame and
-// zero or more contiguous CONTINUATION frames and the decoding of
-// their HPACK-encoded contents.
-//
-// This type of frame does not appear on the wire and is only returned
-// by the Framer when Framer.ReadMetaHeaders is set.
-type MetaHeadersFrame struct {
-	*HeadersFrame
-
-	// Fields are the fields contained in the HEADERS and
-	// CONTINUATION frames. The underlying slice is owned by the
-	// Framer and must not be retained after the next call to
-	// ReadFrame.
-	//
-	// Fields are guaranteed to be in the correct http2 order and
-	// not have unknown pseudo header fields or invalid header
-	// field names or values. Required pseudo header fields may be
-	// missing, however. Use the MetaHeadersFrame.PseudoValue
-	// accessor method to access pseudo headers.
-	Fields []hpack.HeaderField
-
-	// Truncated is whether the max header list size limit was hit
-	// and Fields is incomplete. The hpack decoder state is still
-	// valid, however.
-	Truncated bool
-}
-
-// PseudoValue returns the given pseudo header field's value.
-// The provided pseudo field should not contain the leading colon.
-func (mh *MetaHeadersFrame) PseudoValue(pseudo string) string {
-	for _, hf := range mh.Fields {
-		if !hf.IsPseudo() {
-			return ""
-		}
-		if hf.Name[1:] == pseudo {
-			return hf.Value
-		}
-	}
-	return ""
-}
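NOTE (editor): a hedged sketch, not part of this diff, showing the MetaHeadersFrame path: once Framer.ReadMetaHeaders is assigned an hpack.Decoder, a HEADERS frame comes back fully decoded. Field names and values are illustrative.

package main

import (
	"bytes"
	"fmt"

	"golang.org/x/net/http2"
	"golang.org/x/net/http2/hpack"
)

func main() {
	var hbuf bytes.Buffer
	enc := hpack.NewEncoder(&hbuf)
	enc.WriteField(hpack.HeaderField{Name: ":method", Value: "GET"})
	enc.WriteField(hpack.HeaderField{Name: "user-agent", Value: "sketch"})

	var wire bytes.Buffer
	fr := http2.NewFramer(&wire, &wire)
	fr.WriteHeaders(http2.HeadersFrameParam{StreamID: 1, BlockFragment: hbuf.Bytes(), EndHeaders: true})

	// Enable HPACK decoding on the read side; nil emit func is fine here
	// because readMetaFrame installs its own.
	fr.ReadMetaHeaders = hpack.NewDecoder(4096, nil)
	f, err := fr.ReadFrame()
	if err != nil {
		panic(err)
	}
	mh := f.(*http2.MetaHeadersFrame)
	fmt.Println(mh.PseudoValue("method"), len(mh.RegularFields())) // GET 1
}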
-// RegularFields returns the regular (non-pseudo) header fields of mh.
-// The caller does not own the returned slice.
-func (mh *MetaHeadersFrame) RegularFields() []hpack.HeaderField {
-	for i, hf := range mh.Fields {
-		if !hf.IsPseudo() {
-			return mh.Fields[i:]
-		}
-	}
-	return nil
-}
-
-// PseudoFields returns the pseudo header fields of mh.
-// The caller does not own the returned slice.
-func (mh *MetaHeadersFrame) PseudoFields() []hpack.HeaderField {
-	for i, hf := range mh.Fields {
-		if !hf.IsPseudo() {
-			return mh.Fields[:i]
-		}
-	}
-	return mh.Fields
-}
-
-func (mh *MetaHeadersFrame) checkPseudos() error {
-	var isRequest, isResponse bool
-	pf := mh.PseudoFields()
-	for i, hf := range pf {
-		switch hf.Name {
-		case ":method", ":path", ":scheme", ":authority":
-			isRequest = true
-		case ":status":
-			isResponse = true
-		default:
-			return pseudoHeaderError(hf.Name)
-		}
-		// Check for duplicates.
-		// This would be a bad algorithm, but N is 4.
-		// And this doesn't allocate.
-		for _, hf2 := range pf[:i] {
-			if hf.Name == hf2.Name {
-				return duplicatePseudoHeaderError(hf.Name)
-			}
-		}
-	}
-	if isRequest && isResponse {
-		return errMixPseudoHeaderTypes
-	}
-	return nil
-}
-
-func (fr *Framer) maxHeaderStringLen() int {
-	v := fr.maxHeaderListSize()
-	if uint32(int(v)) == v {
-		return int(v)
-	}
-	// They had a crazy big number for MaxHeaderBytes anyway,
-	// so give them unlimited header lengths:
-	return 0
-}
-
-// readMetaFrame reads 0 or more CONTINUATION frames from fr,
-// merges them into the provided hf, and returns a MetaHeadersFrame
-// with the decoded hpack values.
-func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) {
-	if fr.AllowIllegalReads {
-		return nil, errors.New("illegal use of AllowIllegalReads with ReadMetaHeaders")
-	}
-	mh := &MetaHeadersFrame{
-		HeadersFrame: hf,
-	}
-	var remainSize = fr.maxHeaderListSize()
-	var sawRegular bool
-
-	var invalid error // pseudo header field errors
-	hdec := fr.ReadMetaHeaders
-	hdec.SetEmitEnabled(true)
-	hdec.SetMaxStringLength(fr.maxHeaderStringLen())
-	hdec.SetEmitFunc(func(hf hpack.HeaderField) {
-		if VerboseLogs && fr.logReads {
-			fr.debugReadLoggerf("http2: decoded hpack field %+v", hf)
-		}
-		if !httplex.ValidHeaderFieldValue(hf.Value) {
-			invalid = headerFieldValueError(hf.Value)
-		}
-		isPseudo := strings.HasPrefix(hf.Name, ":")
-		if isPseudo {
-			if sawRegular {
-				invalid = errPseudoAfterRegular
-			}
-		} else {
-			sawRegular = true
-			if !validWireHeaderFieldName(hf.Name) {
-				invalid = headerFieldNameError(hf.Name)
-			}
-		}
-
-		if invalid != nil {
-			hdec.SetEmitEnabled(false)
-			return
-		}
-
-		size := hf.Size()
-		if size > remainSize {
-			hdec.SetEmitEnabled(false)
-			mh.Truncated = true
-			return
-		}
-		remainSize -= size
-
-		mh.Fields = append(mh.Fields, hf)
-	})
-	// Lose reference to MetaHeadersFrame:
-	defer hdec.SetEmitFunc(func(hf hpack.HeaderField) {})
-
-	var hc headersOrContinuation = hf
-	for {
-		frag := hc.HeaderBlockFragment()
-		if _, err := hdec.Write(frag); err != nil {
-			return nil, ConnectionError(ErrCodeCompression)
-		}
-
-		if hc.HeadersEnded() {
-			break
-		}
-		if f, err := fr.ReadFrame(); err != nil {
-			return nil, err
-		} else {
-			hc = f.(*ContinuationFrame) // guaranteed by checkFrameOrder
-		}
-	}
-
-	mh.HeadersFrame.headerFragBuf = nil
-	mh.HeadersFrame.invalidate()
-
-	if err := hdec.Close(); err != nil {
-		return nil, ConnectionError(ErrCodeCompression)
-	}
-	if invalid != nil {
-		fr.errDetail = invalid
-		if VerboseLogs {
-			log.Printf("http2: invalid header: %v", invalid)
-		}
-		return nil, StreamError{mh.StreamID, ErrCodeProtocol, invalid}
-	}
-	if err := mh.checkPseudos(); err != nil {
-		fr.errDetail = err
-		if VerboseLogs {
-			log.Printf("http2: invalid pseudo headers: %v", err)
-		}
-		return nil, StreamError{mh.StreamID, ErrCodeProtocol, err}
-	}
-	return mh, nil
-}
-
-func summarizeFrame(f Frame) string {
-	var buf bytes.Buffer
-	f.Header().writeDebug(&buf)
-	switch f := f.(type) {
-	case *SettingsFrame:
-		n := 0
-		f.ForeachSetting(func(s Setting) error {
-			n++
-			if n == 1 {
-				buf.WriteString(", settings:")
-			}
-			fmt.Fprintf(&buf, " %v=%v,", s.ID, s.Val)
-			return nil
-		})
-		if n > 0 {
-			buf.Truncate(buf.Len() - 1) // remove trailing comma
-		}
-	case *DataFrame:
-		data := f.Data()
-		const max = 256
-		if len(data) > max {
-			data = data[:max]
-		}
-		fmt.Fprintf(&buf, " data=%q", data)
-		if len(f.Data()) > max {
-			fmt.Fprintf(&buf, " (%d bytes omitted)", len(f.Data())-max)
-		}
-	case *WindowUpdateFrame:
-		if f.StreamID == 0 {
-			buf.WriteString(" (conn)")
-		}
-		fmt.Fprintf(&buf, " incr=%v", f.Increment)
-	case *PingFrame:
-		fmt.Fprintf(&buf, " ping=%q", f.Data[:])
-	case *GoAwayFrame:
-		fmt.Fprintf(&buf, " LastStreamID=%v ErrCode=%v Debug=%q",
-			f.LastStreamID, f.ErrCode, f.debugData)
-	case *RSTStreamFrame:
-		fmt.Fprintf(&buf, " ErrCode=%v", f.ErrCode)
-	}
-	return buf.String()
-}
diff --git a/vendor/golang.org/x/net/http2/go16.go b/vendor/golang.org/x/net/http2/go16.go
deleted file mode 100644
index 2b72855f5521..000000000000
--- a/vendor/golang.org/x/net/http2/go16.go
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build go1.6
-
-package http2
-
-import (
-	"crypto/tls"
-	"net/http"
-	"time"
-)
-
-func transportExpectContinueTimeout(t1 *http.Transport) time.Duration {
-	return t1.ExpectContinueTimeout
-}
-
-// isBadCipher reports whether the cipher is blacklisted by the HTTP/2 spec.
-func isBadCipher(cipher uint16) bool {
-	switch cipher {
-	case tls.TLS_RSA_WITH_RC4_128_SHA,
-		tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA,
-		tls.TLS_RSA_WITH_AES_128_CBC_SHA,
-		tls.TLS_RSA_WITH_AES_256_CBC_SHA,
-		tls.TLS_RSA_WITH_AES_128_GCM_SHA256,
-		tls.TLS_RSA_WITH_AES_256_GCM_SHA384,
-		tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA,
-		tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
-		tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
-		tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA,
-		tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,
-		tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
-		tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA:
-		// Reject cipher suites from Appendix A.
-		// "This list includes those cipher suites that do not
-		// offer an ephemeral key exchange and those that are
-		// based on the TLS null, stream or block cipher type"
-		return true
-	default:
-		return false
-	}
-}
diff --git a/vendor/golang.org/x/net/http2/go17.go b/vendor/golang.org/x/net/http2/go17.go
deleted file mode 100644
index 47b7fae081a3..000000000000
--- a/vendor/golang.org/x/net/http2/go17.go
+++ /dev/null
@@ -1,106 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build go1.7
-
-package http2
-
-import (
-	"context"
-	"net"
-	"net/http"
-	"net/http/httptrace"
-	"time"
-)
-
-type contextContext interface {
-	context.Context
-}
-
-func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx contextContext, cancel func()) {
-	ctx, cancel = context.WithCancel(context.Background())
-	ctx = context.WithValue(ctx, http.LocalAddrContextKey, c.LocalAddr())
-	if hs := opts.baseConfig(); hs != nil {
-		ctx = context.WithValue(ctx, http.ServerContextKey, hs)
-	}
-	return
-}
-
-func contextWithCancel(ctx contextContext) (_ contextContext, cancel func()) {
-	return context.WithCancel(ctx)
-}
-
-func requestWithContext(req *http.Request, ctx contextContext) *http.Request {
-	return req.WithContext(ctx)
-}
-
-type clientTrace httptrace.ClientTrace
-
-func reqContext(r *http.Request) context.Context { return r.Context() }
-
-func (t *Transport) idleConnTimeout() time.Duration {
-	if t.t1 != nil {
-		return t.t1.IdleConnTimeout
-	}
-	return 0
-}
-
-func setResponseUncompressed(res *http.Response) { res.Uncompressed = true }
-
-func traceGotConn(req *http.Request, cc *ClientConn) {
-	trace := httptrace.ContextClientTrace(req.Context())
-	if trace == nil || trace.GotConn == nil {
-		return
-	}
-	ci := httptrace.GotConnInfo{Conn: cc.tconn}
-	cc.mu.Lock()
-	ci.Reused = cc.nextStreamID > 1
-	ci.WasIdle = len(cc.streams) == 0 && ci.Reused
-	if ci.WasIdle && !cc.lastActive.IsZero() {
-		ci.IdleTime = time.Now().Sub(cc.lastActive)
-	}
-	cc.mu.Unlock()
-
-	trace.GotConn(ci)
-}
-
-func traceWroteHeaders(trace *clientTrace) {
-	if trace != nil && trace.WroteHeaders != nil {
-		trace.WroteHeaders()
-	}
-}
-
-func traceGot100Continue(trace *clientTrace) {
-	if trace != nil && trace.Got100Continue != nil {
-		trace.Got100Continue()
-	}
-}
-
-func traceWait100Continue(trace *clientTrace) {
-	if trace != nil && trace.Wait100Continue != nil {
-		trace.Wait100Continue()
-	}
-}
-
-func traceWroteRequest(trace *clientTrace, err error) {
-	if trace != nil && trace.WroteRequest != nil {
-		trace.WroteRequest(httptrace.WroteRequestInfo{Err: err})
-	}
-}
-
-func traceFirstResponseByte(trace *clientTrace) {
-	if trace != nil && trace.GotFirstResponseByte != nil {
-		trace.GotFirstResponseByte()
-	}
-}
-
-func requestTrace(req *http.Request) *clientTrace {
-	trace := httptrace.ContextClientTrace(req.Context())
-	return (*clientTrace)(trace)
-}
-
-// Ping sends a PING frame to the server and waits for the ack.
-func (cc *ClientConn) Ping(ctx context.Context) error {
-	return cc.ping(ctx)
-}
diff --git a/vendor/golang.org/x/net/http2/go17_not18.go b/vendor/golang.org/x/net/http2/go17_not18.go
deleted file mode 100644
index b4c52ecec3be..000000000000
--- a/vendor/golang.org/x/net/http2/go17_not18.go
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build go1.7,!go1.8
-
-package http2
-
-import "crypto/tls"
-
-// temporary copy of Go 1.7's private tls.Config.clone:
-func cloneTLSConfig(c *tls.Config) *tls.Config {
-	return &tls.Config{
-		Rand:                        c.Rand,
-		Time:                        c.Time,
-		Certificates:                c.Certificates,
-		NameToCertificate:           c.NameToCertificate,
-		GetCertificate:              c.GetCertificate,
-		RootCAs:                     c.RootCAs,
-		NextProtos:                  c.NextProtos,
-		ServerName:                  c.ServerName,
-		ClientAuth:                  c.ClientAuth,
-		ClientCAs:                   c.ClientCAs,
-		InsecureSkipVerify:          c.InsecureSkipVerify,
-		CipherSuites:                c.CipherSuites,
-		PreferServerCipherSuites:    c.PreferServerCipherSuites,
-		SessionTicketsDisabled:      c.SessionTicketsDisabled,
-		SessionTicketKey:            c.SessionTicketKey,
-		ClientSessionCache:          c.ClientSessionCache,
-		MinVersion:                  c.MinVersion,
-		MaxVersion:                  c.MaxVersion,
-		CurvePreferences:            c.CurvePreferences,
-		DynamicRecordSizingDisabled: c.DynamicRecordSizingDisabled,
-		Renegotiation:               c.Renegotiation,
-	}
-}
diff --git a/vendor/golang.org/x/net/http2/go18.go b/vendor/golang.org/x/net/http2/go18.go
deleted file mode 100644
index 633202c39fd1..000000000000
--- a/vendor/golang.org/x/net/http2/go18.go
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build go1.8
-
-package http2
-
-import (
-	"crypto/tls"
-	"io"
-	"net/http"
-)
-
-func cloneTLSConfig(c *tls.Config) *tls.Config { return c.Clone() }
-
-var _ http.Pusher = (*responseWriter)(nil)
-
-// Push implements http.Pusher.
-func (w *responseWriter) Push(target string, opts *http.PushOptions) error {
-	internalOpts := pushOptions{}
-	if opts != nil {
-		internalOpts.Method = opts.Method
-		internalOpts.Header = opts.Header
-	}
-	return w.push(target, internalOpts)
-}
-
-func configureServer18(h1 *http.Server, h2 *Server) error {
-	if h2.IdleTimeout == 0 {
-		if h1.IdleTimeout != 0 {
-			h2.IdleTimeout = h1.IdleTimeout
-		} else {
-			h2.IdleTimeout = h1.ReadTimeout
-		}
-	}
-	return nil
-}
-
-func shouldLogPanic(panicValue interface{}) bool {
-	return panicValue != nil && panicValue != http.ErrAbortHandler
-}
-
-func reqGetBody(req *http.Request) func() (io.ReadCloser, error) {
-	return req.GetBody
-}
-
-func reqBodyIsNoBody(body io.ReadCloser) bool {
-	return body == http.NoBody
-}
diff --git a/vendor/golang.org/x/net/http2/gotrack.go b/vendor/golang.org/x/net/http2/gotrack.go
deleted file mode 100644
index 9933c9f8c748..000000000000
--- a/vendor/golang.org/x/net/http2/gotrack.go
+++ /dev/null
@@ -1,170 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Defensive debug-only utility to track that functions run on the
-// goroutine that they're supposed to.
-
-package http2
-
-import (
-	"bytes"
-	"errors"
-	"fmt"
-	"os"
-	"runtime"
-	"strconv"
-	"sync"
-)
-
-var DebugGoroutines = os.Getenv("DEBUG_HTTP2_GOROUTINES") == "1"
-
-type goroutineLock uint64
-
-func newGoroutineLock() goroutineLock {
-	if !DebugGoroutines {
-		return 0
-	}
-	return goroutineLock(curGoroutineID())
-}
-
-func (g goroutineLock) check() {
-	if !DebugGoroutines {
-		return
-	}
-	if curGoroutineID() != uint64(g) {
-		panic("running on the wrong goroutine")
-	}
-}
-
-func (g goroutineLock) checkNotOn() {
-	if !DebugGoroutines {
-		return
-	}
-	if curGoroutineID() == uint64(g) {
-		panic("running on the wrong goroutine")
-	}
-}
-
-var goroutineSpace = []byte("goroutine ")
-
-func curGoroutineID() uint64 {
-	bp := littleBuf.Get().(*[]byte)
-	defer littleBuf.Put(bp)
-	b := *bp
-	b = b[:runtime.Stack(b, false)]
-	// Parse the 4707 out of "goroutine 4707 ["
-	b = bytes.TrimPrefix(b, goroutineSpace)
-	i := bytes.IndexByte(b, ' ')
-	if i < 0 {
-		panic(fmt.Sprintf("No space found in %q", b))
-	}
-	b = b[:i]
-	n, err := parseUintBytes(b, 10, 64)
-	if err != nil {
-		panic(fmt.Sprintf("Failed to parse goroutine ID out of %q: %v", b, err))
-	}
-	return n
-}
-
-var littleBuf = sync.Pool{
-	New: func() interface{} {
-		buf := make([]byte, 64)
-		return &buf
-	},
-}
-
-// parseUintBytes is like strconv.ParseUint, but using a []byte.
-func parseUintBytes(s []byte, base int, bitSize int) (n uint64, err error) {
-	var cutoff, maxVal uint64
-
-	if bitSize == 0 {
-		bitSize = int(strconv.IntSize)
-	}
-
-	s0 := s
-	switch {
-	case len(s) < 1:
-		err = strconv.ErrSyntax
-		goto Error
-
-	case 2 <= base && base <= 36:
-		// valid base; nothing to do
-
-	case base == 0:
-		// Look for octal, hex prefix.
-		switch {
-		case s[0] == '0' && len(s) > 1 && (s[1] == 'x' || s[1] == 'X'):
-			base = 16
-			s = s[2:]
-			if len(s) < 1 {
-				err = strconv.ErrSyntax
-				goto Error
-			}
-		case s[0] == '0':
-			base = 8
-		default:
-			base = 10
-		}
-
-	default:
-		err = errors.New("invalid base " + strconv.Itoa(base))
-		goto Error
-	}
-
-	n = 0
-	cutoff = cutoff64(base)
-	maxVal = 1<<uint(bitSize) - 1
-
-	for i := 0; i < len(s); i++ {
-		var v byte
-		d := s[i]
-		switch {
-		case '0' <= d && d <= '9':
-			v = d - '0'
-		case 'a' <= d && d <= 'z':
-			v = d - 'a' + 10
-		case 'A' <= d && d <= 'Z':
-			v = d - 'A' + 10
-		default:
-			n = 0
-			err = strconv.ErrSyntax
-			goto Error
-		}
-		if int(v) >= base {
-			n = 0
-			err = strconv.ErrSyntax
-			goto Error
-		}
-
-		if n >= cutoff {
-			// n*base overflows
-			n = 1<<64 - 1
-			err = strconv.ErrRange
-			goto Error
-		}
-		n *= uint64(base)
-
-		n1 := n + uint64(v)
-		if n1 < n || n1 > maxVal {
-			// n+v overflows
-			n = 1<<64 - 1
-			err = strconv.ErrRange
-			goto Error
-		}
-		n = n1
-	}
-
-	return n, nil
-
-Error:
-	return n, &strconv.NumError{Func: "ParseUint", Num: string(s0), Err: err}
-}
-
-// Return the first number n such that n*base >= 1<<64.
-func cutoff64(base int) uint64 {
-	if base < 2 {
-		return 0
-	}
-	return (1<<64-1)/uint64(base) + 1
-}
diff --git a/vendor/golang.org/x/net/http2/headermap.go b/vendor/golang.org/x/net/http2/headermap.go
deleted file mode 100644
index c2805f6ac4d5..000000000000
--- a/vendor/golang.org/x/net/http2/headermap.go
+++ /dev/null
@@ -1,78 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package http2
-
-import (
-	"net/http"
-	"strings"
-)
-
-var (
-	commonLowerHeader = map[string]string{} // Go-Canonical-Case -> lower-case
-	commonCanonHeader = map[string]string{} // lower-case -> Go-Canonical-Case
-)
-
-func init() {
-	for _, v := range []string{
-		"accept",
-		"accept-charset",
-		"accept-encoding",
-		"accept-language",
-		"accept-ranges",
-		"age",
-		"access-control-allow-origin",
-		"allow",
-		"authorization",
-		"cache-control",
-		"content-disposition",
-		"content-encoding",
-		"content-language",
-		"content-length",
-		"content-location",
-		"content-range",
-		"content-type",
-		"cookie",
-		"date",
-		"etag",
-		"expect",
-		"expires",
-		"from",
-		"host",
-		"if-match",
-		"if-modified-since",
-		"if-none-match",
-		"if-unmodified-since",
-		"last-modified",
-		"link",
-		"location",
-		"max-forwards",
-		"proxy-authenticate",
-		"proxy-authorization",
-		"range",
-		"referer",
-		"refresh",
-		"retry-after",
-		"server",
-		"set-cookie",
-		"strict-transport-security",
-		"trailer",
-		"transfer-encoding",
-		"user-agent",
-		"vary",
-		"via",
-		"www-authenticate",
-	} {
-		chk := http.CanonicalHeaderKey(v)
-		commonLowerHeader[chk] = v
-		commonCanonHeader[v] = chk
-	}
-}
-
-func lowerHeader(v string) string {
-	if s, ok := commonLowerHeader[v]; ok {
-		return s
-	}
-	return strings.ToLower(v)
-}
diff --git a/vendor/golang.org/x/net/http2/hpack/BUILD b/vendor/golang.org/x/net/http2/hpack/BUILD
deleted file mode 100644
index 7537b9f30a64..000000000000
--- a/vendor/golang.org/x/net/http2/hpack/BUILD
+++ /dev/null
@@ -1,30 +0,0 @@
-package(default_visibility = ["//visibility:public"])
-
-load(
-    "@io_bazel_rules_go//go:def.bzl",
-    "go_library",
-)
-
-go_library(
-    name = "go_default_library",
-    srcs = [
-        "encode.go",
-        "hpack.go",
-        "huffman.go",
-        "tables.go",
-    ],
-    importpath = "golang.org/x/net/http2/hpack",
-)
-
-filegroup(
-    name = "package-srcs",
-    srcs = glob(["**"]),
-    tags = ["automanaged"],
-    visibility = ["//visibility:private"],
-)
-
-filegroup(
-    name = "all-srcs",
-    srcs = [":package-srcs"],
-    tags = ["automanaged"],
-)
diff --git a/vendor/golang.org/x/net/http2/hpack/encode.go b/vendor/golang.org/x/net/http2/hpack/encode.go
deleted file mode 100644
index f9bb03398483..000000000000
--- a/vendor/golang.org/x/net/http2/hpack/encode.go
+++ /dev/null
@@ -1,251 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package hpack
-
-import (
-	"io"
-)
-
-const (
-	uint32Max              = ^uint32(0)
-	initialHeaderTableSize = 4096
-)
-
-type Encoder struct {
-	dynTab dynamicTable
-	// minSize is the minimum table size set by
-	// SetMaxDynamicTableSize after the previous Header Table Size
-	// Update.
-	minSize uint32
-	// maxSizeLimit is the maximum table size this encoder
-	// supports. It protects the encoder from excessively large
-	// dynamic table sizes.
-	maxSizeLimit uint32
-	// tableSizeUpdate indicates whether a "Header Table Size
-	// Update" is required.
-	tableSizeUpdate bool
-	w               io.Writer
-	buf             []byte
-}
-
-// NewEncoder returns a new Encoder which performs HPACK encoding. The
-// encoded data is written to w.
-func NewEncoder(w io.Writer) *Encoder {
-	e := &Encoder{
-		minSize:         uint32Max,
-		maxSizeLimit:    initialHeaderTableSize,
-		tableSizeUpdate: false,
-		w:               w,
-	}
-	e.dynTab.setMaxSize(initialHeaderTableSize)
-	return e
-}
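NOTE (editor): a minimal sketch, not part of this diff, showing the effect of the Encoder's dynamic table: the first WriteField of a field emits a multi-byte literal and indexes it, while an identical repeat collapses to a single indexed byte. The field name and value are illustrative.

package main

import (
	"bytes"
	"fmt"

	"golang.org/x/net/http2/hpack"
)

func main() {
	var buf bytes.Buffer
	enc := hpack.NewEncoder(&buf)
	f := hpack.HeaderField{Name: "x-demo", Value: "v1"}

	enc.WriteField(f) // literal with incremental indexing
	first := buf.Len()
	buf.Reset()

	enc.WriteField(f) // indexed reference into the dynamic table
	fmt.Printf("first: %d bytes, repeat: %d byte(s)\n", first, buf.Len())
}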
-// WriteField encodes f into a single Write to e's underlying Writer.
-// This function may also produce bytes for "Header Table Size Update"
-// if necessary. If produced, it is done before encoding f.
-func (e *Encoder) WriteField(f HeaderField) error {
-	e.buf = e.buf[:0]
-
-	if e.tableSizeUpdate {
-		e.tableSizeUpdate = false
-		if e.minSize < e.dynTab.maxSize {
-			e.buf = appendTableSize(e.buf, e.minSize)
-		}
-		e.minSize = uint32Max
-		e.buf = appendTableSize(e.buf, e.dynTab.maxSize)
-	}
-
-	idx, nameValueMatch := e.searchTable(f)
-	if nameValueMatch {
-		e.buf = appendIndexed(e.buf, idx)
-	} else {
-		indexing := e.shouldIndex(f)
-		if indexing {
-			e.dynTab.add(f)
-		}
-
-		if idx == 0 {
-			e.buf = appendNewName(e.buf, f, indexing)
-		} else {
-			e.buf = appendIndexedName(e.buf, f, idx, indexing)
-		}
-	}
-	n, err := e.w.Write(e.buf)
-	if err == nil && n != len(e.buf) {
-		err = io.ErrShortWrite
-	}
-	return err
-}
-
-// searchTable searches f in both static and dynamic header tables.
-// The static header table is searched first. Only when there is no
-// exact match for both name and value, the dynamic header table is
-// then searched. If there is no match, i is 0. If both name and value
-// match, i is the matched index and nameValueMatch becomes true. If
-// only name matches, i points to that index and nameValueMatch
-// becomes false.
-func (e *Encoder) searchTable(f HeaderField) (i uint64, nameValueMatch bool) {
-	for idx, hf := range staticTable {
-		if !constantTimeStringCompare(hf.Name, f.Name) {
-			continue
-		}
-		if i == 0 {
-			i = uint64(idx + 1)
-		}
-		if f.Sensitive {
-			continue
-		}
-		if !constantTimeStringCompare(hf.Value, f.Value) {
-			continue
-		}
-		i = uint64(idx + 1)
-		nameValueMatch = true
-		return
-	}
-
-	j, nameValueMatch := e.dynTab.search(f)
-	if nameValueMatch || (i == 0 && j != 0) {
-		i = j + uint64(len(staticTable))
-	}
-	return
-}
-
-// SetMaxDynamicTableSize changes the dynamic header table size to v.
-// The actual size is bounded by the value passed to
-// SetMaxDynamicTableSizeLimit.
-func (e *Encoder) SetMaxDynamicTableSize(v uint32) {
-	if v > e.maxSizeLimit {
-		v = e.maxSizeLimit
-	}
-	if v < e.minSize {
-		e.minSize = v
-	}
-	e.tableSizeUpdate = true
-	e.dynTab.setMaxSize(v)
-}
-
-// SetMaxDynamicTableSizeLimit changes the maximum value that can be
-// specified in SetMaxDynamicTableSize to v. By default, it is set to
-// 4096, which is the same as the default dynamic header table size
-// described in the HPACK specification. If the current maximum
-// dynamic header table size is strictly greater than v, "Header Table
-// Size Update" will be done in the next WriteField call and the
-// maximum dynamic header table size is truncated to v.
-func (e *Encoder) SetMaxDynamicTableSizeLimit(v uint32) {
-	e.maxSizeLimit = v
-	if e.dynTab.maxSize > v {
-		e.tableSizeUpdate = true
-		e.dynTab.setMaxSize(v)
-	}
-}
-
-// shouldIndex reports whether f should be indexed.
-func (e *Encoder) shouldIndex(f HeaderField) bool {
-	return !f.Sensitive && f.Size() <= e.dynTab.maxSize
-}
-
-// appendIndexed appends index i, as encoded in "Indexed Header Field"
-// representation, to dst and returns the extended buffer.
-func appendIndexed(dst []byte, i uint64) []byte {
-	first := len(dst)
-	dst = appendVarInt(dst, 7, i)
-	dst[first] |= 0x80
-	return dst
-}
-
-// appendNewName appends f, as encoded in one of "Literal Header Field
-// - New Name" representation variants, to dst and returns the
-// extended buffer.
-//
-// If f.Sensitive is true, "Never Indexed" representation is used. If
-// f.Sensitive is false and indexing is true, "Incremental Indexing"
-// representation is used.
-func appendNewName(dst []byte, f HeaderField, indexing bool) []byte {
-	dst = append(dst, encodeTypeByte(indexing, f.Sensitive))
-	dst = appendHpackString(dst, f.Name)
-	return appendHpackString(dst, f.Value)
-}
-
-// appendIndexedName appends f and index i referring indexed name
-// entry, as encoded in one of "Literal Header Field - Indexed Name"
-// representation variants, to dst and returns the extended buffer.
-//
-// If f.Sensitive is true, "Never Indexed" representation is used. If
-// f.Sensitive is false and indexing is true, "Incremental Indexing"
-// representation is used.
-func appendIndexedName(dst []byte, f HeaderField, i uint64, indexing bool) []byte {
-	first := len(dst)
-	var n byte
-	if indexing {
-		n = 6
-	} else {
-		n = 4
-	}
-	dst = appendVarInt(dst, n, i)
-	dst[first] |= encodeTypeByte(indexing, f.Sensitive)
-	return appendHpackString(dst, f.Value)
-}
-
-// appendTableSize appends v, as encoded in "Header Table Size Update"
-// representation, to dst and returns the extended buffer.
-func appendTableSize(dst []byte, v uint32) []byte {
-	first := len(dst)
-	dst = appendVarInt(dst, 5, uint64(v))
-	dst[first] |= 0x20
-	return dst
-}
-
-// appendVarInt appends i, as encoded in variable integer form using n
-// bit prefix, to dst and returns the extended buffer.
-//
-// See
-// http://http2.github.io/http2-spec/compression.html#integer.representation
-func appendVarInt(dst []byte, n byte, i uint64) []byte {
-	k := uint64((1 << n) - 1)
-	if i < k {
-		return append(dst, byte(i))
-	}
-	dst = append(dst, byte(k))
-	i -= k
-	for ; i >= 128; i >>= 7 {
-		dst = append(dst, byte(0x80|(i&0x7f)))
-	}
-	return append(dst, byte(i))
-}
-
-// appendHpackString appends s, as encoded in "String Literal"
-// representation, to dst and returns the extended buffer.
-//
-// s will be encoded in Huffman codes only when it produces a strictly
-// shorter byte string.
-func appendHpackString(dst []byte, s string) []byte {
-	huffmanLength := HuffmanEncodeLength(s)
-	if huffmanLength < uint64(len(s)) {
-		first := len(dst)
-		dst = appendVarInt(dst, 7, huffmanLength)
-		dst = AppendHuffmanString(dst, s)
-		dst[first] |= 0x80
-	} else {
-		dst = appendVarInt(dst, 7, uint64(len(s)))
-		dst = append(dst, s...)
-	}
-	return dst
-}
-
-// encodeTypeByte returns the type byte. If sensitive is true, the type
-// byte for "Never Indexed" representation is returned. If sensitive is
-// false and indexing is true, the type byte for "Incremental Indexing"
-// representation is returned. Otherwise, the type byte for "Without
-// Indexing" is returned.
-func encodeTypeByte(indexing, sensitive bool) byte {
-	if sensitive {
-		return 0x10
-	}
-	if indexing {
-		return 0x40
-	}
-	return 0
-}
diff --git a/vendor/golang.org/x/net/http2/hpack/hpack.go b/vendor/golang.org/x/net/http2/hpack/hpack.go
deleted file mode 100644
index 135b9f62cd64..000000000000
--- a/vendor/golang.org/x/net/http2/hpack/hpack.go
+++ /dev/null
@@ -1,542 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package hpack implements HPACK, a compression format for
-// efficiently representing HTTP header fields in the context of HTTP/2.
-//
-// See http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-09
-package hpack
-
-import (
-	"bytes"
-	"errors"
-	"fmt"
-)
-
-// A DecodingError is something the spec defines as a decoding error.
-type DecodingError struct {
-	Err error
-}
-
-func (de DecodingError) Error() string {
-	return fmt.Sprintf("decoding error: %v", de.Err)
-}
-
-// An InvalidIndexError is returned when an encoder references a table
-// entry before the static table or after the end of the dynamic table.
-type InvalidIndexError int
-
-func (e InvalidIndexError) Error() string {
-	return fmt.Sprintf("invalid indexed representation index %d", int(e))
-}
-
-// A HeaderField is a name-value pair. Both the name and value are
-// treated as opaque sequences of octets.
-type HeaderField struct {
-	Name, Value string
-
-	// Sensitive means that this header field should never be
-	// indexed.
-	Sensitive bool
-}
-
-// IsPseudo reports whether the header field is an http2 pseudo header.
-// That is, it reports whether it starts with a colon.
-// It is not otherwise guaranteed to be a valid pseudo header field,
-// though.
-func (hf HeaderField) IsPseudo() bool {
-	return len(hf.Name) != 0 && hf.Name[0] == ':'
-}
-
-func (hf HeaderField) String() string {
-	var suffix string
-	if hf.Sensitive {
-		suffix = " (sensitive)"
-	}
-	return fmt.Sprintf("header field %q = %q%s", hf.Name, hf.Value, suffix)
-}
-
-// Size returns the size of an entry per RFC 7541 section 4.1.
-func (hf HeaderField) Size() uint32 {
-	// http://http2.github.io/http2-spec/compression.html#rfc.section.4.1
-	// "The size of the dynamic table is the sum of the size of
-	// its entries. The size of an entry is the sum of its name's
-	// length in octets (as defined in Section 5.2), its value's
-	// length in octets (see Section 5.2), plus 32. The size of
-	// an entry is calculated using the length of the name and
-	// value without any Huffman encoding applied."
-
-	// This can overflow if somebody makes a large HeaderField
-	// Name and/or Value by hand, but we don't care, because that
-	// won't happen on the wire because the encoding doesn't allow
-	// it.
-	return uint32(len(hf.Name) + len(hf.Value) + 32)
-}
-
-// A Decoder is the decoding context for incremental processing of
-// header blocks.
-type Decoder struct {
-	dynTab dynamicTable
-	emit   func(f HeaderField)
-
-	emitEnabled bool // whether calls to emit are enabled
-	maxStrLen   int  // 0 means unlimited
-
-	// buf is the unparsed buffer. It's only written to
-	// saveBuf if it was truncated in the middle of a header
-	// block. Because it's usually not owned, we can only
-	// process it under Write.
-	buf []byte // not owned; only valid during Write
-
-	// saveBuf is previous data passed to Write which we weren't able
-	// to fully parse before. Unlike buf, we own this data.
-	saveBuf bytes.Buffer
-}
-
-// NewDecoder returns a new decoder with the provided maximum dynamic
-// table size. The emitFunc will be called for each valid field
-// parsed, in the same goroutine as calls to Write, before Write returns.
-func NewDecoder(maxDynamicTableSize uint32, emitFunc func(f HeaderField)) *Decoder {
-	d := &Decoder{
-		emit:        emitFunc,
-		emitEnabled: true,
-	}
-	d.dynTab.allowedMaxSize = maxDynamicTableSize
-	d.dynTab.setMaxSize(maxDynamicTableSize)
-	return d
-}
-
-// ErrStringLength is returned by Decoder.Write when the max string length
-// (as configured by Decoder.SetMaxStringLength) would be violated.
-var ErrStringLength = errors.New("hpack: string too long")
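NOTE (editor): a minimal sketch, not part of this diff, of the Decoder's incremental interface: fields are emitted from inside Write even when a header block arrives split across calls, because partial input is buffered in saveBuf. The field name and value are illustrative.

package main

import (
	"bytes"
	"fmt"

	"golang.org/x/net/http2/hpack"
)

func main() {
	var buf bytes.Buffer
	enc := hpack.NewEncoder(&buf)
	enc.WriteField(hpack.HeaderField{Name: "x-demo", Value: "split-across-writes"})

	dec := hpack.NewDecoder(4096, func(f hpack.HeaderField) {
		fmt.Println("emitted:", f)
	})
	wire := buf.Bytes()
	dec.Write(wire[:1]) // incomplete: buffered, nothing emitted yet
	dec.Write(wire[1:]) // completes the block; emit fires here
	if err := dec.Close(); err != nil {
		panic(err) // Close fails if a block was left truncated
	}
}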
-// SetMaxStringLength sets the maximum size of a HeaderField name or
-// value string. If a string exceeds this length (even after any
-// decompression), Write will return ErrStringLength.
-// A value of 0 means unlimited and is the default from NewDecoder.
-func (d *Decoder) SetMaxStringLength(n int) {
-	d.maxStrLen = n
-}
-
-// SetEmitFunc changes the callback used when new header fields
-// are decoded.
-// It must be non-nil. It does not affect EmitEnabled.
-func (d *Decoder) SetEmitFunc(emitFunc func(f HeaderField)) {
-	d.emit = emitFunc
-}
-
-// SetEmitEnabled controls whether the emitFunc provided to NewDecoder
-// should be called. The default is true.
-//
-// This facility exists to let servers enforce MAX_HEADER_LIST_SIZE
-// while still decoding and keeping in-sync with decoder state, but
-// without doing unnecessary decompression or generating unnecessary
-// garbage for header fields past the limit.
-func (d *Decoder) SetEmitEnabled(v bool) { d.emitEnabled = v }
-
-// EmitEnabled reports whether calls to the emitFunc provided to NewDecoder
-// are currently enabled. The default is true.
-func (d *Decoder) EmitEnabled() bool { return d.emitEnabled }
-
-// TODO: add method *Decoder.Reset(maxSize, emitFunc) to let callers re-use Decoders and their
-// underlying buffers for garbage reasons.
-
-func (d *Decoder) SetMaxDynamicTableSize(v uint32) {
-	d.dynTab.setMaxSize(v)
-}
-
-// SetAllowedMaxDynamicTableSize sets the upper bound that the encoded
-// stream (via dynamic table size updates) may set the maximum size
-// to.
-func (d *Decoder) SetAllowedMaxDynamicTableSize(v uint32) {
-	d.dynTab.allowedMaxSize = v
-}
-
-type dynamicTable struct {
-	// ents is the FIFO described at
-	// http://http2.github.io/http2-spec/compression.html#rfc.section.2.3.2
-	// The newest (low index) is appended at the end, and items are
-	// evicted from the front.
-	ents           []HeaderField
-	size           uint32
-	maxSize        uint32 // current maxSize
-	allowedMaxSize uint32 // maxSize may go up to this, inclusive
-}
-
-func (dt *dynamicTable) setMaxSize(v uint32) {
-	dt.maxSize = v
-	dt.evict()
-}
-
-// TODO: change dynamicTable to be a struct with a slice and a size int field,
-// per http://http2.github.io/http2-spec/compression.html#rfc.section.4.1:
-//
-//
-// Then make add increment the size. maybe the max size should move from Decoder to
-// dynamicTable and add should return an ok bool if there was enough space.
-//
-// Later we'll need a remove operation on dynamicTable.
-
-func (dt *dynamicTable) add(f HeaderField) {
-	dt.ents = append(dt.ents, f)
-	dt.size += f.Size()
-	dt.evict()
-}
-
-// If we're too big, evict old stuff (front of the slice).
-func (dt *dynamicTable) evict() {
-	base := dt.ents // keep base pointer of slice
-	for dt.size > dt.maxSize {
-		dt.size -= dt.ents[0].Size()
-		dt.ents = dt.ents[1:]
-	}
-
-	// Shift slice contents down if we evicted things.
-	if len(dt.ents) != len(base) {
-		copy(base, dt.ents)
-		dt.ents = base[:len(dt.ents)]
-	}
-}
-
-// constantTimeStringCompare compares string a and b in a constant
-// time manner.
-func constantTimeStringCompare(a, b string) bool {
-	if len(a) != len(b) {
-		return false
-	}
-
-	c := byte(0)
-
-	for i := 0; i < len(a); i++ {
-		c |= a[i] ^ b[i]
-	}
-
-	return c == 0
-}
-
-// search searches f in the table. The return value i is 0 if there is
-// no name match. If there is name match or name/value match, i is the
-// index of that entry (1-based). If both name and value match,
-// nameValueMatch becomes true.
-func (dt *dynamicTable) search(f HeaderField) (i uint64, nameValueMatch bool) {
-	l := len(dt.ents)
-	for j := l - 1; j >= 0; j-- {
-		ent := dt.ents[j]
-		if !constantTimeStringCompare(ent.Name, f.Name) {
-			continue
-		}
-		if i == 0 {
-			i = uint64(l - j)
-		}
-		if f.Sensitive {
-			continue
-		}
-		if !constantTimeStringCompare(ent.Value, f.Value) {
-			continue
-		}
-		i = uint64(l - j)
-		nameValueMatch = true
-		return
-	}
-	return
-}
-
-func (d *Decoder) maxTableIndex() int {
-	return len(d.dynTab.ents) + len(staticTable)
-}
-
-func (d *Decoder) at(i uint64) (hf HeaderField, ok bool) {
-	if i < 1 {
-		return
-	}
-	if i > uint64(d.maxTableIndex()) {
-		return
-	}
-	if i <= uint64(len(staticTable)) {
-		return staticTable[i-1], true
-	}
-	dents := d.dynTab.ents
-	return dents[len(dents)-(int(i)-len(staticTable))], true
-}
-
-// DecodeFull decodes an entire block.
-//
-// TODO: remove this method and make it incremental later? This is
-// easier for debugging now.
-func (d *Decoder) DecodeFull(p []byte) ([]HeaderField, error) {
-	var hf []HeaderField
-	saveFunc := d.emit
-	defer func() { d.emit = saveFunc }()
-	d.emit = func(f HeaderField) { hf = append(hf, f) }
-	if _, err := d.Write(p); err != nil {
-		return nil, err
-	}
-	if err := d.Close(); err != nil {
-		return nil, err
-	}
-	return hf, nil
-}
-
-func (d *Decoder) Close() error {
-	if d.saveBuf.Len() > 0 {
-		d.saveBuf.Reset()
-		return DecodingError{errors.New("truncated headers")}
-	}
-	return nil
-}
-
-func (d *Decoder) Write(p []byte) (n int, err error) {
-	if len(p) == 0 {
-		// Prevent state machine CPU attacks (making us redo
-		// work up to the point of finding out we don't have
-		// enough data)
-		return
-	}
-	// Only copy the data if we have to. Optimistically assume
-	// that p will contain a complete header block.
-	if d.saveBuf.Len() == 0 {
-		d.buf = p
-	} else {
-		d.saveBuf.Write(p)
-		d.buf = d.saveBuf.Bytes()
-		d.saveBuf.Reset()
-	}
-
-	for len(d.buf) > 0 {
-		err = d.parseHeaderFieldRepr()
-		if err == errNeedMore {
-			// Extra paranoia, making sure saveBuf won't
-			// get too large. All the varint and string
-			// reading code earlier should already catch
-			// overlong things and return ErrStringLength,
-			// but keep this as a last resort.
-			const varIntOverhead = 8 // conservative
-			if d.maxStrLen != 0 && int64(len(d.buf)) > 2*(int64(d.maxStrLen)+varIntOverhead) {
-				return 0, ErrStringLength
-			}
-			d.saveBuf.Write(d.buf)
-			return len(p), nil
-		}
-		if err != nil {
-			break
-		}
-	}
-	return len(p), err
-}
-
-// errNeedMore is an internal sentinel error value that means the
-// buffer is truncated and we need to read more data before we can
-// continue parsing.
-var errNeedMore = errors.New("need more data")
-
-type indexType int
-
-const (
-	indexedTrue indexType = iota
-	indexedFalse
-	indexedNever
-)
-
-func (v indexType) indexed() bool   { return v == indexedTrue }
-func (v indexType) sensitive() bool { return v == indexedNever }
-
-// returns errNeedMore if there isn't enough data available.
-// any other error is fatal.
-// consumes d.buf iff it returns nil.
-// precondition: must be called with len(d.buf) > 0
-func (d *Decoder) parseHeaderFieldRepr() error {
-	b := d.buf[0]
-	switch {
-	case b&128 != 0:
-		// Indexed representation.
-		// High bit set?
- // http://http2.github.io/http2-spec/compression.html#rfc.section.6.1 - return d.parseFieldIndexed() - case b&192 == 64: - // 6.2.1 Literal Header Field with Incremental Indexing - // 0b10xxxxxx: top two bits are 10 - // http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.1 - return d.parseFieldLiteral(6, indexedTrue) - case b&240 == 0: - // 6.2.2 Literal Header Field without Indexing - // 0b0000xxxx: top four bits are 0000 - // http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.2 - return d.parseFieldLiteral(4, indexedFalse) - case b&240 == 16: - // 6.2.3 Literal Header Field never Indexed - // 0b0001xxxx: top four bits are 0001 - // http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.3 - return d.parseFieldLiteral(4, indexedNever) - case b&224 == 32: - // 6.3 Dynamic Table Size Update - // Top three bits are '001'. - // http://http2.github.io/http2-spec/compression.html#rfc.section.6.3 - return d.parseDynamicTableSizeUpdate() - } - - return DecodingError{errors.New("invalid encoding")} -} - -// (same invariants and behavior as parseHeaderFieldRepr) -func (d *Decoder) parseFieldIndexed() error { - buf := d.buf - idx, buf, err := readVarInt(7, buf) - if err != nil { - return err - } - hf, ok := d.at(idx) - if !ok { - return DecodingError{InvalidIndexError(idx)} - } - d.buf = buf - return d.callEmit(HeaderField{Name: hf.Name, Value: hf.Value}) -} - -// (same invariants and behavior as parseHeaderFieldRepr) -func (d *Decoder) parseFieldLiteral(n uint8, it indexType) error { - buf := d.buf - nameIdx, buf, err := readVarInt(n, buf) - if err != nil { - return err - } - - var hf HeaderField - wantStr := d.emitEnabled || it.indexed() - if nameIdx > 0 { - ihf, ok := d.at(nameIdx) - if !ok { - return DecodingError{InvalidIndexError(nameIdx)} - } - hf.Name = ihf.Name - } else { - hf.Name, buf, err = d.readString(buf, wantStr) - if err != nil { - return err - } - } - hf.Value, buf, err = d.readString(buf, wantStr) - if err != nil { - return err - } - d.buf = buf - if it.indexed() { - d.dynTab.add(hf) - } - hf.Sensitive = it.sensitive() - return d.callEmit(hf) -} - -func (d *Decoder) callEmit(hf HeaderField) error { - if d.maxStrLen != 0 { - if len(hf.Name) > d.maxStrLen || len(hf.Value) > d.maxStrLen { - return ErrStringLength - } - } - if d.emitEnabled { - d.emit(hf) - } - return nil -} - -// (same invariants and behavior as parseHeaderFieldRepr) -func (d *Decoder) parseDynamicTableSizeUpdate() error { - buf := d.buf - size, buf, err := readVarInt(5, buf) - if err != nil { - return err - } - if size > uint64(d.dynTab.allowedMaxSize) { - return DecodingError{errors.New("dynamic table size update too large")} - } - d.dynTab.setMaxSize(uint32(size)) - d.buf = buf - return nil -} - -var errVarintOverflow = DecodingError{errors.New("varint integer overflow")} - -// readVarInt reads an unsigned variable length integer off the -// beginning of p. n is the parameter as described in -// http://http2.github.io/http2-spec/compression.html#rfc.section.5.1. -// -// n must always be between 1 and 8. -// -// The returned remain buffer is either a smaller suffix of p, or err != nil. -// The error is errNeedMore if p doesn't contain a complete integer. 
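The decoder below has a compact mirror image on the encoding side. As a point of reference, here is a minimal sketch of the matching encoder, following the algorithm in RFC 7541 section 5.1 (the name appendVarInt is illustrative, not part of this file, and the sketch assumes the n-bit prefix starts a fresh byte whose upper bits are zero):

// appendVarInt appends the integer i to dst using an n-bit prefix,
// per RFC 7541 section 5.1. Sketch only.
func appendVarInt(dst []byte, n byte, i uint64) []byte {
	k := uint64((1 << n) - 1)
	if i < k {
		return append(dst, byte(i)) // fits entirely in the prefix
	}
	dst = append(dst, byte(k)) // saturate the prefix with ones
	i -= k
	for ; i >= 128; i >>= 7 {
		dst = append(dst, byte(0x80|(i&0x7f))) // 7 payload bits plus continuation bit
	}
	return append(dst, byte(i)) // final byte, continuation bit clear
}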
-func readVarInt(n byte, p []byte) (i uint64, remain []byte, err error) {
- if n < 1 || n > 8 {
- panic("bad n")
- }
- if len(p) == 0 {
- return 0, p, errNeedMore
- }
- i = uint64(p[0])
- if n < 8 {
- i &= (1 << uint64(n)) - 1
- }
- if i < (1<<n)-1 {
- return i, p[1:], nil
- }
-
- origP := p
- p = p[1:]
- var m uint64
- for len(p) > 0 {
- b := p[0]
- p = p[1:]
- i += uint64(b&127) << m
- if b&128 == 0 {
- return i, p, nil
- }
- m += 7
- if m >= 63 { // TODO: proper overflow check. making this up.
- return 0, origP, errVarintOverflow
- }
- }
- return 0, origP, errNeedMore
-}
-
-// readString decodes an hpack string from p.
-//
-// wantStr is whether s will be used. If false, decompression and
-// []byte->string garbage are skipped if s will be ignored
-// anyway. This does mean that huffman decoding errors for non-indexed
-// strings past the MAX_HEADER_LIST_SIZE are ignored, but the server
-// is returning an error anyway, and because they're not indexed, the error
-// won't affect the decoding state.
-func (d *Decoder) readString(p []byte, wantStr bool) (s string, remain []byte, err error) {
- if len(p) == 0 {
- return "", p, errNeedMore
- }
- isHuff := p[0]&128 != 0
- strLen, p, err := readVarInt(7, p)
- if err != nil {
- return "", p, err
- }
- if d.maxStrLen != 0 && strLen > uint64(d.maxStrLen) {
- return "", nil, ErrStringLength
- }
- if uint64(len(p)) < strLen {
- return "", p, errNeedMore
- }
- if !isHuff {
- if wantStr {
- s = string(p[:strLen])
- }
- return s, p[strLen:], nil
- }
-
- if wantStr {
- buf := bufPool.Get().(*bytes.Buffer)
- buf.Reset() // don't trust others
- defer bufPool.Put(buf)
- if err := huffmanDecode(buf, d.maxStrLen, p[:strLen]); err != nil {
- buf.Reset()
- return "", nil, err
- }
- s = buf.String()
- buf.Reset() // be nice to GC
- }
- return s, p[strLen:], nil
-}
diff --git a/vendor/golang.org/x/net/http2/hpack/huffman.go b/vendor/golang.org/x/net/http2/hpack/huffman.go
deleted file mode 100644
index 8850e3946770..000000000000
--- a/vendor/golang.org/x/net/http2/hpack/huffman.go
+++ /dev/null
@@ -1,212 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package hpack
-
-import (
- "bytes"
- "errors"
- "io"
- "sync"
-)
-
-var bufPool = sync.Pool{
- New: func() interface{} { return new(bytes.Buffer) },
-}
-
-// HuffmanDecode decodes the string in v and writes the expanded
-// result to w, returning the number of bytes written to w and the
-// Write call's return value. At most one Write call is made.
-func HuffmanDecode(w io.Writer, v []byte) (int, error) {
- buf := bufPool.Get().(*bytes.Buffer)
- buf.Reset()
- defer bufPool.Put(buf)
- if err := huffmanDecode(buf, 0, v); err != nil {
- return 0, err
- }
- return w.Write(buf.Bytes())
-}
-
-// HuffmanDecodeToString decodes the string in v.
-func HuffmanDecodeToString(v []byte) (string, error) {
- buf := bufPool.Get().(*bytes.Buffer)
- buf.Reset()
- defer bufPool.Put(buf)
- if err := huffmanDecode(buf, 0, v); err != nil {
- return "", err
- }
- return buf.String(), nil
-}
-
-// ErrInvalidHuffman is returned for errors found decoding
-// Huffman-encoded strings.
-var ErrInvalidHuffman = errors.New("hpack: invalid Huffman-encoded data")
-
-// huffmanDecode decodes v to buf.
-// If maxLen is greater than 0, attempts to write more to buf than
-// maxLen bytes will return ErrStringLength.
-func huffmanDecode(buf *bytes.Buffer, maxLen int, v []byte) error {
- n := rootHuffmanNode
- // cur is the bit buffer that has not been fed into n.
- // cbits is the number of low order bits in cur that are valid.
- // sbits is the number of bits of the symbol prefix being decoded.
- cur, cbits, sbits := uint(0), uint8(0), uint8(0)
- for _, b := range v {
- cur = cur<<8 | uint(b)
- cbits += 8
- sbits += 8
- for cbits >= 8 {
- idx := byte(cur >> (cbits - 8))
- n = n.children[idx]
- if n == nil {
- return ErrInvalidHuffman
- }
- if n.children == nil {
- if maxLen != 0 && buf.Len() == maxLen {
- return ErrStringLength
- }
- buf.WriteByte(n.sym)
- cbits -= n.codeLen
- n = rootHuffmanNode
- sbits = cbits
- } else {
- cbits -= 8
- }
- }
- }
- for cbits > 0 {
- n = n.children[byte(cur<<(8-cbits))]
- if n == nil {
- return ErrInvalidHuffman
- }
- if n.children != nil || n.codeLen > cbits {
- break
- }
- if maxLen != 0 && buf.Len() == maxLen {
- return ErrStringLength
- }
- buf.WriteByte(n.sym)
- cbits -= n.codeLen
- n = rootHuffmanNode
- sbits = cbits
- }
- if sbits > 7 {
- // Either there was an incomplete symbol, or overlong padding.
- // Both are decoding errors per RFC 7541 section 5.2.
- return ErrInvalidHuffman
- }
- if mask := uint(1<<cbits - 1); cur&mask != mask {
- // Trailing bits must be EOS prefix.
- return ErrInvalidHuffman
- }
- return nil
-}
-
-type node struct {
- // children is non-nil for internal nodes
- children []*node
-
- // The following are only valid if children is nil:
- codeLen uint8 // length of the code in bits
- sym byte // the symbol in bytes
-}
-
-func newInternalNode() *node {
- return &node{children: make([]*node, 256)}
-}
-
-var rootHuffmanNode = newInternalNode()
-
-func init() {
- if len(huffmanCodes) != 256 {
- panic("unexpected size")
- }
- for i, code := range huffmanCodes {
- addDecoderNode(byte(i), code, huffmanCodeLen[i])
- }
-}
-
-func addDecoderNode(sym byte, code uint32, codeLen uint8) {
- cur := rootHuffmanNode
- for codeLen > 8 {
- codeLen -= 8
- i := uint8(code >> codeLen)
- if cur.children[i] == nil {
- cur.children[i] = newInternalNode()
- }
- cur = cur.children[i]
- }
- shift := 8 - codeLen
- start, end := int(uint8(code<<shift)), int(1<<shift)
- for i := start; i < start+end; i++ {
- cur.children[i] = &node{sym: sym, codeLen: codeLen}
- }
-}
-
-// AppendHuffmanString appends s, as encoded in Huffman codes, to dst
-// and returns the extended buffer.
-func AppendHuffmanString(dst []byte, s string) []byte {
- rembits := uint8(8)
-
- for i := 0; i < len(s); i++ {
- if rembits == 8 {
- dst = append(dst, 0)
- }
- dst, rembits = appendByteToHuffmanCode(dst, rembits, s[i])
- }
-
- if rembits < 8 {
- // special EOS symbol
- code := uint32(0x3fffffff)
- nbits := uint8(30)
-
- t := uint8(code >> (nbits - rembits))
- dst[len(dst)-1] |= t
- }
-
- return dst
-}
-
-// HuffmanEncodeLength returns the number of bytes required to encode
-// s in Huffman codes. The result is rounded up to a byte boundary.
-func HuffmanEncodeLength(s string) uint64 {
- n := uint64(0)
- for i := 0; i < len(s); i++ {
- n += uint64(huffmanCodeLen[s[i]])
- }
- return (n + 7) / 8
-}
-
-// appendByteToHuffmanCode appends Huffman code for c to dst and
-// returns the extended buffer and the remaining bits in the last
-// element. The appending is not byte aligned and the remaining bits
-// in the last element of dst are given in rembits.
-func appendByteToHuffmanCode(dst []byte, rembits uint8, c byte) ([]byte, uint8) {
- code := huffmanCodes[c]
- nbits := huffmanCodeLen[c]
-
- for {
- if rembits > nbits {
- t := uint8(code << (rembits - nbits))
- dst[len(dst)-1] |= t
- rembits -= nbits
- break
- }
-
- t := uint8(code >> (nbits - rembits))
- dst[len(dst)-1] |= t
-
- nbits -= rembits
- rembits = 8
-
- if nbits == 0 {
- break
- }
-
- dst = append(dst, 0)
- }
-
- return dst, rembits
-}
diff --git a/vendor/golang.org/x/net/http2/hpack/tables.go b/vendor/golang.org/x/net/http2/hpack/tables.go
deleted file mode 100644
index b9283a023302..000000000000
--- a/vendor/golang.org/x/net/http2/hpack/tables.go
+++ /dev/null
@@ -1,352 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
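Taken together, the Huffman helpers removed above round-trip. A small usage sketch against the exported hpack API; the 12-octet encoding of "www.example.com" matches the worked example in RFC 7541, Appendix C.4.1:

package main

import (
	"fmt"

	"golang.org/x/net/http2/hpack"
)

func main() {
	const s = "www.example.com"
	enc := hpack.AppendHuffmanString(nil, s)            // Huffman-encode s
	fmt.Println(len(enc), hpack.HuffmanEncodeLength(s)) // 12 12
	dec, err := hpack.HuffmanDecodeToString(enc)        // decode it back
	fmt.Println(dec, err)                               // www.example.com <nil>
}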
- -package hpack - -func pair(name, value string) HeaderField { - return HeaderField{Name: name, Value: value} -} - -// http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-07#appendix-B -var staticTable = [...]HeaderField{ - pair(":authority", ""), // index 1 (1-based) - pair(":method", "GET"), - pair(":method", "POST"), - pair(":path", "/"), - pair(":path", "/index.html"), - pair(":scheme", "http"), - pair(":scheme", "https"), - pair(":status", "200"), - pair(":status", "204"), - pair(":status", "206"), - pair(":status", "304"), - pair(":status", "400"), - pair(":status", "404"), - pair(":status", "500"), - pair("accept-charset", ""), - pair("accept-encoding", "gzip, deflate"), - pair("accept-language", ""), - pair("accept-ranges", ""), - pair("accept", ""), - pair("access-control-allow-origin", ""), - pair("age", ""), - pair("allow", ""), - pair("authorization", ""), - pair("cache-control", ""), - pair("content-disposition", ""), - pair("content-encoding", ""), - pair("content-language", ""), - pair("content-length", ""), - pair("content-location", ""), - pair("content-range", ""), - pair("content-type", ""), - pair("cookie", ""), - pair("date", ""), - pair("etag", ""), - pair("expect", ""), - pair("expires", ""), - pair("from", ""), - pair("host", ""), - pair("if-match", ""), - pair("if-modified-since", ""), - pair("if-none-match", ""), - pair("if-range", ""), - pair("if-unmodified-since", ""), - pair("last-modified", ""), - pair("link", ""), - pair("location", ""), - pair("max-forwards", ""), - pair("proxy-authenticate", ""), - pair("proxy-authorization", ""), - pair("range", ""), - pair("referer", ""), - pair("refresh", ""), - pair("retry-after", ""), - pair("server", ""), - pair("set-cookie", ""), - pair("strict-transport-security", ""), - pair("transfer-encoding", ""), - pair("user-agent", ""), - pair("vary", ""), - pair("via", ""), - pair("www-authenticate", ""), -} - -var huffmanCodes = [256]uint32{ - 0x1ff8, - 0x7fffd8, - 0xfffffe2, - 0xfffffe3, - 0xfffffe4, - 0xfffffe5, - 0xfffffe6, - 0xfffffe7, - 0xfffffe8, - 0xffffea, - 0x3ffffffc, - 0xfffffe9, - 0xfffffea, - 0x3ffffffd, - 0xfffffeb, - 0xfffffec, - 0xfffffed, - 0xfffffee, - 0xfffffef, - 0xffffff0, - 0xffffff1, - 0xffffff2, - 0x3ffffffe, - 0xffffff3, - 0xffffff4, - 0xffffff5, - 0xffffff6, - 0xffffff7, - 0xffffff8, - 0xffffff9, - 0xffffffa, - 0xffffffb, - 0x14, - 0x3f8, - 0x3f9, - 0xffa, - 0x1ff9, - 0x15, - 0xf8, - 0x7fa, - 0x3fa, - 0x3fb, - 0xf9, - 0x7fb, - 0xfa, - 0x16, - 0x17, - 0x18, - 0x0, - 0x1, - 0x2, - 0x19, - 0x1a, - 0x1b, - 0x1c, - 0x1d, - 0x1e, - 0x1f, - 0x5c, - 0xfb, - 0x7ffc, - 0x20, - 0xffb, - 0x3fc, - 0x1ffa, - 0x21, - 0x5d, - 0x5e, - 0x5f, - 0x60, - 0x61, - 0x62, - 0x63, - 0x64, - 0x65, - 0x66, - 0x67, - 0x68, - 0x69, - 0x6a, - 0x6b, - 0x6c, - 0x6d, - 0x6e, - 0x6f, - 0x70, - 0x71, - 0x72, - 0xfc, - 0x73, - 0xfd, - 0x1ffb, - 0x7fff0, - 0x1ffc, - 0x3ffc, - 0x22, - 0x7ffd, - 0x3, - 0x23, - 0x4, - 0x24, - 0x5, - 0x25, - 0x26, - 0x27, - 0x6, - 0x74, - 0x75, - 0x28, - 0x29, - 0x2a, - 0x7, - 0x2b, - 0x76, - 0x2c, - 0x8, - 0x9, - 0x2d, - 0x77, - 0x78, - 0x79, - 0x7a, - 0x7b, - 0x7ffe, - 0x7fc, - 0x3ffd, - 0x1ffd, - 0xffffffc, - 0xfffe6, - 0x3fffd2, - 0xfffe7, - 0xfffe8, - 0x3fffd3, - 0x3fffd4, - 0x3fffd5, - 0x7fffd9, - 0x3fffd6, - 0x7fffda, - 0x7fffdb, - 0x7fffdc, - 0x7fffdd, - 0x7fffde, - 0xffffeb, - 0x7fffdf, - 0xffffec, - 0xffffed, - 0x3fffd7, - 0x7fffe0, - 0xffffee, - 0x7fffe1, - 0x7fffe2, - 0x7fffe3, - 0x7fffe4, - 0x1fffdc, - 0x3fffd8, - 0x7fffe5, - 0x3fffd9, - 0x7fffe6, - 0x7fffe7, - 0xffffef, - 
0x3fffda, - 0x1fffdd, - 0xfffe9, - 0x3fffdb, - 0x3fffdc, - 0x7fffe8, - 0x7fffe9, - 0x1fffde, - 0x7fffea, - 0x3fffdd, - 0x3fffde, - 0xfffff0, - 0x1fffdf, - 0x3fffdf, - 0x7fffeb, - 0x7fffec, - 0x1fffe0, - 0x1fffe1, - 0x3fffe0, - 0x1fffe2, - 0x7fffed, - 0x3fffe1, - 0x7fffee, - 0x7fffef, - 0xfffea, - 0x3fffe2, - 0x3fffe3, - 0x3fffe4, - 0x7ffff0, - 0x3fffe5, - 0x3fffe6, - 0x7ffff1, - 0x3ffffe0, - 0x3ffffe1, - 0xfffeb, - 0x7fff1, - 0x3fffe7, - 0x7ffff2, - 0x3fffe8, - 0x1ffffec, - 0x3ffffe2, - 0x3ffffe3, - 0x3ffffe4, - 0x7ffffde, - 0x7ffffdf, - 0x3ffffe5, - 0xfffff1, - 0x1ffffed, - 0x7fff2, - 0x1fffe3, - 0x3ffffe6, - 0x7ffffe0, - 0x7ffffe1, - 0x3ffffe7, - 0x7ffffe2, - 0xfffff2, - 0x1fffe4, - 0x1fffe5, - 0x3ffffe8, - 0x3ffffe9, - 0xffffffd, - 0x7ffffe3, - 0x7ffffe4, - 0x7ffffe5, - 0xfffec, - 0xfffff3, - 0xfffed, - 0x1fffe6, - 0x3fffe9, - 0x1fffe7, - 0x1fffe8, - 0x7ffff3, - 0x3fffea, - 0x3fffeb, - 0x1ffffee, - 0x1ffffef, - 0xfffff4, - 0xfffff5, - 0x3ffffea, - 0x7ffff4, - 0x3ffffeb, - 0x7ffffe6, - 0x3ffffec, - 0x3ffffed, - 0x7ffffe7, - 0x7ffffe8, - 0x7ffffe9, - 0x7ffffea, - 0x7ffffeb, - 0xffffffe, - 0x7ffffec, - 0x7ffffed, - 0x7ffffee, - 0x7ffffef, - 0x7fffff0, - 0x3ffffee, -} - -var huffmanCodeLen = [256]uint8{ - 13, 23, 28, 28, 28, 28, 28, 28, 28, 24, 30, 28, 28, 30, 28, 28, - 28, 28, 28, 28, 28, 28, 30, 28, 28, 28, 28, 28, 28, 28, 28, 28, - 6, 10, 10, 12, 13, 6, 8, 11, 10, 10, 8, 11, 8, 6, 6, 6, - 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 7, 8, 15, 6, 12, 10, - 13, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, - 7, 7, 7, 7, 7, 7, 7, 7, 8, 7, 8, 13, 19, 13, 14, 6, - 15, 5, 6, 5, 6, 5, 6, 6, 6, 5, 7, 7, 6, 6, 6, 5, - 6, 7, 6, 5, 5, 6, 7, 7, 7, 7, 7, 15, 11, 14, 13, 28, - 20, 22, 20, 20, 22, 22, 22, 23, 22, 23, 23, 23, 23, 23, 24, 23, - 24, 24, 22, 23, 24, 23, 23, 23, 23, 21, 22, 23, 22, 23, 23, 24, - 22, 21, 20, 22, 22, 23, 23, 21, 23, 22, 22, 24, 21, 22, 23, 23, - 21, 21, 22, 21, 23, 22, 23, 23, 20, 22, 22, 22, 23, 22, 22, 23, - 26, 26, 20, 19, 22, 23, 22, 25, 26, 26, 26, 27, 27, 26, 24, 25, - 19, 21, 26, 27, 27, 26, 27, 24, 21, 21, 26, 26, 28, 27, 27, 27, - 20, 24, 20, 21, 22, 21, 21, 23, 22, 22, 25, 25, 24, 24, 26, 23, - 26, 27, 26, 26, 27, 27, 27, 27, 27, 28, 27, 27, 27, 27, 27, 26, -} diff --git a/vendor/golang.org/x/net/http2/http2.go b/vendor/golang.org/x/net/http2/http2.go deleted file mode 100644 index a91fed570691..000000000000 --- a/vendor/golang.org/x/net/http2/http2.go +++ /dev/null @@ -1,387 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package http2 implements the HTTP/2 protocol. -// -// This package is low-level and intended to be used directly by very -// few people. Most users will use it indirectly through the automatic -// use by the net/http package (from Go 1.6 and later). -// For use in earlier Go versions see ConfigureServer. (Transport support -// requires Go 1.6 or later) -// -// See https://http2.github.io/ for more information on HTTP/2. -// -// See https://http2.golang.org/ for a test server running this code. 
-// -package http2 - -import ( - "bufio" - "crypto/tls" - "errors" - "fmt" - "io" - "net/http" - "os" - "sort" - "strconv" - "strings" - "sync" - - "golang.org/x/net/lex/httplex" -) - -var ( - VerboseLogs bool - logFrameWrites bool - logFrameReads bool - inTests bool -) - -func init() { - e := os.Getenv("GODEBUG") - if strings.Contains(e, "http2debug=1") { - VerboseLogs = true - } - if strings.Contains(e, "http2debug=2") { - VerboseLogs = true - logFrameWrites = true - logFrameReads = true - } -} - -const ( - // ClientPreface is the string that must be sent by new - // connections from clients. - ClientPreface = "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n" - - // SETTINGS_MAX_FRAME_SIZE default - // http://http2.github.io/http2-spec/#rfc.section.6.5.2 - initialMaxFrameSize = 16384 - - // NextProtoTLS is the NPN/ALPN protocol negotiated during - // HTTP/2's TLS setup. - NextProtoTLS = "h2" - - // http://http2.github.io/http2-spec/#SettingValues - initialHeaderTableSize = 4096 - - initialWindowSize = 65535 // 6.9.2 Initial Flow Control Window Size - - defaultMaxReadFrameSize = 1 << 20 -) - -var ( - clientPreface = []byte(ClientPreface) -) - -type streamState int - -// HTTP/2 stream states. -// -// See http://tools.ietf.org/html/rfc7540#section-5.1. -// -// For simplicity, the server code merges "reserved (local)" into -// "half-closed (remote)". This is one less state transition to track. -// The only downside is that we send PUSH_PROMISEs slightly less -// liberally than allowable. More discussion here: -// https://lists.w3.org/Archives/Public/ietf-http-wg/2016JulSep/0599.html -// -// "reserved (remote)" is omitted since the client code does not -// support server push. -const ( - stateIdle streamState = iota - stateOpen - stateHalfClosedLocal - stateHalfClosedRemote - stateClosed -) - -var stateName = [...]string{ - stateIdle: "Idle", - stateOpen: "Open", - stateHalfClosedLocal: "HalfClosedLocal", - stateHalfClosedRemote: "HalfClosedRemote", - stateClosed: "Closed", -} - -func (st streamState) String() string { - return stateName[st] -} - -// Setting is a setting parameter: which setting it is, and its value. -type Setting struct { - // ID is which setting is being set. - // See http://http2.github.io/http2-spec/#SettingValues - ID SettingID - - // Val is the value. - Val uint32 -} - -func (s Setting) String() string { - return fmt.Sprintf("[%v = %d]", s.ID, s.Val) -} - -// Valid reports whether the setting is valid. 
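// For example (illustrative):
//
//	Setting{ID: SettingMaxFrameSize, Val: 1 << 30}.Valid()
//	// returns ConnectionError(ErrCodeProtocol), since
//	// MAX_FRAME_SIZE must lie within [16384, 1<<24-1].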
-func (s Setting) Valid() error {
- // Limits and error codes from 6.5.2 Defined SETTINGS Parameters
- switch s.ID {
- case SettingEnablePush:
- if s.Val != 1 && s.Val != 0 {
- return ConnectionError(ErrCodeProtocol)
- }
- case SettingInitialWindowSize:
- if s.Val > 1<<31-1 {
- return ConnectionError(ErrCodeFlowControl)
- }
- case SettingMaxFrameSize:
- if s.Val < 16384 || s.Val > 1<<24-1 {
- return ConnectionError(ErrCodeProtocol)
- }
- }
- return nil
-}
-
-// A SettingID is an HTTP/2 setting as defined in
-// http://http2.github.io/http2-spec/#iana-settings
-type SettingID uint16
-
-const (
- SettingHeaderTableSize SettingID = 0x1
- SettingEnablePush SettingID = 0x2
- SettingMaxConcurrentStreams SettingID = 0x3
- SettingInitialWindowSize SettingID = 0x4
- SettingMaxFrameSize SettingID = 0x5
- SettingMaxHeaderListSize SettingID = 0x6
-)
-
-var settingName = map[SettingID]string{
- SettingHeaderTableSize: "HEADER_TABLE_SIZE",
- SettingEnablePush: "ENABLE_PUSH",
- SettingMaxConcurrentStreams: "MAX_CONCURRENT_STREAMS",
- SettingInitialWindowSize: "INITIAL_WINDOW_SIZE",
- SettingMaxFrameSize: "MAX_FRAME_SIZE",
- SettingMaxHeaderListSize: "MAX_HEADER_LIST_SIZE",
-}
-
-func (s SettingID) String() string {
- if v, ok := settingName[s]; ok {
- return v
- }
- return fmt.Sprintf("UNKNOWN_SETTING_%d", uint16(s))
-}
-
-var (
- errInvalidHeaderFieldName = errors.New("http2: invalid header field name")
- errInvalidHeaderFieldValue = errors.New("http2: invalid header field value")
-)
-
-// validWireHeaderFieldName reports whether v is a valid header field
-// name (key). See httplex.ValidHeaderName for the base rules.
-//
-// Further, http2 says:
-// "Just as in HTTP/1.x, header field names are strings of ASCII
-// characters that are compared in a case-insensitive
-// fashion. However, header field names MUST be converted to
-// lowercase prior to their encoding in HTTP/2. "
-func validWireHeaderFieldName(v string) bool {
- if len(v) == 0 {
- return false
- }
- for _, r := range v {
- if !httplex.IsTokenRune(r) {
- return false
- }
- if 'A' <= r && r <= 'Z' {
- return false
- }
- }
- return true
-}
-
-var httpCodeStringCommon = map[int]string{} // n -> strconv.Itoa(n)
-
-func init() {
- for i := 100; i <= 999; i++ {
- if v := http.StatusText(i); v != "" {
- httpCodeStringCommon[i] = strconv.Itoa(i)
- }
- }
-}
-
-func httpCodeString(code int) string {
- if s, ok := httpCodeStringCommon[code]; ok {
- return s
- }
- return strconv.Itoa(code)
-}
-
-// from pkg io
-type stringWriter interface {
- WriteString(s string) (n int, err error)
-}
-
-// A gate lets two goroutines coordinate their activities.
-type gate chan struct{}
-
-func (g gate) Done() { g <- struct{}{} }
-func (g gate) Wait() { <-g }
-
-// A closeWaiter is like a sync.WaitGroup but only goes 1 to 0 (open to closed).
-type closeWaiter chan struct{}
-
-// Init makes a closeWaiter usable.
-// It exists so that a closeWaiter value can be placed inside a
-// larger struct and have the Mutex and Cond's memory in the same
-// allocation.
-func (cw *closeWaiter) Init() {
- *cw = make(chan struct{})
-}
-
-// Close marks the closeWaiter as closed and unblocks any waiters.
-func (cw closeWaiter) Close() {
- close(cw)
-}
-
-// Wait waits for the closeWaiter to become closed.
-func (cw closeWaiter) Wait() {
- <-cw
-}
-
-// bufferedWriter is a buffered writer that writes to w.
-// Its buffered writer is lazily allocated as needed, to minimize
-// idle memory usage with many connections.
-type bufferedWriter struct {
- w io.Writer // immutable
- bw *bufio.Writer // non-nil when data is buffered
-}
-
-func newBufferedWriter(w io.Writer) *bufferedWriter {
- return &bufferedWriter{w: w}
-}
-
-// bufWriterPoolBufferSize is the size of bufio.Writer's
-// buffers created using bufWriterPool.
-//
-// TODO: pick a less arbitrary value? this is a bit under
-// (3 x typical 1500 byte MTU) at least. Other than that,
-// not much thought went into it.
-const bufWriterPoolBufferSize = 4 << 10
-
-var bufWriterPool = sync.Pool{
- New: func() interface{} {
- return bufio.NewWriterSize(nil, bufWriterPoolBufferSize)
- },
-}
-
-func (w *bufferedWriter) Available() int {
- if w.bw == nil {
- return bufWriterPoolBufferSize
- }
- return w.bw.Available()
-}
-
-func (w *bufferedWriter) Write(p []byte) (n int, err error) {
- if w.bw == nil {
- bw := bufWriterPool.Get().(*bufio.Writer)
- bw.Reset(w.w)
- w.bw = bw
- }
- return w.bw.Write(p)
-}
-
-func (w *bufferedWriter) Flush() error {
- bw := w.bw
- if bw == nil {
- return nil
- }
- err := bw.Flush()
- bw.Reset(nil)
- bufWriterPool.Put(bw)
- w.bw = nil
- return err
-}
-
-func mustUint31(v int32) uint32 {
- if v < 0 || v > 2147483647 {
- panic("out of range")
- }
- return uint32(v)
-}
-
-// bodyAllowedForStatus reports whether a given response status code
-// permits a body. See RFC 2616, section 4.4.
-func bodyAllowedForStatus(status int) bool {
- switch {
- case status >= 100 && status <= 199:
- return false
- case status == 204:
- return false
- case status == 304:
- return false
- }
- return true
-}
-
-type httpError struct {
- msg string
- timeout bool
-}
-
-func (e *httpError) Error() string { return e.msg }
-func (e *httpError) Timeout() bool { return e.timeout }
-func (e *httpError) Temporary() bool { return true }
-
-var errTimeout error = &httpError{msg: "http2: timeout awaiting response headers", timeout: true}
-
-type connectionStater interface {
- ConnectionState() tls.ConnectionState
-}
-
-var sorterPool = sync.Pool{New: func() interface{} { return new(sorter) }}
-
-type sorter struct {
- v []string // owned by sorter
-}
-
-func (s *sorter) Len() int { return len(s.v) }
-func (s *sorter) Swap(i, j int) { s.v[i], s.v[j] = s.v[j], s.v[i] }
-func (s *sorter) Less(i, j int) bool { return s.v[i] < s.v[j] }
-
-// Keys returns the sorted keys of h.
-//
-// The returned slice is only valid until s is used again or returned to
-// its pool.
-func (s *sorter) Keys(h http.Header) []string {
- keys := s.v[:0]
- for k := range h {
- keys = append(keys, k)
- }
- s.v = keys
- sort.Sort(s)
- return keys
-}
-
-func (s *sorter) SortStrings(ss []string) {
- // Our sorter works on s.v, which sorter owns, so
- // stash it away while we sort the user's buffer.
- save := s.v
- s.v = ss
- sort.Sort(s)
- s.v = save
-}
-
-// validPseudoPath reports whether v is a valid :path pseudo-header
-// value. It must be either:
-//
-// *) a non-empty string starting with '/', but not with "//",
-// *) the string '*', for OPTIONS requests.
-//
-// For now this is only used as a quick check for deciding when to clean
-// up Opaque URLs before sending requests from the Transport.
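// Illustrative cases:
//
//	validPseudoPath("/")      // true
//	validPseudoPath("/index") // true
//	validPseudoPath("*")      // true (OPTIONS)
//	validPseudoPath("//x")    // false: begins with "//"
//	validPseudoPath("x")      // false: no leading '/'
//	validPseudoPath("")       // false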
-// See golang.org/issue/16847 -func validPseudoPath(v string) bool { - return (len(v) > 0 && v[0] == '/' && (len(v) == 1 || v[1] != '/')) || v == "*" -} diff --git a/vendor/golang.org/x/net/http2/not_go16.go b/vendor/golang.org/x/net/http2/not_go16.go deleted file mode 100644 index efd2e1282cf6..000000000000 --- a/vendor/golang.org/x/net/http2/not_go16.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !go1.6 - -package http2 - -import ( - "crypto/tls" - "net/http" - "time" -) - -func configureTransport(t1 *http.Transport) (*Transport, error) { - return nil, errTransportVersion -} - -func transportExpectContinueTimeout(t1 *http.Transport) time.Duration { - return 0 - -} - -// isBadCipher reports whether the cipher is blacklisted by the HTTP/2 spec. -func isBadCipher(cipher uint16) bool { - switch cipher { - case tls.TLS_RSA_WITH_RC4_128_SHA, - tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA, - tls.TLS_RSA_WITH_AES_128_CBC_SHA, - tls.TLS_RSA_WITH_AES_256_CBC_SHA, - tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, - tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, - tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, - tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA, - tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, - tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, - tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: - // Reject cipher suites from Appendix A. - // "This list includes those cipher suites that do not - // offer an ephemeral key exchange and those that are - // based on the TLS null, stream or block cipher type" - return true - default: - return false - } -} diff --git a/vendor/golang.org/x/net/http2/not_go17.go b/vendor/golang.org/x/net/http2/not_go17.go deleted file mode 100644 index 140434a791a3..000000000000 --- a/vendor/golang.org/x/net/http2/not_go17.go +++ /dev/null @@ -1,87 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !go1.7 - -package http2 - -import ( - "crypto/tls" - "net" - "net/http" - "time" -) - -type contextContext interface { - Done() <-chan struct{} - Err() error -} - -type fakeContext struct{} - -func (fakeContext) Done() <-chan struct{} { return nil } -func (fakeContext) Err() error { panic("should not be called") } - -func reqContext(r *http.Request) fakeContext { - return fakeContext{} -} - -func setResponseUncompressed(res *http.Response) { - // Nothing. 
-} - -type clientTrace struct{} - -func requestTrace(*http.Request) *clientTrace { return nil } -func traceGotConn(*http.Request, *ClientConn) {} -func traceFirstResponseByte(*clientTrace) {} -func traceWroteHeaders(*clientTrace) {} -func traceWroteRequest(*clientTrace, error) {} -func traceGot100Continue(trace *clientTrace) {} -func traceWait100Continue(trace *clientTrace) {} - -func nop() {} - -func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx contextContext, cancel func()) { - return nil, nop -} - -func contextWithCancel(ctx contextContext) (_ contextContext, cancel func()) { - return ctx, nop -} - -func requestWithContext(req *http.Request, ctx contextContext) *http.Request { - return req -} - -// temporary copy of Go 1.6's private tls.Config.clone: -func cloneTLSConfig(c *tls.Config) *tls.Config { - return &tls.Config{ - Rand: c.Rand, - Time: c.Time, - Certificates: c.Certificates, - NameToCertificate: c.NameToCertificate, - GetCertificate: c.GetCertificate, - RootCAs: c.RootCAs, - NextProtos: c.NextProtos, - ServerName: c.ServerName, - ClientAuth: c.ClientAuth, - ClientCAs: c.ClientCAs, - InsecureSkipVerify: c.InsecureSkipVerify, - CipherSuites: c.CipherSuites, - PreferServerCipherSuites: c.PreferServerCipherSuites, - SessionTicketsDisabled: c.SessionTicketsDisabled, - SessionTicketKey: c.SessionTicketKey, - ClientSessionCache: c.ClientSessionCache, - MinVersion: c.MinVersion, - MaxVersion: c.MaxVersion, - CurvePreferences: c.CurvePreferences, - } -} - -func (cc *ClientConn) Ping(ctx contextContext) error { - return cc.ping(ctx) -} - -func (t *Transport) idleConnTimeout() time.Duration { return 0 } diff --git a/vendor/golang.org/x/net/http2/not_go18.go b/vendor/golang.org/x/net/http2/not_go18.go deleted file mode 100644 index efbf83c32c45..000000000000 --- a/vendor/golang.org/x/net/http2/not_go18.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !go1.8 - -package http2 - -import ( - "io" - "net/http" -) - -func configureServer18(h1 *http.Server, h2 *Server) error { - // No IdleTimeout to sync prior to Go 1.8. - return nil -} - -func shouldLogPanic(panicValue interface{}) bool { - return panicValue != nil -} - -func reqGetBody(req *http.Request) func() (io.ReadCloser, error) { - return nil -} - -func reqBodyIsNoBody(io.ReadCloser) bool { return false } diff --git a/vendor/golang.org/x/net/http2/pipe.go b/vendor/golang.org/x/net/http2/pipe.go deleted file mode 100644 index 53b7a1daf630..000000000000 --- a/vendor/golang.org/x/net/http2/pipe.go +++ /dev/null @@ -1,153 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package http2 - -import ( - "errors" - "io" - "sync" -) - -// pipe is a goroutine-safe io.Reader/io.Writer pair. It's like -// io.Pipe except there are no PipeReader/PipeWriter halves, and the -// underlying buffer is an interface. (io.Pipe is always unbuffered) -type pipe struct { - mu sync.Mutex - c sync.Cond // c.L lazily initialized to &p.mu - b pipeBuffer - err error // read error once empty. non-nil means closed. 
- breakErr error // immediate read error (caller doesn't see rest of b) - donec chan struct{} // closed on error - readFn func() // optional code to run in Read before error -} - -type pipeBuffer interface { - Len() int - io.Writer - io.Reader -} - -func (p *pipe) Len() int { - p.mu.Lock() - defer p.mu.Unlock() - return p.b.Len() -} - -// Read waits until data is available and copies bytes -// from the buffer into p. -func (p *pipe) Read(d []byte) (n int, err error) { - p.mu.Lock() - defer p.mu.Unlock() - if p.c.L == nil { - p.c.L = &p.mu - } - for { - if p.breakErr != nil { - return 0, p.breakErr - } - if p.b.Len() > 0 { - return p.b.Read(d) - } - if p.err != nil { - if p.readFn != nil { - p.readFn() // e.g. copy trailers - p.readFn = nil // not sticky like p.err - } - return 0, p.err - } - p.c.Wait() - } -} - -var errClosedPipeWrite = errors.New("write on closed buffer") - -// Write copies bytes from p into the buffer and wakes a reader. -// It is an error to write more data than the buffer can hold. -func (p *pipe) Write(d []byte) (n int, err error) { - p.mu.Lock() - defer p.mu.Unlock() - if p.c.L == nil { - p.c.L = &p.mu - } - defer p.c.Signal() - if p.err != nil { - return 0, errClosedPipeWrite - } - return p.b.Write(d) -} - -// CloseWithError causes the next Read (waking up a current blocked -// Read if needed) to return the provided err after all data has been -// read. -// -// The error must be non-nil. -func (p *pipe) CloseWithError(err error) { p.closeWithError(&p.err, err, nil) } - -// BreakWithError causes the next Read (waking up a current blocked -// Read if needed) to return the provided err immediately, without -// waiting for unread data. -func (p *pipe) BreakWithError(err error) { p.closeWithError(&p.breakErr, err, nil) } - -// closeWithErrorAndCode is like CloseWithError but also sets some code to run -// in the caller's goroutine before returning the error. -func (p *pipe) closeWithErrorAndCode(err error, fn func()) { p.closeWithError(&p.err, err, fn) } - -func (p *pipe) closeWithError(dst *error, err error, fn func()) { - if err == nil { - panic("err must be non-nil") - } - p.mu.Lock() - defer p.mu.Unlock() - if p.c.L == nil { - p.c.L = &p.mu - } - defer p.c.Signal() - if *dst != nil { - // Already been done. - return - } - p.readFn = fn - *dst = err - p.closeDoneLocked() -} - -// requires p.mu be held. -func (p *pipe) closeDoneLocked() { - if p.donec == nil { - return - } - // Close if unclosed. This isn't racy since we always - // hold p.mu while closing. - select { - case <-p.donec: - default: - close(p.donec) - } -} - -// Err returns the error (if any) first set by BreakWithError or CloseWithError. -func (p *pipe) Err() error { - p.mu.Lock() - defer p.mu.Unlock() - if p.breakErr != nil { - return p.breakErr - } - return p.err -} - -// Done returns a channel which is closed if and when this pipe is closed -// with CloseWithError. -func (p *pipe) Done() <-chan struct{} { - p.mu.Lock() - defer p.mu.Unlock() - if p.donec == nil { - p.donec = make(chan struct{}) - if p.err != nil || p.breakErr != nil { - // Already hit an error. - p.closeDoneLocked() - } - } - return p.donec -} diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go deleted file mode 100644 index 3c6b90ccd55f..000000000000 --- a/vendor/golang.org/x/net/http2/server.go +++ /dev/null @@ -1,2753 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
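The pipe type deleted above is the bridge between frame processing and handler Reads. A minimal sketch of its intended use, written as if inside package http2 since pipe is unexported (bytes, io, and io/ioutil imports assumed):

p := &pipe{b: new(bytes.Buffer)}
go func() {
	io.WriteString(p, "hello") // Write wakes any blocked Read
	p.CloseWithError(io.EOF)   // Reads drain "hello", then return io.EOF
}()
data, err := ioutil.ReadAll(p) // data == []byte("hello"), err == nil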
-
-// TODO: turn off the serve goroutine when idle, so
-// an idle conn only has the readFrames goroutine active. (which could
-// also be optimized probably to pin less memory in crypto/tls). This
-// would involve tracking when the serve goroutine is active (atomic
-// int32 read/CAS probably?) and starting it up when frames arrive,
-// and shutting it down when all handlers exit. The occasional PING
-// packets could use time.AfterFunc to call sc.wakeStartServeLoop()
-// (which is a no-op if already running) and then queue the PING write
-// as normal. The serve loop would then exit in most cases (if no
-// Handlers running) and not be woken up again until the PING packet
-// returns.
-
-// TODO (maybe): add a mechanism for Handlers to go into
-// half-closed-local mode (rw.(io.Closer) test?) but not exit their
-// handler, and continue to be able to read from the
-// Request.Body. This would be a somewhat semantic change from HTTP/1
-// (or at least what we expose in net/http), so I'd probably want to
-// add it there too. For now, this package says that returning from
-// the Handler ServeHTTP function means you're both done reading and
-// done writing, without a way to stop just one or the other.
-
-package http2
-
-import (
- "bufio"
- "bytes"
- "crypto/tls"
- "errors"
- "fmt"
- "io"
- "log"
- "math"
- "net"
- "net/http"
- "net/textproto"
- "net/url"
- "os"
- "reflect"
- "runtime"
- "strconv"
- "strings"
- "sync"
- "time"
-
- "golang.org/x/net/http2/hpack"
-)
-
-const (
- prefaceTimeout = 10 * time.Second
- firstSettingsTimeout = 2 * time.Second // should be in-flight with preface anyway
- handlerChunkWriteSize = 4 << 10
- defaultMaxStreams = 250 // TODO: make this 100 as the GFE seems to?
-)
-
-var (
- errClientDisconnected = errors.New("client disconnected")
- errClosedBody = errors.New("body closed by handler")
- errHandlerComplete = errors.New("http2: request body closed due to handler exiting")
- errStreamClosed = errors.New("http2: stream closed")
-)
-
-var responseWriterStatePool = sync.Pool{
- New: func() interface{} {
- rws := &responseWriterState{}
- rws.bw = bufio.NewWriterSize(chunkWriter{rws}, handlerChunkWriteSize)
- return rws
- },
-}
-
-// Test hooks.
-var (
- testHookOnConn func()
- testHookGetServerConn func(*serverConn)
- testHookOnPanicMu *sync.Mutex // nil except in tests
- testHookOnPanic func(sc *serverConn, panicVal interface{}) (rePanic bool)
-)
-
-// Server is an HTTP/2 server.
-type Server struct {
- // MaxHandlers limits the number of http.Handler ServeHTTP goroutines
- // which may run at a time over all connections.
- // Negative or zero means no limit.
- // TODO: implement
- MaxHandlers int
-
- // MaxConcurrentStreams optionally specifies the number of
- // concurrent streams that each client may have open at a
- // time. This is unrelated to the number of http.Handler goroutines
- // which may be active globally, which is MaxHandlers.
- // If zero, MaxConcurrentStreams defaults to at least 100, per
- // the HTTP/2 spec's recommendations.
- MaxConcurrentStreams uint32
-
- // MaxReadFrameSize optionally specifies the largest frame
- // this server is willing to read. A valid value is between
- // 16k and 16M, inclusive. If zero or otherwise invalid, a
- // default value is used.
- MaxReadFrameSize uint32
-
- // PermitProhibitedCipherSuites, if true, permits the use of
- // cipher suites prohibited by the HTTP/2 spec.
- PermitProhibitedCipherSuites bool
-
- // IdleTimeout specifies how long until idle clients should be
- // closed with a GOAWAY frame.
PING frames are not considered - // activity for the purposes of IdleTimeout. - IdleTimeout time.Duration - - // NewWriteScheduler constructs a write scheduler for a connection. - // If nil, a default scheduler is chosen. - NewWriteScheduler func() WriteScheduler -} - -func (s *Server) maxReadFrameSize() uint32 { - if v := s.MaxReadFrameSize; v >= minMaxFrameSize && v <= maxFrameSize { - return v - } - return defaultMaxReadFrameSize -} - -func (s *Server) maxConcurrentStreams() uint32 { - if v := s.MaxConcurrentStreams; v > 0 { - return v - } - return defaultMaxStreams -} - -// ConfigureServer adds HTTP/2 support to a net/http Server. -// -// The configuration conf may be nil. -// -// ConfigureServer must be called before s begins serving. -func ConfigureServer(s *http.Server, conf *Server) error { - if s == nil { - panic("nil *http.Server") - } - if conf == nil { - conf = new(Server) - } - if err := configureServer18(s, conf); err != nil { - return err - } - - if s.TLSConfig == nil { - s.TLSConfig = new(tls.Config) - } else if s.TLSConfig.CipherSuites != nil { - // If they already provided a CipherSuite list, return - // an error if it has a bad order or is missing - // ECDHE_RSA_WITH_AES_128_GCM_SHA256. - const requiredCipher = tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 - haveRequired := false - sawBad := false - for i, cs := range s.TLSConfig.CipherSuites { - if cs == requiredCipher { - haveRequired = true - } - if isBadCipher(cs) { - sawBad = true - } else if sawBad { - return fmt.Errorf("http2: TLSConfig.CipherSuites index %d contains an HTTP/2-approved cipher suite (%#04x), but it comes after unapproved cipher suites. With this configuration, clients that don't support previous, approved cipher suites may be given an unapproved one and reject the connection.", i, cs) - } - } - if !haveRequired { - return fmt.Errorf("http2: TLSConfig.CipherSuites is missing HTTP/2-required TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256") - } - } - - // Note: not setting MinVersion to tls.VersionTLS12, - // as we don't want to interfere with HTTP/1.1 traffic - // on the user's server. We enforce TLS 1.2 later once - // we accept a connection. Ideally this should be done - // during next-proto selection, but using TLS <1.2 with - // HTTP/2 is still the client's bug. - - s.TLSConfig.PreferServerCipherSuites = true - - haveNPN := false - for _, p := range s.TLSConfig.NextProtos { - if p == NextProtoTLS { - haveNPN = true - break - } - } - if !haveNPN { - s.TLSConfig.NextProtos = append(s.TLSConfig.NextProtos, NextProtoTLS) - } - - if s.TLSNextProto == nil { - s.TLSNextProto = map[string]func(*http.Server, *tls.Conn, http.Handler){} - } - protoHandler := func(hs *http.Server, c *tls.Conn, h http.Handler) { - if testHookOnConn != nil { - testHookOnConn() - } - conf.ServeConn(c, &ServeConnOpts{ - Handler: h, - BaseConfig: hs, - }) - } - s.TLSNextProto[NextProtoTLS] = protoHandler - return nil -} - -// ServeConnOpts are options for the Server.ServeConn method. -type ServeConnOpts struct { - // BaseConfig optionally sets the base configuration - // for values. If nil, defaults are used. - BaseConfig *http.Server - - // Handler specifies which handler to use for processing - // requests. If nil, BaseConfig.Handler is used. If BaseConfig - // or BaseConfig.Handler is nil, http.DefaultServeMux is used. 
- Handler http.Handler -} - -func (o *ServeConnOpts) baseConfig() *http.Server { - if o != nil && o.BaseConfig != nil { - return o.BaseConfig - } - return new(http.Server) -} - -func (o *ServeConnOpts) handler() http.Handler { - if o != nil { - if o.Handler != nil { - return o.Handler - } - if o.BaseConfig != nil && o.BaseConfig.Handler != nil { - return o.BaseConfig.Handler - } - } - return http.DefaultServeMux -} - -// ServeConn serves HTTP/2 requests on the provided connection and -// blocks until the connection is no longer readable. -// -// ServeConn starts speaking HTTP/2 assuming that c has not had any -// reads or writes. It writes its initial settings frame and expects -// to be able to read the preface and settings frame from the -// client. If c has a ConnectionState method like a *tls.Conn, the -// ConnectionState is used to verify the TLS ciphersuite and to set -// the Request.TLS field in Handlers. -// -// ServeConn does not support h2c by itself. Any h2c support must be -// implemented in terms of providing a suitably-behaving net.Conn. -// -// The opts parameter is optional. If nil, default values are used. -func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { - baseCtx, cancel := serverConnBaseContext(c, opts) - defer cancel() - - sc := &serverConn{ - srv: s, - hs: opts.baseConfig(), - conn: c, - baseCtx: baseCtx, - remoteAddrStr: c.RemoteAddr().String(), - bw: newBufferedWriter(c), - handler: opts.handler(), - streams: make(map[uint32]*stream), - readFrameCh: make(chan readFrameResult), - wantWriteFrameCh: make(chan FrameWriteRequest, 8), - wantStartPushCh: make(chan startPushRequest, 8), - wroteFrameCh: make(chan frameWriteResult, 1), // buffered; one send in writeFrameAsync - bodyReadCh: make(chan bodyReadMsg), // buffering doesn't matter either way - doneServing: make(chan struct{}), - clientMaxStreams: math.MaxUint32, // Section 6.5.2: "Initially, there is no limit to this value" - advMaxStreams: s.maxConcurrentStreams(), - initialWindowSize: initialWindowSize, - maxFrameSize: initialMaxFrameSize, - headerTableSize: initialHeaderTableSize, - serveG: newGoroutineLock(), - pushEnabled: true, - } - - // The net/http package sets the write deadline from the - // http.Server.WriteTimeout during the TLS handshake, but then - // passes the connection off to us with the deadline already - // set. Disarm it here so that it is not applied to additional - // streams opened on this connection. - // TODO: implement WriteTimeout fully. See Issue 18437. - if sc.hs.WriteTimeout != 0 { - sc.conn.SetWriteDeadline(time.Time{}) - } - - if s.NewWriteScheduler != nil { - sc.writeSched = s.NewWriteScheduler() - } else { - sc.writeSched = NewRandomWriteScheduler() - } - - sc.flow.add(initialWindowSize) - sc.inflow.add(initialWindowSize) - sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf) - - fr := NewFramer(sc.bw, c) - fr.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil) - fr.MaxHeaderListSize = sc.maxHeaderListSize() - fr.SetMaxReadFrameSize(s.maxReadFrameSize()) - sc.framer = fr - - if tc, ok := c.(connectionStater); ok { - sc.tlsState = new(tls.ConnectionState) - *sc.tlsState = tc.ConnectionState() - // 9.2 Use of TLS Features - // An implementation of HTTP/2 over TLS MUST use TLS - // 1.2 or higher with the restrictions on feature set - // and cipher suite described in this section. Due to - // implementation limitations, it might not be - // possible to fail TLS negotiation. 
An endpoint MUST
- // immediately terminate an HTTP/2 connection that
- // does not meet the TLS requirements described in
- // this section with a connection error (Section
- // 5.4.1) of type INADEQUATE_SECURITY.
- if sc.tlsState.Version < tls.VersionTLS12 {
- sc.rejectConn(ErrCodeInadequateSecurity, "TLS version too low")
- return
- }
-
- if sc.tlsState.ServerName == "" {
- // Client must use SNI, but we don't enforce that anymore,
- // since it was causing problems when connecting to bare IP
- // addresses during development.
- //
- // TODO: optionally enforce? Or enforce at the time we receive
- // a new request, and verify that the ServerName matches the :authority?
- // But that precludes proxy situations, perhaps.
- //
- // So for now, do nothing here again.
- }
-
- if !s.PermitProhibitedCipherSuites && isBadCipher(sc.tlsState.CipherSuite) {
- // "Endpoints MAY choose to generate a connection error
- // (Section 5.4.1) of type INADEQUATE_SECURITY if one of
- // the prohibited cipher suites are negotiated."
- //
- // We choose that. In my opinion, the spec is weak
- // here. It also says both parties must support at least
- // TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 so there's no
- // excuse here. If we really must, we could allow an
- // "AllowInsecureWeakCiphers" option on the server later.
- // Let's see how it plays out first.
- sc.rejectConn(ErrCodeInadequateSecurity, fmt.Sprintf("Prohibited TLS 1.2 Cipher Suite: %x", sc.tlsState.CipherSuite))
- return
- }
- }
-
- if hook := testHookGetServerConn; hook != nil {
- hook(sc)
- }
- sc.serve()
-}
-
-func (sc *serverConn) rejectConn(err ErrCode, debug string) {
- sc.vlogf("http2: server rejecting conn: %v, %s", err, debug)
- // ignoring errors. hanging up anyway.
- sc.framer.WriteGoAway(0, err, []byte(debug))
- sc.bw.Flush()
- sc.conn.Close()
-}
-
-type serverConn struct {
- // Immutable:
- srv *Server
- hs *http.Server
- conn net.Conn
- bw *bufferedWriter // writing to conn
- handler http.Handler
- baseCtx contextContext
- framer *Framer
- doneServing chan struct{} // closed when serverConn.serve ends
- readFrameCh chan readFrameResult // written by serverConn.readFrames
- wantWriteFrameCh chan FrameWriteRequest // from handlers -> serve
- wantStartPushCh chan startPushRequest // from handlers -> serve
- wroteFrameCh chan frameWriteResult // from writeFrameAsync -> serve, tickles more frame writes
- bodyReadCh chan bodyReadMsg // from handlers -> serve
- testHookCh chan func(int) // code to run on the serve loop
- flow flow // conn-wide (not stream-specific) outbound flow control
- inflow flow // conn-wide inbound flow control
- tlsState *tls.ConnectionState // shared by all handlers, like net/http
- remoteAddrStr string
- writeSched WriteScheduler
-
- // Everything following is owned by the serve loop; use serveG.check():
- serveG goroutineLock // used to verify funcs are on serve()
- pushEnabled bool
- sawFirstSettings bool // got the initial SETTINGS frame after the preface
- needToSendSettingsAck bool
- unackedSettings int // how many SETTINGS have we sent without ACKs?
- clientMaxStreams uint32 // SETTINGS_MAX_CONCURRENT_STREAMS from client (our PUSH_PROMISE limit)
- advMaxStreams uint32 // our SETTINGS_MAX_CONCURRENT_STREAMS advertised to the client
- curClientStreams uint32 // number of open streams initiated by the client
- curPushedStreams uint32 // number of open streams initiated by server push
- maxClientStreamID uint32 // max ever seen from client (odd), or 0 if there have been no client requests
- maxPushPromiseID uint32 // ID of the last push promise (even), or 0 if there have been no pushes
- streams map[uint32]*stream
- initialWindowSize int32
- maxFrameSize int32
- headerTableSize uint32
- peerMaxHeaderListSize uint32 // zero means unknown (default)
- canonHeader map[string]string // http2-lower-case -> Go-Canonical-Case
- writingFrame bool // started writing a frame (on serve goroutine or separate)
- writingFrameAsync bool // started a frame on its own goroutine but haven't heard back on wroteFrameCh
- needsFrameFlush bool // last frame write wasn't a flush
- inGoAway bool // we've started to or sent GOAWAY
- inFrameScheduleLoop bool // whether we're in the scheduleFrameWrite loop
- needToSendGoAway bool // we need to schedule a GOAWAY frame write
- goAwayCode ErrCode
- shutdownTimerCh <-chan time.Time // nil until used
- shutdownTimer *time.Timer // nil until used
- idleTimer *time.Timer // nil if unused
- idleTimerCh <-chan time.Time // nil if unused
-
- // Owned by the writeFrameAsync goroutine:
- headerWriteBuf bytes.Buffer
- hpackEncoder *hpack.Encoder
-}
-
-func (sc *serverConn) maxHeaderListSize() uint32 {
- n := sc.hs.MaxHeaderBytes
- if n <= 0 {
- n = http.DefaultMaxHeaderBytes
- }
- // http2's count is in a slightly different unit and includes 32 bytes per pair.
- // So, take the net/http.Server value and pad it up a bit, assuming 10 headers.
- const perFieldOverhead = 32 // per http2 spec
- const typicalHeaders = 10 // conservative
- return uint32(n + typicalHeaders*perFieldOverhead)
-}
-
-func (sc *serverConn) curOpenStreams() uint32 {
- sc.serveG.check()
- return sc.curClientStreams + sc.curPushedStreams
-}
-
-// stream represents a stream. This is the minimal metadata needed by
-// the serve goroutine. Most of the actual stream state is owned by
-// the http.Handler's goroutine in the responseWriter. Because the
-// responseWriter's responseWriterState is recycled at the end of a
-// handler, this struct intentionally has no pointer to the
-// *responseWriter{,State} itself, as the Handler ending nils out the
-// responseWriter's state field.
-type stream struct { - // immutable: - sc *serverConn - id uint32 - body *pipe // non-nil if expecting DATA frames - cw closeWaiter // closed wait stream transitions to closed state - ctx contextContext - cancelCtx func() - - // owned by serverConn's serve loop: - bodyBytes int64 // body bytes seen so far - declBodyBytes int64 // or -1 if undeclared - flow flow // limits writing from Handler to client - inflow flow // what the client is allowed to POST/etc to us - parent *stream // or nil - numTrailerValues int64 - weight uint8 - state streamState - resetQueued bool // RST_STREAM queued for write; set by sc.resetStream - gotTrailerHeader bool // HEADER frame for trailers was seen - wroteHeaders bool // whether we wrote headers (not status 100) - reqBuf []byte // if non-nil, body pipe buffer to return later at EOF - - trailer http.Header // accumulated trailers - reqTrailer http.Header // handler's Request.Trailer -} - -func (sc *serverConn) Framer() *Framer { return sc.framer } -func (sc *serverConn) CloseConn() error { return sc.conn.Close() } -func (sc *serverConn) Flush() error { return sc.bw.Flush() } -func (sc *serverConn) HeaderEncoder() (*hpack.Encoder, *bytes.Buffer) { - return sc.hpackEncoder, &sc.headerWriteBuf -} - -func (sc *serverConn) state(streamID uint32) (streamState, *stream) { - sc.serveG.check() - // http://tools.ietf.org/html/rfc7540#section-5.1 - if st, ok := sc.streams[streamID]; ok { - return st.state, st - } - // "The first use of a new stream identifier implicitly closes all - // streams in the "idle" state that might have been initiated by - // that peer with a lower-valued stream identifier. For example, if - // a client sends a HEADERS frame on stream 7 without ever sending a - // frame on stream 5, then stream 5 transitions to the "closed" - // state when the first frame for stream 7 is sent or received." - if streamID%2 == 1 { - if streamID <= sc.maxClientStreamID { - return stateClosed, nil - } - } else { - if streamID <= sc.maxPushPromiseID { - return stateClosed, nil - } - } - return stateIdle, nil -} - -// setConnState calls the net/http ConnState hook for this connection, if configured. -// Note that the net/http package does StateNew and StateClosed for us. -// There is currently no plan for StateHijacked or hijacking HTTP/2 connections. -func (sc *serverConn) setConnState(state http.ConnState) { - if sc.hs.ConnState != nil { - sc.hs.ConnState(sc.conn, state) - } -} - -func (sc *serverConn) vlogf(format string, args ...interface{}) { - if VerboseLogs { - sc.logf(format, args...) - } -} - -func (sc *serverConn) logf(format string, args ...interface{}) { - if lg := sc.hs.ErrorLog; lg != nil { - lg.Printf(format, args...) - } else { - log.Printf(format, args...) - } -} - -// errno returns v's underlying uintptr, else 0. -// -// TODO: remove this helper function once http2 can use build -// tags. See comment in isClosedConnError. -func errno(v error) uintptr { - if rv := reflect.ValueOf(v); rv.Kind() == reflect.Uintptr { - return uintptr(rv.Uint()) - } - return 0 -} - -// isClosedConnError reports whether err is an error from use of a closed -// network connection. -func isClosedConnError(err error) bool { - if err == nil { - return false - } - - // TODO: remove this string search and be more like the Windows - // case below. That might involve modifying the standard library - // to return better error types. 
- str := err.Error() - if strings.Contains(str, "use of closed network connection") { - return true - } - - // TODO(bradfitz): x/tools/cmd/bundle doesn't really support - // build tags, so I can't make an http2_windows.go file with - // Windows-specific stuff. Fix that and move this, once we - // have a way to bundle this into std's net/http somehow. - if runtime.GOOS == "windows" { - if oe, ok := err.(*net.OpError); ok && oe.Op == "read" { - if se, ok := oe.Err.(*os.SyscallError); ok && se.Syscall == "wsarecv" { - const WSAECONNABORTED = 10053 - const WSAECONNRESET = 10054 - if n := errno(se.Err); n == WSAECONNRESET || n == WSAECONNABORTED { - return true - } - } - } - } - return false -} - -func (sc *serverConn) condlogf(err error, format string, args ...interface{}) { - if err == nil { - return - } - if err == io.EOF || err == io.ErrUnexpectedEOF || isClosedConnError(err) { - // Boring, expected errors. - sc.vlogf(format, args...) - } else { - sc.logf(format, args...) - } -} - -func (sc *serverConn) canonicalHeader(v string) string { - sc.serveG.check() - cv, ok := commonCanonHeader[v] - if ok { - return cv - } - cv, ok = sc.canonHeader[v] - if ok { - return cv - } - if sc.canonHeader == nil { - sc.canonHeader = make(map[string]string) - } - cv = http.CanonicalHeaderKey(v) - sc.canonHeader[v] = cv - return cv -} - -type readFrameResult struct { - f Frame // valid until readMore is called - err error - - // readMore should be called once the consumer no longer needs or - // retains f. After readMore, f is invalid and more frames can be - // read. - readMore func() -} - -// readFrames is the loop that reads incoming frames. -// It takes care to only read one frame at a time, blocking until the -// consumer is done with the frame. -// It's run on its own goroutine. -func (sc *serverConn) readFrames() { - gate := make(gate) - gateDone := gate.Done - for { - f, err := sc.framer.ReadFrame() - select { - case sc.readFrameCh <- readFrameResult{f, err, gateDone}: - case <-sc.doneServing: - return - } - select { - case <-gate: - case <-sc.doneServing: - return - } - if terminalReadFrameError(err) { - return - } - } -} - -// frameWriteResult is the message passed from writeFrameAsync to the serve goroutine. -type frameWriteResult struct { - wr FrameWriteRequest // what was written (or attempted) - err error // result of the writeFrame call -} - -// writeFrameAsync runs in its own goroutine and writes a single frame -// and then reports when it's done. -// At most one goroutine can be running writeFrameAsync at a time per -// serverConn. -func (sc *serverConn) writeFrameAsync(wr FrameWriteRequest) { - err := wr.write.writeFrame(sc) - sc.wroteFrameCh <- frameWriteResult{wr, err} -} - -func (sc *serverConn) closeAllStreamsOnConnClose() { - sc.serveG.check() - for _, st := range sc.streams { - sc.closeStream(st, errClientDisconnected) - } -} - -func (sc *serverConn) stopShutdownTimer() { - sc.serveG.check() - if t := sc.shutdownTimer; t != nil { - t.Stop() - } -} - -func (sc *serverConn) notePanic() { - // Note: this is for serverConn.serve panicking, not http.Handler code. 
- if testHookOnPanicMu != nil { - testHookOnPanicMu.Lock() - defer testHookOnPanicMu.Unlock() - } - if testHookOnPanic != nil { - if e := recover(); e != nil { - if testHookOnPanic(sc, e) { - panic(e) - } - } - } -} - -func (sc *serverConn) serve() { - sc.serveG.check() - defer sc.notePanic() - defer sc.conn.Close() - defer sc.closeAllStreamsOnConnClose() - defer sc.stopShutdownTimer() - defer close(sc.doneServing) // unblocks handlers trying to send - - if VerboseLogs { - sc.vlogf("http2: server connection from %v on %p", sc.conn.RemoteAddr(), sc.hs) - } - - sc.writeFrame(FrameWriteRequest{ - write: writeSettings{ - {SettingMaxFrameSize, sc.srv.maxReadFrameSize()}, - {SettingMaxConcurrentStreams, sc.advMaxStreams}, - {SettingMaxHeaderListSize, sc.maxHeaderListSize()}, - - // TODO: more actual settings, notably - // SettingInitialWindowSize, but then we also - // want to bump up the conn window size the - // same amount here right after the settings - }, - }) - sc.unackedSettings++ - - if err := sc.readPreface(); err != nil { - sc.condlogf(err, "http2: server: error reading preface from client %v: %v", sc.conn.RemoteAddr(), err) - return - } - // Now that we've got the preface, get us out of the - // "StateNew" state. We can't go directly to idle, though. - // Active means we read some data and anticipate a request. We'll - // do another Active when we get a HEADERS frame. - sc.setConnState(http.StateActive) - sc.setConnState(http.StateIdle) - - if sc.srv.IdleTimeout != 0 { - sc.idleTimer = time.NewTimer(sc.srv.IdleTimeout) - defer sc.idleTimer.Stop() - sc.idleTimerCh = sc.idleTimer.C - } - - var gracefulShutdownCh <-chan struct{} - if sc.hs != nil { - gracefulShutdownCh = h1ServerShutdownChan(sc.hs) - } - - go sc.readFrames() // closed by defer sc.conn.Close above - - settingsTimer := time.NewTimer(firstSettingsTimeout) - loopNum := 0 - for { - loopNum++ - select { - case wr := <-sc.wantWriteFrameCh: - sc.writeFrame(wr) - case spr := <-sc.wantStartPushCh: - sc.startPush(spr) - case res := <-sc.wroteFrameCh: - sc.wroteFrame(res) - case res := <-sc.readFrameCh: - if !sc.processFrameFromReader(res) { - return - } - res.readMore() - if settingsTimer.C != nil { - settingsTimer.Stop() - settingsTimer.C = nil - } - case m := <-sc.bodyReadCh: - sc.noteBodyRead(m.st, m.n) - case <-settingsTimer.C: - sc.logf("timeout waiting for SETTINGS frames from %v", sc.conn.RemoteAddr()) - return - case <-gracefulShutdownCh: - gracefulShutdownCh = nil - sc.startGracefulShutdown() - case <-sc.shutdownTimerCh: - sc.vlogf("GOAWAY close timer fired; closing conn from %v", sc.conn.RemoteAddr()) - return - case <-sc.idleTimerCh: - sc.vlogf("connection is idle") - sc.goAway(ErrCodeNo) - case fn := <-sc.testHookCh: - fn(loopNum) - } - - if sc.inGoAway && sc.curOpenStreams() == 0 && !sc.needToSendGoAway && !sc.writingFrame { - return - } - } -} - -// readPreface reads the ClientPreface greeting from the peer -// or returns an error on timeout or an invalid greeting. -func (sc *serverConn) readPreface() error { - errc := make(chan error, 1) - go func() { - // Read the client preface - buf := make([]byte, len(ClientPreface)) - if _, err := io.ReadFull(sc.conn, buf); err != nil { - errc <- err - } else if !bytes.Equal(buf, clientPreface) { - errc <- fmt.Errorf("bogus greeting %q", buf) - } else { - errc <- nil - } - }() - timer := time.NewTimer(prefaceTimeout) // TODO: configurable on *Server? 
- defer timer.Stop() - select { - case <-timer.C: - return errors.New("timeout waiting for client preface") - case err := <-errc: - if err == nil { - if VerboseLogs { - sc.vlogf("http2: server: client %v said hello", sc.conn.RemoteAddr()) - } - } - return err - } -} - -var errChanPool = sync.Pool{ - New: func() interface{} { return make(chan error, 1) }, -} - -var writeDataPool = sync.Pool{ - New: func() interface{} { return new(writeData) }, -} - -// writeDataFromHandler writes DATA response frames from a handler on -// the given stream. -func (sc *serverConn) writeDataFromHandler(stream *stream, data []byte, endStream bool) error { - ch := errChanPool.Get().(chan error) - writeArg := writeDataPool.Get().(*writeData) - *writeArg = writeData{stream.id, data, endStream} - err := sc.writeFrameFromHandler(FrameWriteRequest{ - write: writeArg, - stream: stream, - done: ch, - }) - if err != nil { - return err - } - var frameWriteDone bool // the frame write is done (successfully or not) - select { - case err = <-ch: - frameWriteDone = true - case <-sc.doneServing: - return errClientDisconnected - case <-stream.cw: - // If both ch and stream.cw were ready (as might - // happen on the final Write after an http.Handler - // ends), prefer the write result. Otherwise this - // might just be us successfully closing the stream. - // The writeFrameAsync and serve goroutines guarantee - // that the ch send will happen before the stream.cw - // close. - select { - case err = <-ch: - frameWriteDone = true - default: - return errStreamClosed - } - } - errChanPool.Put(ch) - if frameWriteDone { - writeDataPool.Put(writeArg) - } - return err -} - -// writeFrameFromHandler sends wr to sc.wantWriteFrameCh, but aborts -// if the connection has gone away. -// -// This must not be run from the serve goroutine itself, else it might -// deadlock writing to sc.wantWriteFrameCh (which is only mildly -// buffered and is read by serve itself). If you're on the serve -// goroutine, call writeFrame instead. -func (sc *serverConn) writeFrameFromHandler(wr FrameWriteRequest) error { - sc.serveG.checkNotOn() // NOT - select { - case sc.wantWriteFrameCh <- wr: - return nil - case <-sc.doneServing: - // Serve loop is gone. - // Client has closed their connection to the server. - return errClientDisconnected - } -} - -// writeFrame schedules a frame to write and sends it if there's nothing -// already being written. -// -// There is no pushback here (the serve goroutine never blocks). It's -// the http.Handlers that block, waiting for their previous frames to -// make it onto the wire -// -// If you're not on the serve goroutine, use writeFrameFromHandler instead. -func (sc *serverConn) writeFrame(wr FrameWriteRequest) { - sc.serveG.check() - - // If true, wr will not be written and wr.done will not be signaled. - var ignoreWrite bool - - // We are not allowed to write frames on closed streams. RFC 7540 Section - // 5.1.1 says: "An endpoint MUST NOT send frames other than PRIORITY on - // a closed stream." Our server never sends PRIORITY, so that exception - // does not apply. - // - // The serverConn might close an open stream while the stream's handler - // is still running. For example, the server might close a stream when it - // receives bad data from the client. If this happens, the handler might - // attempt to write a frame after the stream has been closed (since the - // handler hasn't yet been notified of the close). In this case, we simply - // ignore the frame. 
The handler will notice that the stream is closed when - // it waits for the frame to be written. - // - // As an exception to this rule, we allow sending RST_STREAM after close. - // This allows us to immediately reject new streams without tracking any - // state for those streams (except for the queued RST_STREAM frame). This - // may result in duplicate RST_STREAMs in some cases, but the client should - // ignore those. - if wr.StreamID() != 0 { - _, isReset := wr.write.(StreamError) - if state, _ := sc.state(wr.StreamID()); state == stateClosed && !isReset { - ignoreWrite = true - } - } - - // Don't send a 100-continue response if we've already sent headers. - // See golang.org/issue/14030. - switch wr.write.(type) { - case *writeResHeaders: - wr.stream.wroteHeaders = true - case write100ContinueHeadersFrame: - if wr.stream.wroteHeaders { - // We do not need to notify wr.done because this frame is - // never written with wr.done != nil. - if wr.done != nil { - panic("wr.done != nil for write100ContinueHeadersFrame") - } - ignoreWrite = true - } - } - - if !ignoreWrite { - sc.writeSched.Push(wr) - } - sc.scheduleFrameWrite() -} - -// startFrameWrite starts a goroutine to write wr (in a separate -// goroutine since that might block on the network), and updates the -// serve goroutine's state about the world from info in wr. -func (sc *serverConn) startFrameWrite(wr FrameWriteRequest) { - sc.serveG.check() - if sc.writingFrame { - panic("internal error: can only be writing one frame at a time") - } - - st := wr.stream - if st != nil { - switch st.state { - case stateHalfClosedLocal: - switch wr.write.(type) { - case StreamError, handlerPanicRST, writeWindowUpdate: - // RFC 7540 Section 5.1 allows sending RST_STREAM, PRIORITY, and WINDOW_UPDATE - // in this state. (We never send PRIORITY from the server, so that is not checked.) - default: - panic(fmt.Sprintf("internal error: attempt to send frame on a half-closed-local stream: %v", wr)) - } - case stateClosed: - panic(fmt.Sprintf("internal error: attempt to send frame on a closed stream: %v", wr)) - } - } - if wpp, ok := wr.write.(*writePushPromise); ok { - var err error - wpp.promisedID, err = wpp.allocatePromisedID() - if err != nil { - sc.writingFrameAsync = false - wr.replyToWriter(err) - return - } - } - - sc.writingFrame = true - sc.needsFrameFlush = true - if wr.write.staysWithinBuffer(sc.bw.Available()) { - sc.writingFrameAsync = false - err := wr.write.writeFrame(sc) - sc.wroteFrame(frameWriteResult{wr, err}) - } else { - sc.writingFrameAsync = true - go sc.writeFrameAsync(wr) - } -} - -// errHandlerPanicked is the error given to any callers blocked in a read from -// Request.Body when the main goroutine panics. Since most handlers read in -// the main ServeHTTP goroutine, this will show up rarely. -var errHandlerPanicked = errors.New("http2: handler panicked") -
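The startFrameWrite path above enforces a strict single-writer discipline: at most one frame write is in flight per connection, writes that fit the bufio.Writer's remaining buffer complete synchronously on the serve goroutine, and anything that might block on the network is handed to writeFrameAsync, which reports back over a channel. A standalone sketch of that shape, with illustrative names (frameWriter, writeResult) that are not part of the diffed code:

```go
package main

import (
	"bufio"
	"fmt"
	"os"
)

type writeResult struct{ err error }

type frameWriter struct {
	bw      *bufio.Writer
	writing bool             // at most one frame write in flight
	done    chan writeResult // async writes report completion here
}

func (fw *frameWriter) startWrite(frame []byte) {
	if fw.writing {
		panic("internal error: already writing a frame")
	}
	fw.writing = true
	if len(frame) <= fw.bw.Available() {
		// Fits in the buffer: stays on this goroutine and cannot block.
		_, err := fw.bw.Write(frame)
		fw.finishWrite(writeResult{err})
		return
	}
	// Might block on the network: hand off. A real serve loop would
	// receive from fw.done and call finishWrite with the result.
	go func() {
		_, err := fw.bw.Write(frame)
		fw.done <- writeResult{err}
	}()
}

func (fw *frameWriter) finishWrite(res writeResult) {
	fw.writing = false // ready for the next scheduled frame
	if res.err != nil {
		fmt.Fprintln(os.Stderr, "frame write failed:", res.err)
	}
}

func main() {
	fw := &frameWriter{bw: bufio.NewWriter(os.Stdout), done: make(chan writeResult, 1)}
	fw.startWrite([]byte("small frame\n")) // takes the synchronous path
	fw.bw.Flush()
}
```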
-// wroteFrame is called on the serve goroutine with the result of -// whatever happened on writeFrameAsync. -func (sc *serverConn) wroteFrame(res frameWriteResult) { - sc.serveG.check() - if !sc.writingFrame { - panic("internal error: expected to be already writing a frame") - } - sc.writingFrame = false - sc.writingFrameAsync = false - - wr := res.wr - - if writeEndsStream(wr.write) { - st := wr.stream - if st == nil { - panic("internal error: expecting non-nil stream") - } - switch st.state { - case stateOpen: - // Here we would go to stateHalfClosedLocal in - // theory, but since our handler is done and - // the net/http package provides no mechanism - // for closing a ResponseWriter while still - // reading data (see possible TODO at top of - // this file), we go into closed state here - // anyway, after telling the peer we're - // hanging up on them. We'll transition to - // stateClosed after the RST_STREAM frame is - // written. - st.state = stateHalfClosedLocal - sc.resetStream(streamError(st.id, ErrCodeCancel)) - case stateHalfClosedRemote: - sc.closeStream(st, errHandlerComplete) - } - } else { - switch v := wr.write.(type) { - case StreamError: - // st may be unknown if the RST_STREAM was generated to reject bad input. - if st, ok := sc.streams[v.StreamID]; ok { - sc.closeStream(st, v) - } - case handlerPanicRST: - sc.closeStream(wr.stream, errHandlerPanicked) - } - } - - // Reply (if requested) to unblock the ServeHTTP goroutine. - wr.replyToWriter(res.err) - - sc.scheduleFrameWrite() -} - -// scheduleFrameWrite tickles the frame writing scheduler. -// -// If a frame is already being written, nothing happens. This will be called again -// when the frame is done being written. -// -// If a frame isn't being written and we need to send one, the best frame -// to send is selected, preferring first things that aren't -// stream-specific (e.g. ACKing settings), and then finding the -// highest priority stream. -// -// If a frame isn't being written and there's nothing else to send, we -// flush the write buffer. -func (sc *serverConn) scheduleFrameWrite() { - sc.serveG.check() - if sc.writingFrame || sc.inFrameScheduleLoop { - return - } - sc.inFrameScheduleLoop = true - for !sc.writingFrameAsync { - if sc.needToSendGoAway { - sc.needToSendGoAway = false - sc.startFrameWrite(FrameWriteRequest{ - write: &writeGoAway{ - maxStreamID: sc.maxClientStreamID, - code: sc.goAwayCode, - }, - }) - continue - } - if sc.needToSendSettingsAck { - sc.needToSendSettingsAck = false - sc.startFrameWrite(FrameWriteRequest{write: writeSettingsAck{}}) - continue - } - if !sc.inGoAway || sc.goAwayCode == ErrCodeNo { - if wr, ok := sc.writeSched.Pop(); ok { - sc.startFrameWrite(wr) - continue - } - } - if sc.needsFrameFlush { - sc.startFrameWrite(FrameWriteRequest{write: flushFrameWriter{}}) - sc.needsFrameFlush = false // after startFrameWrite, since it sets this true - continue - } - break - } - sc.inFrameScheduleLoop = false -} - -// startGracefulShutdown sends a GOAWAY with ErrCodeNo to tell the -// client we're gracefully shutting down. The connection isn't closed -// until all current streams are done. 
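scheduleFrameWrite's loop above drains work in a fixed priority order: a pending GOAWAY first, then a SETTINGS ACK, then scheduler-selected stream frames, and finally a flush. It feeds directly into the shutdown machinery that follows, whose core idea is "announce GOAWAY, then hard-close on a timer if streams don't drain." A minimal sketch of that idea under assumed names (conn, startGracefulShutdown), not the diffed implementation:

```go
package main

import (
	"fmt"
	"net"
	"time"
)

type conn struct {
	nc            net.Conn
	inGoAway      bool
	shutdownTimer *time.Timer
}

// startGracefulShutdown announces shutdown once, then arms a timer
// that force-closes the connection if open streams never finish.
func (c *conn) startGracefulShutdown(forceCloseIn time.Duration) {
	if c.inGoAway {
		return // idempotent, like goAwayIn below
	}
	c.inGoAway = true
	fmt.Println("queue GOAWAY frame for write")
	if forceCloseIn != 0 {
		c.shutdownTimer = time.AfterFunc(forceCloseIn, func() {
			c.nc.Close() // streams didn't drain in time
		})
	}
}

func main() {
	c1, c2 := net.Pipe()
	defer c2.Close()
	c := &conn{nc: c1}
	c.startGracefulShutdown(50 * time.Millisecond)
	time.Sleep(100 * time.Millisecond) // let the force-close timer fire
}
```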
-func (sc *serverConn) startGracefulShutdown() { - sc.goAwayIn(ErrCodeNo, 0) -} - -func (sc *serverConn) goAway(code ErrCode) { - sc.serveG.check() - var forceCloseIn time.Duration - if code != ErrCodeNo { - forceCloseIn = 250 * time.Millisecond - } else { - // TODO: configurable - forceCloseIn = 1 * time.Second - } - sc.goAwayIn(code, forceCloseIn) -} - -func (sc *serverConn) goAwayIn(code ErrCode, forceCloseIn time.Duration) { - sc.serveG.check() - if sc.inGoAway { - return - } - if forceCloseIn != 0 { - sc.shutDownIn(forceCloseIn) - } - sc.inGoAway = true - sc.needToSendGoAway = true - sc.goAwayCode = code - sc.scheduleFrameWrite() -} - -func (sc *serverConn) shutDownIn(d time.Duration) { - sc.serveG.check() - sc.shutdownTimer = time.NewTimer(d) - sc.shutdownTimerCh = sc.shutdownTimer.C -} - -func (sc *serverConn) resetStream(se StreamError) { - sc.serveG.check() - sc.writeFrame(FrameWriteRequest{write: se}) - if st, ok := sc.streams[se.StreamID]; ok { - st.resetQueued = true - } -} - -// processFrameFromReader processes the serve loop's read from readFrameCh from the -// frame-reading goroutine. -// processFrameFromReader returns whether the connection should be kept open. -func (sc *serverConn) processFrameFromReader(res readFrameResult) bool { - sc.serveG.check() - err := res.err - if err != nil { - if err == ErrFrameTooLarge { - sc.goAway(ErrCodeFrameSize) - return true // goAway will close the loop - } - clientGone := err == io.EOF || err == io.ErrUnexpectedEOF || isClosedConnError(err) - if clientGone { - // TODO: could we also get into this state if - // the peer does a half close - // (e.g. CloseWrite) because they're done - // sending frames but they're still wanting - // our open replies? Investigate. - // TODO: add CloseWrite to crypto/tls.Conn first - // so we have a way to test this? I suppose - // just for testing we could have a non-TLS mode. - return false - } - } else { - f := res.f - if VerboseLogs { - sc.vlogf("http2: server read frame %v", summarizeFrame(f)) - } - err = sc.processFrame(f) - if err == nil { - return true - } - } - - switch ev := err.(type) { - case StreamError: - sc.resetStream(ev) - return true - case goAwayFlowError: - sc.goAway(ErrCodeFlowControl) - return true - case ConnectionError: - sc.logf("http2: server connection error from %v: %v", sc.conn.RemoteAddr(), ev) - sc.goAway(ErrCode(ev)) - return true // goAway will handle shutdown - default: - if res.err != nil { - sc.vlogf("http2: server closing client connection; error reading frame from client %s: %v", sc.conn.RemoteAddr(), err) - } else { - sc.logf("http2: server closing client connection: %v", err) - } - return false - } -} - -func (sc *serverConn) processFrame(f Frame) error { - sc.serveG.check() - - // First frame received must be SETTINGS. - if !sc.sawFirstSettings { - if _, ok := f.(*SettingsFrame); !ok { - return ConnectionError(ErrCodeProtocol) - } - sc.sawFirstSettings = true - } - - switch f := f.(type) { - case *SettingsFrame: - return sc.processSettings(f) - case *MetaHeadersFrame: - return sc.processHeaders(f) - case *WindowUpdateFrame: - return sc.processWindowUpdate(f) - case *PingFrame: - return sc.processPing(f) - case *DataFrame: - return sc.processData(f) - case *RSTStreamFrame: - return sc.processResetStream(f) - case *PriorityFrame: - return sc.processPriority(f) - case *GoAwayFrame: - return sc.processGoAway(f) - case *PushPromiseFrame: - // A client cannot push. 
Thus, servers MUST treat the receipt of a PUSH_PROMISE - // frame as a connection error (Section 5.4.1) of type PROTOCOL_ERROR. - return ConnectionError(ErrCodeProtocol) - default: - sc.vlogf("http2: server ignoring frame: %v", f.Header()) - return nil - } -} - -func (sc *serverConn) processPing(f *PingFrame) error { - sc.serveG.check() - if f.IsAck() { - // 6.7 PING: " An endpoint MUST NOT respond to PING frames - // containing this flag." - return nil - } - if f.StreamID != 0 { - // "PING frames are not associated with any individual - // stream. If a PING frame is received with a stream - // identifier field value other than 0x0, the recipient MUST - // respond with a connection error (Section 5.4.1) of type - // PROTOCOL_ERROR." - return ConnectionError(ErrCodeProtocol) - } - if sc.inGoAway && sc.goAwayCode != ErrCodeNo { - return nil - } - sc.writeFrame(FrameWriteRequest{write: writePingAck{f}}) - return nil -} - -func (sc *serverConn) processWindowUpdate(f *WindowUpdateFrame) error { - sc.serveG.check() - switch { - case f.StreamID != 0: // stream-level flow control - state, st := sc.state(f.StreamID) - if state == stateIdle { - // Section 5.1: "Receiving any frame other than HEADERS - // or PRIORITY on a stream in this state MUST be - // treated as a connection error (Section 5.4.1) of - // type PROTOCOL_ERROR." - return ConnectionError(ErrCodeProtocol) - } - if st == nil { - // "WINDOW_UPDATE can be sent by a peer that has sent a - // frame bearing the END_STREAM flag. This means that a - // receiver could receive a WINDOW_UPDATE frame on a "half - // closed (remote)" or "closed" stream. A receiver MUST - // NOT treat this as an error, see Section 5.1." - return nil - } - if !st.flow.add(int32(f.Increment)) { - return streamError(f.StreamID, ErrCodeFlowControl) - } - default: // connection-level flow control - if !sc.flow.add(int32(f.Increment)) { - return goAwayFlowError{} - } - } - sc.scheduleFrameWrite() - return nil -} - -func (sc *serverConn) processResetStream(f *RSTStreamFrame) error { - sc.serveG.check() - - state, st := sc.state(f.StreamID) - if state == stateIdle { - // 6.4 "RST_STREAM frames MUST NOT be sent for a - // stream in the "idle" state. If a RST_STREAM frame - // identifying an idle stream is received, the - // recipient MUST treat this as a connection error - // (Section 5.4.1) of type PROTOCOL_ERROR. - return ConnectionError(ErrCodeProtocol) - } - if st != nil { - st.cancelCtx() - sc.closeStream(st, streamError(f.StreamID, f.ErrCode)) - } - return nil -} - -func (sc *serverConn) closeStream(st *stream, err error) { - sc.serveG.check() - if st.state == stateIdle || st.state == stateClosed { - panic(fmt.Sprintf("invariant; can't close stream in state %v", st.state)) - } - st.state = stateClosed - if st.isPushed() { - sc.curPushedStreams-- - } else { - sc.curClientStreams-- - } - delete(sc.streams, st.id) - if len(sc.streams) == 0 { - sc.setConnState(http.StateIdle) - if sc.srv.IdleTimeout != 0 { - sc.idleTimer.Reset(sc.srv.IdleTimeout) - } - if h1ServerKeepAlivesDisabled(sc.hs) { - sc.startGracefulShutdown() - } - } - if p := st.body; p != nil { - // Return any buffered unread bytes worth of conn-level flow control. 
- // See golang.org/issue/16481 - sc.sendWindowUpdate(nil, p.Len()) - - p.CloseWithError(err) - } - st.cw.Close() // signals Handler's CloseNotifier, unblocks writes, etc - sc.writeSched.CloseStream(st.id) -} - -func (sc *serverConn) processSettings(f *SettingsFrame) error { - sc.serveG.check() - if f.IsAck() { - sc.unackedSettings-- - if sc.unackedSettings < 0 { - // Why is the peer ACKing settings we never sent? - // The spec doesn't mention this case, but - // hang up on them anyway. - return ConnectionError(ErrCodeProtocol) - } - return nil - } - if err := f.ForeachSetting(sc.processSetting); err != nil { - return err - } - sc.needToSendSettingsAck = true - sc.scheduleFrameWrite() - return nil -} - -func (sc *serverConn) processSetting(s Setting) error { - sc.serveG.check() - if err := s.Valid(); err != nil { - return err - } - if VerboseLogs { - sc.vlogf("http2: server processing setting %v", s) - } - switch s.ID { - case SettingHeaderTableSize: - sc.headerTableSize = s.Val - sc.hpackEncoder.SetMaxDynamicTableSize(s.Val) - case SettingEnablePush: - sc.pushEnabled = s.Val != 0 - case SettingMaxConcurrentStreams: - sc.clientMaxStreams = s.Val - case SettingInitialWindowSize: - return sc.processSettingInitialWindowSize(s.Val) - case SettingMaxFrameSize: - sc.maxFrameSize = int32(s.Val) // the maximum valid s.Val is < 2^31 - case SettingMaxHeaderListSize: - sc.peerMaxHeaderListSize = s.Val - default: - // Unknown setting: "An endpoint that receives a SETTINGS - // frame with any unknown or unsupported identifier MUST - // ignore that setting." - if VerboseLogs { - sc.vlogf("http2: server ignoring unknown setting %v", s) - } - } - return nil -} - -func (sc *serverConn) processSettingInitialWindowSize(val uint32) error { - sc.serveG.check() - // Note: val already validated to be within range by - // processSetting's Valid call. - - // "A SETTINGS frame can alter the initial flow control window - // size for all current streams. When the value of - // SETTINGS_INITIAL_WINDOW_SIZE changes, a receiver MUST - // adjust the size of all stream flow control windows that it - // maintains by the difference between the new value and the - // old value." - old := sc.initialWindowSize - sc.initialWindowSize = int32(val) - growth := sc.initialWindowSize - old // may be negative - for _, st := range sc.streams { - if !st.flow.add(growth) { - // 6.9.2 Initial Flow Control Window Size - // "An endpoint MUST treat a change to - // SETTINGS_INITIAL_WINDOW_SIZE that causes any flow - // control window to exceed the maximum size as a - // connection error (Section 5.4.1) of type - // FLOW_CONTROL_ERROR." - return ConnectionError(ErrCodeFlowControl) - } - } - return nil -} - -func (sc *serverConn) processData(f *DataFrame) error { - sc.serveG.check() - if sc.inGoAway && sc.goAwayCode != ErrCodeNo { - return nil - } - data := f.Data() - - // "If a DATA frame is received whose stream is not in "open" - // or "half closed (local)" state, the recipient MUST respond - // with a stream error (Section 5.4.2) of type STREAM_CLOSED." - id := f.Header().StreamID - state, st := sc.state(id) - if id == 0 || state == stateIdle { - // Section 5.1: "Receiving any frame other than HEADERS - // or PRIORITY on a stream in this state MUST be - // treated as a connection error (Section 5.4.1) of - // type PROTOCOL_ERROR." 
- return ConnectionError(ErrCodeProtocol) - } - if st == nil || state != stateOpen || st.gotTrailerHeader || st.resetQueued { - // This includes sending a RST_STREAM if the stream is - // in stateHalfClosedLocal (which currently means that - // the http.Handler returned, so it's done reading & - // done writing). Try to stop the client from sending - // more DATA. - - // But still enforce their connection-level flow control, - // and return any flow control bytes since we're not going - // to consume them. - if sc.inflow.available() < int32(f.Length) { - return streamError(id, ErrCodeFlowControl) - } - // Deduct the flow control from inflow, since we're - // going to immediately add it back in - // sendWindowUpdate, which also schedules sending the - // frames. - sc.inflow.take(int32(f.Length)) - sc.sendWindowUpdate(nil, int(f.Length)) // conn-level - - if st != nil && st.resetQueued { - // Already have a stream error in flight. Don't send another. - return nil - } - return streamError(id, ErrCodeStreamClosed) - } - if st.body == nil { - panic("internal error: should have a body in this state") - } - - // Sender sending more than they'd declared? - if st.declBodyBytes != -1 && st.bodyBytes+int64(len(data)) > st.declBodyBytes { - st.body.CloseWithError(fmt.Errorf("sender tried to send more than declared Content-Length of %d bytes", st.declBodyBytes)) - return streamError(id, ErrCodeStreamClosed) - } - if f.Length > 0 { - // Check whether the client has flow control quota. - if st.inflow.available() < int32(f.Length) { - return streamError(id, ErrCodeFlowControl) - } - st.inflow.take(int32(f.Length)) - - if len(data) > 0 { - wrote, err := st.body.Write(data) - if err != nil { - return streamError(id, ErrCodeStreamClosed) - } - if wrote != len(data) { - panic("internal error: bad Writer") - } - st.bodyBytes += int64(len(data)) - } - - // Return any padded flow control now, since we won't - // refund it later on body reads. - if pad := int32(f.Length) - int32(len(data)); pad > 0 { - sc.sendWindowUpdate32(nil, pad) - sc.sendWindowUpdate32(st, pad) - } - } - if f.StreamEnded() { - st.endStream() - } - return nil -} - -func (sc *serverConn) processGoAway(f *GoAwayFrame) error { - sc.serveG.check() - if f.ErrCode != ErrCodeNo { - sc.logf("http2: received GOAWAY %+v, starting graceful shutdown", f) - } else { - sc.vlogf("http2: received GOAWAY %+v, starting graceful shutdown", f) - } - sc.startGracefulShutdown() - // http://tools.ietf.org/html/rfc7540#section-6.8 - // We should not create any new streams, which means we should disable push. - sc.pushEnabled = false - return nil -} - -// isPushed reports whether the stream is server-initiated. -func (st *stream) isPushed() bool { - return st.id%2 == 0 -} - -// endStream closes a Request.Body's pipe. It is called when a DATA -// frame says a request body is over (or after trailers). -func (st *stream) endStream() { - sc := st.sc - sc.serveG.check() - - if st.declBodyBytes != -1 && st.declBodyBytes != st.bodyBytes { - st.body.CloseWithError(fmt.Errorf("request declared a Content-Length of %d but only wrote %d bytes", - st.declBodyBytes, st.bodyBytes)) - } else { - st.body.closeWithErrorAndCode(io.EOF, st.copyTrailersToHandlerRequest) - st.body.CloseWithError(io.EOF) - } - st.state = stateHalfClosedRemote -} - -// copyTrailersToHandlerRequest is run in the Handler's goroutine in -// its Request.Body.Read just before it gets io.EOF. 
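processData above is where inbound flow control gets enforced: a DATA frame's full length (padding included) is debited from the window before any bytes are accepted, and padding is refunded immediately via WINDOW_UPDATE. Stripped of the connection/stream linkage, the accounting reduces to a sketch like this (illustrative, not the diffed flow type):

```go
package main

import (
	"errors"
	"fmt"
)

// inflow models the receive-side window that processData checks.
type inflow struct{ avail int32 }

func (f *inflow) available() int32 { return f.avail }

// take debits the window by a received DATA frame's full length,
// padding included; a peer that overshoots commits a flow-control error.
func (f *inflow) take(n int32) error {
	if n > f.avail {
		return errors.New("FLOW_CONTROL_ERROR: peer exceeded advertised window")
	}
	f.avail -= n
	return nil
}

// add credits the window back; a server would queue a WINDOW_UPDATE here.
func (f *inflow) add(n int32) { f.avail += n }

func main() {
	w := inflow{avail: 65535} // HTTP/2 default initial window
	const frameLen, padding = 1024, 100
	if err := w.take(frameLen); err != nil {
		panic(err)
	}
	w.add(padding) // refund padding right away, as processData does
	fmt.Println("window now:", w.available())
}
```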
-func (st *stream) copyTrailersToHandlerRequest() { - for k, vv := range st.trailer { - if _, ok := st.reqTrailer[k]; ok { - // Only copy it over if it was pre-declared. - st.reqTrailer[k] = vv - } - } -} - -func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error { - sc.serveG.check() - id := f.StreamID - if sc.inGoAway { - // Ignore. - return nil - } - // http://tools.ietf.org/html/rfc7540#section-5.1.1 - // Streams initiated by a client MUST use odd-numbered stream - // identifiers. [...] An endpoint that receives an unexpected - // stream identifier MUST respond with a connection error - // (Section 5.4.1) of type PROTOCOL_ERROR. - if id%2 != 1 { - return ConnectionError(ErrCodeProtocol) - } - // A HEADERS frame can be used to create a new stream or - // send a trailer for an open one. If we already have a stream - // open, let it process its own HEADERS frame (trailers at this - // point, if it's valid). - if st := sc.streams[f.StreamID]; st != nil { - if st.resetQueued { - // We're sending RST_STREAM to close the stream, so don't bother - // processing this frame. - return nil - } - return st.processTrailerHeaders(f) - } - - // [...] The identifier of a newly established stream MUST be - // numerically greater than all streams that the initiating - // endpoint has opened or reserved. [...] An endpoint that - // receives an unexpected stream identifier MUST respond with - // a connection error (Section 5.4.1) of type PROTOCOL_ERROR. - if id <= sc.maxClientStreamID { - return ConnectionError(ErrCodeProtocol) - } - sc.maxClientStreamID = id - - if sc.idleTimer != nil { - sc.idleTimer.Stop() - } - - // http://tools.ietf.org/html/rfc7540#section-5.1.2 - // [...] Endpoints MUST NOT exceed the limit set by their peer. An - // endpoint that receives a HEADERS frame that causes their - // advertised concurrent stream limit to be exceeded MUST treat - // this as a stream error (Section 5.4.2) of type PROTOCOL_ERROR - // or REFUSED_STREAM. - if sc.curClientStreams+1 > sc.advMaxStreams { - if sc.unackedSettings == 0 { - // They should know better. - return streamError(id, ErrCodeProtocol) - } - // Assume it's a network race, where they just haven't - // received our last SETTINGS update. But actually - // this can't happen yet, because we don't yet provide - // a way for users to adjust server parameters at - // runtime. - return streamError(id, ErrCodeRefusedStream) - } - - initialState := stateOpen - if f.StreamEnded() { - initialState = stateHalfClosedRemote - } - st := sc.newStream(id, 0, initialState) - - if f.HasPriority() { - if err := checkPriority(f.StreamID, f.Priority); err != nil { - return err - } - sc.writeSched.AdjustStream(st.id, f.Priority) - } - - rw, req, err := sc.newWriterAndRequest(st, f) - if err != nil { - return err - } - st.reqTrailer = req.Trailer - if st.reqTrailer != nil { - st.trailer = make(http.Header) - } - st.body = req.Body.(*requestBody).pipe // may be nil - st.declBodyBytes = req.ContentLength - - handler := sc.handler.ServeHTTP - if f.Truncated { - // Their header list was too long. Send a 431 error. - handler = handleHeaderListTooLong - } else if err := checkValidHTTP2RequestHeaders(req.Header); err != nil { - handler = new400Handler(err) - } - - // The net/http package sets the read deadline from the - // http.Server.ReadTimeout during the TLS handshake, but then - // passes the connection off to us with the deadline already - // set. Disarm it here after the request headers are read, - // similar to how the http1 server works. 
Here it's - // technically more like the http1 Server's ReadHeaderTimeout - // (in Go 1.8), though. That's a more sane option anyway. - if sc.hs.ReadTimeout != 0 { - sc.conn.SetReadDeadline(time.Time{}) - } - - go sc.runHandler(rw, req, handler) - return nil -} - -func (st *stream) processTrailerHeaders(f *MetaHeadersFrame) error { - sc := st.sc - sc.serveG.check() - if st.gotTrailerHeader { - return ConnectionError(ErrCodeProtocol) - } - st.gotTrailerHeader = true - if !f.StreamEnded() { - return streamError(st.id, ErrCodeProtocol) - } - - if len(f.PseudoFields()) > 0 { - return streamError(st.id, ErrCodeProtocol) - } - if st.trailer != nil { - for _, hf := range f.RegularFields() { - key := sc.canonicalHeader(hf.Name) - if !ValidTrailerHeader(key) { - // TODO: send more details to the peer somehow. But http2 has - // no way to send debug data at a stream level. Discuss with - // HTTP folk. - return streamError(st.id, ErrCodeProtocol) - } - st.trailer[key] = append(st.trailer[key], hf.Value) - } - } - st.endStream() - return nil -} - -func checkPriority(streamID uint32, p PriorityParam) error { - if streamID == p.StreamDep { - // Section 5.3.1: "A stream cannot depend on itself. An endpoint MUST treat - // this as a stream error (Section 5.4.2) of type PROTOCOL_ERROR." - // Section 5.3.3 says that a stream can depend on one of its dependencies, - // so it's only self-dependencies that are forbidden. - return streamError(streamID, ErrCodeProtocol) - } - return nil -} - -func (sc *serverConn) processPriority(f *PriorityFrame) error { - if sc.inGoAway { - return nil - } - if err := checkPriority(f.StreamID, f.PriorityParam); err != nil { - return err - } - sc.writeSched.AdjustStream(f.StreamID, f.PriorityParam) - return nil -} - -func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream { - sc.serveG.check() - if id == 0 { - panic("internal error: cannot create stream with id 0") - } - - ctx, cancelCtx := contextWithCancel(sc.baseCtx) - st := &stream{ - sc: sc, - id: id, - state: state, - ctx: ctx, - cancelCtx: cancelCtx, - } - st.cw.Init() - st.flow.conn = &sc.flow // link to conn-level counter - st.flow.add(sc.initialWindowSize) - st.inflow.conn = &sc.inflow // link to conn-level counter - st.inflow.add(initialWindowSize) // TODO: update this when we send a higher initial window size in the initial settings - - sc.streams[id] = st - sc.writeSched.OpenStream(st.id, OpenStreamOptions{PusherID: pusherID}) - if st.isPushed() { - sc.curPushedStreams++ - } else { - sc.curClientStreams++ - } - if sc.curOpenStreams() == 1 { - sc.setConnState(http.StateActive) - } - - return st -} - -func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*responseWriter, *http.Request, error) { - sc.serveG.check() - - rp := requestParam{ - method: f.PseudoValue("method"), - scheme: f.PseudoValue("scheme"), - authority: f.PseudoValue("authority"), - path: f.PseudoValue("path"), - } - - isConnect := rp.method == "CONNECT" - if isConnect { - if rp.path != "" || rp.scheme != "" || rp.authority == "" { - return nil, nil, streamError(f.StreamID, ErrCodeProtocol) - } - } else if rp.method == "" || rp.path == "" || (rp.scheme != "https" && rp.scheme != "http") { - // See 8.1.2.6 Malformed Requests and Responses: - // - // Malformed requests or responses that are detected - // MUST be treated as a stream error (Section 5.4.2) - // of type PROTOCOL_ERROR." 
- // - // 8.1.2.3 Request Pseudo-Header Fields - // "All HTTP/2 requests MUST include exactly one valid - // value for the :method, :scheme, and :path - // pseudo-header fields" - return nil, nil, streamError(f.StreamID, ErrCodeProtocol) - } - - bodyOpen := !f.StreamEnded() - if rp.method == "HEAD" && bodyOpen { - // HEAD requests can't have bodies - return nil, nil, streamError(f.StreamID, ErrCodeProtocol) - } - - rp.header = make(http.Header) - for _, hf := range f.RegularFields() { - rp.header.Add(sc.canonicalHeader(hf.Name), hf.Value) - } - if rp.authority == "" { - rp.authority = rp.header.Get("Host") - } - - rw, req, err := sc.newWriterAndRequestNoBody(st, rp) - if err != nil { - return nil, nil, err - } - if bodyOpen { - st.reqBuf = getRequestBodyBuf() - req.Body.(*requestBody).pipe = &pipe{ - b: &fixedBuffer{buf: st.reqBuf}, - } - - if vv, ok := rp.header["Content-Length"]; ok { - req.ContentLength, _ = strconv.ParseInt(vv[0], 10, 64) - } else { - req.ContentLength = -1 - } - } - return rw, req, nil -} - -type requestParam struct { - method string - scheme, authority, path string - header http.Header -} - -func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp requestParam) (*responseWriter, *http.Request, error) { - sc.serveG.check() - - var tlsState *tls.ConnectionState // nil if not scheme https - if rp.scheme == "https" { - tlsState = sc.tlsState - } - - needsContinue := rp.header.Get("Expect") == "100-continue" - if needsContinue { - rp.header.Del("Expect") - } - // Merge Cookie headers into one "; "-delimited value. - if cookies := rp.header["Cookie"]; len(cookies) > 1 { - rp.header.Set("Cookie", strings.Join(cookies, "; ")) - } - - // Setup Trailers - var trailer http.Header - for _, v := range rp.header["Trailer"] { - for _, key := range strings.Split(v, ",") { - key = http.CanonicalHeaderKey(strings.TrimSpace(key)) - switch key { - case "Transfer-Encoding", "Trailer", "Content-Length": - // Bogus. (copy of http1 rules) - // Ignore. - default: - if trailer == nil { - trailer = make(http.Header) - } - trailer[key] = nil - } - } - } - delete(rp.header, "Trailer") - - var url_ *url.URL - var requestURI string - if rp.method == "CONNECT" { - url_ = &url.URL{Host: rp.authority} - requestURI = rp.authority // mimic HTTP/1 server behavior - } else { - var err error - url_, err = url.ParseRequestURI(rp.path) - if err != nil { - return nil, nil, streamError(st.id, ErrCodeProtocol) - } - requestURI = rp.path - } - - body := &requestBody{ - conn: sc, - stream: st, - needsContinue: needsContinue, - } - req := &http.Request{ - Method: rp.method, - URL: url_, - RemoteAddr: sc.remoteAddrStr, - Header: rp.header, - RequestURI: requestURI, - Proto: "HTTP/2.0", - ProtoMajor: 2, - ProtoMinor: 0, - TLS: tlsState, - Host: rp.authority, - Body: body, - Trailer: trailer, - } - req = requestWithContext(req, st.ctx) - - rws := responseWriterStatePool.Get().(*responseWriterState) - bwSave := rws.bw - *rws = responseWriterState{} // zero all the fields - rws.conn = sc - rws.bw = bwSave - rws.bw.Reset(chunkWriter{rws}) - rws.stream = st - rws.req = req - rws.body = body - - rw := &responseWriter{rws: rws} - return rw, req, nil -} - -var reqBodyCache = make(chan []byte, 8) - -func getRequestBodyBuf() []byte { - select { - case b := <-reqBodyCache: - return b - default: - return make([]byte, initialWindowSize) - } -} - -func putRequestBodyBuf(b []byte) { - select { - case reqBodyCache <- b: - default: - } -} - -// Run on its own goroutine. 
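getRequestBodyBuf and putRequestBodyBuf above use a buffered channel as a lossy free list: reuse a buffer when one is cached, allocate otherwise, and silently drop buffers when the cache is full. The same idiom in isolation; bufSize and the cache depth here are arbitrary:

```go
package main

import "fmt"

const bufSize = 64 << 10

var bufCache = make(chan []byte, 8)

func getBuf() []byte {
	select {
	case b := <-bufCache:
		return b // reuse a cached buffer
	default:
		return make([]byte, bufSize)
	}
}

func putBuf(b []byte) {
	select {
	case bufCache <- b:
	default: // cache full; let the GC reclaim b
	}
}

func main() {
	b := getBuf()
	putBuf(b) // recycled for the next getBuf call
	fmt.Println("got buffer of", len(getBuf()), "bytes")
}
```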
-func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) { - didPanic := true - defer func() { - rw.rws.stream.cancelCtx() - if didPanic { - e := recover() - sc.writeFrameFromHandler(FrameWriteRequest{ - write: handlerPanicRST{rw.rws.stream.id}, - stream: rw.rws.stream, - }) - // Same as net/http: - if shouldLogPanic(e) { - const size = 64 << 10 - buf := make([]byte, size) - buf = buf[:runtime.Stack(buf, false)] - sc.logf("http2: panic serving %v: %v\n%s", sc.conn.RemoteAddr(), e, buf) - } - return - } - rw.handlerDone() - }() - handler(rw, req) - didPanic = false -} - -func handleHeaderListTooLong(w http.ResponseWriter, r *http.Request) { - // 10.5.1 Limits on Header Block Size: - // .. "A server that receives a larger header block than it is - // willing to handle can send an HTTP 431 (Request Header Fields Too - // Large) status code" - const statusRequestHeaderFieldsTooLarge = 431 // only in Go 1.6+ - w.WriteHeader(statusRequestHeaderFieldsTooLarge) - io.WriteString(w, "
<h1>HTTP Error 431</h1><p>Request Header Field(s) Too Large</p>
") -} - -// called from handler goroutines. -// h may be nil. -func (sc *serverConn) writeHeaders(st *stream, headerData *writeResHeaders) error { - sc.serveG.checkNotOn() // NOT on - var errc chan error - if headerData.h != nil { - // If there's a header map (which we don't own), we have to block on - // waiting for this frame to be written, so an http.Flush mid-handler - // writes out the correct value of keys before a handler later potentially - // mutates it. - errc = errChanPool.Get().(chan error) - } - if err := sc.writeFrameFromHandler(FrameWriteRequest{ - write: headerData, - stream: st, - done: errc, - }); err != nil { - return err - } - if errc != nil { - select { - case err := <-errc: - errChanPool.Put(errc) - return err - case <-sc.doneServing: - return errClientDisconnected - case <-st.cw: - return errStreamClosed - } - } - return nil -} - -// called from handler goroutines. -func (sc *serverConn) write100ContinueHeaders(st *stream) { - sc.writeFrameFromHandler(FrameWriteRequest{ - write: write100ContinueHeadersFrame{st.id}, - stream: st, - }) -} - -// A bodyReadMsg tells the server loop that the http.Handler read n -// bytes of the DATA from the client on the given stream. -type bodyReadMsg struct { - st *stream - n int -} - -// called from handler goroutines. -// Notes that the handler for the given stream ID read n bytes of its body -// and schedules flow control tokens to be sent. -func (sc *serverConn) noteBodyReadFromHandler(st *stream, n int, err error) { - sc.serveG.checkNotOn() // NOT on - if n > 0 { - select { - case sc.bodyReadCh <- bodyReadMsg{st, n}: - case <-sc.doneServing: - } - } - if err == io.EOF { - if buf := st.reqBuf; buf != nil { - st.reqBuf = nil // shouldn't matter; field unused by other - putRequestBodyBuf(buf) - } - } -} - -func (sc *serverConn) noteBodyRead(st *stream, n int) { - sc.serveG.check() - sc.sendWindowUpdate(nil, n) // conn-level - if st.state != stateHalfClosedRemote && st.state != stateClosed { - // Don't send this WINDOW_UPDATE if the stream is closed - // remotely. - sc.sendWindowUpdate(st, n) - } -} - -// st may be nil for conn-level -func (sc *serverConn) sendWindowUpdate(st *stream, n int) { - sc.serveG.check() - // "The legal range for the increment to the flow control - // window is 1 to 2^31-1 (2,147,483,647) octets." - // A Go Read call on 64-bit machines could in theory read - // more than this. Very unlikely, but we handle it here - // rather than elsewhere for now. - const maxUint31 = 1<<31 - 1 - for n >= maxUint31 { - sc.sendWindowUpdate32(st, maxUint31) - n -= maxUint31 - } - sc.sendWindowUpdate32(st, int32(n)) -} - -// st may be nil for conn-level -func (sc *serverConn) sendWindowUpdate32(st *stream, n int32) { - sc.serveG.check() - if n == 0 { - return - } - if n < 0 { - panic("negative update") - } - var streamID uint32 - if st != nil { - streamID = st.id - } - sc.writeFrame(FrameWriteRequest{ - write: writeWindowUpdate{streamID: streamID, n: uint32(n)}, - stream: st, - }) - var ok bool - if st == nil { - ok = sc.inflow.add(n) - } else { - ok = st.inflow.add(n) - } - if !ok { - panic("internal error; sent too many window updates without decrements?") - } -} - -// requestBody is the Handler's Request.Body type. -// Read and Close may be called concurrently. 
-type requestBody struct { - stream *stream - conn *serverConn - closed bool // for use by Close only - sawEOF bool // for use by Read only - pipe *pipe // non-nil if we have a HTTP entity message body - needsContinue bool // need to send a 100-continue -} - -func (b *requestBody) Close() error { - if b.pipe != nil && !b.closed { - b.pipe.BreakWithError(errClosedBody) - } - b.closed = true - return nil -} - -func (b *requestBody) Read(p []byte) (n int, err error) { - if b.needsContinue { - b.needsContinue = false - b.conn.write100ContinueHeaders(b.stream) - } - if b.pipe == nil || b.sawEOF { - return 0, io.EOF - } - n, err = b.pipe.Read(p) - if err == io.EOF { - b.sawEOF = true - } - if b.conn == nil && inTests { - return - } - b.conn.noteBodyReadFromHandler(b.stream, n, err) - return -} - -// responseWriter is the http.ResponseWriter implementation. It's -// intentionally small (1 pointer wide) to minimize garbage. The -// responseWriterState pointer inside is zeroed at the end of a -// request (in handlerDone) and calls on the responseWriter thereafter -// simply crash (caller's mistake), but the much larger responseWriterState -// and buffers are reused between multiple requests. -type responseWriter struct { - rws *responseWriterState -} - -// Optional http.ResponseWriter interfaces implemented. -var ( - _ http.CloseNotifier = (*responseWriter)(nil) - _ http.Flusher = (*responseWriter)(nil) - _ stringWriter = (*responseWriter)(nil) -) - -type responseWriterState struct { - // immutable within a request: - stream *stream - req *http.Request - body *requestBody // to close at end of request, if DATA frames didn't - conn *serverConn - - // TODO: adjust buffer writing sizes based on server config, frame size updates from peer, etc - bw *bufio.Writer // writing to a chunkWriter{this *responseWriterState} - - // mutated by http.Handler goroutine: - handlerHeader http.Header // nil until called - snapHeader http.Header // snapshot of handlerHeader at WriteHeader time - trailers []string // set in writeChunk - status int // status code passed to WriteHeader - wroteHeader bool // WriteHeader called (explicitly or implicitly). Not necessarily sent to user yet. - sentHeader bool // have we sent the header frame? - handlerDone bool // handler has finished - - sentContentLen int64 // non-zero if handler set a Content-Length header - wroteBytes int64 - - closeNotifierMu sync.Mutex // guards closeNotifierCh - closeNotifierCh chan bool // nil until first used -} - -type chunkWriter struct{ rws *responseWriterState } - -func (cw chunkWriter) Write(p []byte) (n int, err error) { return cw.rws.writeChunk(p) } - -func (rws *responseWriterState) hasTrailers() bool { return len(rws.trailers) != 0 } - -// declareTrailer is called for each Trailer header when the -// response header is written. It notes that a header will need to be -// written in the trailers at the end of the response. -func (rws *responseWriterState) declareTrailer(k string) { - k = http.CanonicalHeaderKey(k) - if !ValidTrailerHeader(k) { - // Forbidden by RFC 2616 14.40. - rws.conn.logf("ignoring invalid trailer %q", k) - return - } - if !strSliceContains(rws.trailers, k) { - rws.trailers = append(rws.trailers, k) - } -} - -// writeChunk writes chunks from the bufio.Writer. But because -// bufio.Writer may bypass its chunking, sometimes p may be -// arbitrarily large. -// -// writeChunk is also responsible (on the first chunk) for sending the -// HEADER response. 
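responseWriter above is deliberately one pointer wide: the heavy responseWriterState comes from a sync.Pool, and handlerDone detaches it so that any call on the ResponseWriter after the handler returns panics loudly instead of scribbling on a recycled request's state. A reduced model of that design (illustrative names, not the diffed types):

```go
package main

import (
	"fmt"
	"sync"
)

type writerState struct{ wroteBytes int64 }

var statePool = sync.Pool{New: func() interface{} { return new(writerState) }}

// writer is one pointer wide, like responseWriter.
type writer struct{ ws *writerState }

func newWriter() *writer {
	ws := statePool.Get().(*writerState)
	*ws = writerState{} // zero the recycled state, as the diffed code does at Get time
	return &writer{ws: ws}
}

func (w *writer) Write(p []byte) (int, error) {
	if w.ws == nil {
		panic("Write called after handler finished") // caller's mistake
	}
	w.ws.wroteBytes += int64(len(p))
	return len(p), nil
}

func (w *writer) handlerDone() {
	ws := w.ws
	w.ws = nil // later calls crash; the big state struct is recycled
	statePool.Put(ws)
}

func main() {
	w := newWriter()
	w.Write([]byte("hello"))
	w.handlerDone()
	fmt.Println("state returned to pool")
}
```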
-func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { - if !rws.wroteHeader { - rws.writeHeader(200) - } - - isHeadResp := rws.req.Method == "HEAD" - if !rws.sentHeader { - rws.sentHeader = true - var ctype, clen string - if clen = rws.snapHeader.Get("Content-Length"); clen != "" { - rws.snapHeader.Del("Content-Length") - clen64, err := strconv.ParseInt(clen, 10, 64) - if err == nil && clen64 >= 0 { - rws.sentContentLen = clen64 - } else { - clen = "" - } - } - if clen == "" && rws.handlerDone && bodyAllowedForStatus(rws.status) && (len(p) > 0 || !isHeadResp) { - clen = strconv.Itoa(len(p)) - } - _, hasContentType := rws.snapHeader["Content-Type"] - if !hasContentType && bodyAllowedForStatus(rws.status) { - ctype = http.DetectContentType(p) - } - var date string - if _, ok := rws.snapHeader["Date"]; !ok { - // TODO(bradfitz): be faster here, like net/http? measure. - date = time.Now().UTC().Format(http.TimeFormat) - } - - for _, v := range rws.snapHeader["Trailer"] { - foreachHeaderElement(v, rws.declareTrailer) - } - - endStream := (rws.handlerDone && !rws.hasTrailers() && len(p) == 0) || isHeadResp - err = rws.conn.writeHeaders(rws.stream, &writeResHeaders{ - streamID: rws.stream.id, - httpResCode: rws.status, - h: rws.snapHeader, - endStream: endStream, - contentType: ctype, - contentLength: clen, - date: date, - }) - if err != nil { - return 0, err - } - if endStream { - return 0, nil - } - } - if isHeadResp { - return len(p), nil - } - if len(p) == 0 && !rws.handlerDone { - return 0, nil - } - - if rws.handlerDone { - rws.promoteUndeclaredTrailers() - } - - endStream := rws.handlerDone && !rws.hasTrailers() - if len(p) > 0 || endStream { - // only send a 0 byte DATA frame if we're ending the stream. - if err := rws.conn.writeDataFromHandler(rws.stream, p, endStream); err != nil { - return 0, err - } - } - - if rws.handlerDone && rws.hasTrailers() { - err = rws.conn.writeHeaders(rws.stream, &writeResHeaders{ - streamID: rws.stream.id, - h: rws.handlerHeader, - trailers: rws.trailers, - endStream: true, - }) - return len(p), err - } - return len(p), nil -} - -// TrailerPrefix is a magic prefix for ResponseWriter.Header map keys -// that, if present, signals that the map entry is actually for -// the response trailers, and not the response headers. The prefix -// is stripped after the ServeHTTP call finishes and the values are -// sent in the trailers. -// -// This mechanism is intended only for trailers that are not known -// prior to the headers being written. If the set of trailers is fixed -// or known before the header is written, the normal Go trailers mechanism -// is preferred: -// https://golang.org/pkg/net/http/#ResponseWriter -// https://golang.org/pkg/net/http/#example_ResponseWriter_trailers -const TrailerPrefix = "Trailer:" - -// promoteUndeclaredTrailers permits http.Handlers to set trailers -// after the header has already been flushed. Because the Go -// ResponseWriter interface has no way to set Trailers (only the -// Header), and because we didn't want to expand the ResponseWriter -// interface, and because nobody used trailers, and because RFC 2616 -// says you SHOULD (but not must) predeclare any trailers in the -// header, the official ResponseWriter rules said trailers in Go must -// be predeclared, and then we reuse the same ResponseWriter.Header() -// map to mean both Headers and Trailers. When it's time to write the -// Trailers, we pick out the fields of Headers that were declared as -// trailers. 
That worked for a while, until we found the first major -// user of Trailers in the wild: gRPC (using them only over http2), -// and gRPC libraries permit setting trailers mid-stream without -// predeclaring them. So: change of plans. We still permit the old -// way, but we also permit this hack: if a Header() key begins with -// "Trailer:", the suffix of that key is a Trailer. Because ':' is an -// invalid token byte anyway, there is no ambiguity. (And it's already -// filtered out.) It's mildly hacky, but not terrible. -// -// This method runs after the Handler is done and promotes any Header -// fields to be trailers. -func (rws *responseWriterState) promoteUndeclaredTrailers() { - for k, vv := range rws.handlerHeader { - if !strings.HasPrefix(k, TrailerPrefix) { - continue - } - trailerKey := strings.TrimPrefix(k, TrailerPrefix) - rws.declareTrailer(trailerKey) - rws.handlerHeader[http.CanonicalHeaderKey(trailerKey)] = vv - } - - if len(rws.trailers) > 1 { - sorter := sorterPool.Get().(*sorter) - sorter.SortStrings(rws.trailers) - sorterPool.Put(sorter) - } -} - -func (w *responseWriter) Flush() { - rws := w.rws - if rws == nil { - panic("Flush called after Handler finished") - } - if rws.bw.Buffered() > 0 { - if err := rws.bw.Flush(); err != nil { - // Ignore the error. The frame writer already knows. - return - } - } else { - // The bufio.Writer won't call chunkWriter.Write - // (writeChunk) with zero bytes, so we have to do it - // ourselves to force the HTTP response header and/or - // final DATA frame (with END_STREAM) to be sent. - rws.writeChunk(nil) - } -} - -func (w *responseWriter) CloseNotify() <-chan bool { - rws := w.rws - if rws == nil { - panic("CloseNotify called after Handler finished") - } - rws.closeNotifierMu.Lock() - ch := rws.closeNotifierCh - if ch == nil { - ch = make(chan bool, 1) - rws.closeNotifierCh = ch - cw := rws.stream.cw - go func() { - cw.Wait() // wait for close - ch <- true - }() - } - rws.closeNotifierMu.Unlock() - return ch -} - -func (w *responseWriter) Header() http.Header { - rws := w.rws - if rws == nil { - panic("Header called after Handler finished") - } - if rws.handlerHeader == nil { - rws.handlerHeader = make(http.Header) - } - return rws.handlerHeader -} - -func (w *responseWriter) WriteHeader(code int) { - rws := w.rws - if rws == nil { - panic("WriteHeader called after Handler finished") - } - rws.writeHeader(code) -} - -func (rws *responseWriterState) writeHeader(code int) { - if !rws.wroteHeader { - rws.wroteHeader = true - rws.status = code - if len(rws.handlerHeader) > 0 { - rws.snapHeader = cloneHeader(rws.handlerHeader) - } - } -} - -func cloneHeader(h http.Header) http.Header { - h2 := make(http.Header, len(h)) - for k, vv := range h { - vv2 := make([]string, len(vv)) - copy(vv2, vv) - h2[k] = vv2 - } - return h2 -} - -// The Life Of A Write is like this: -// -// * Handler calls w.Write or w.WriteString -> -// * -> rws.bw (*bufio.Writer) -> -// * (Handler might call Flush) -// * -> chunkWriter{rws} -// * -> responseWriterState.writeChunk(p []byte) -// * -> responseWriterState.writeChunk (most of the magic; see comment there) -func (w *responseWriter) Write(p []byte) (n int, err error) { - return w.write(len(p), p, "") -} - -func (w *responseWriter) WriteString(s string) (n int, err error) { - return w.write(len(s), nil, s) -} - -// either dataB or dataS is non-zero. 
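TrailerPrefix and promoteUndeclaredTrailers above give handlers an escape hatch for trailers that aren't known until after the headers have been flushed. From a handler's point of view the mechanism looks like the sketch below; the route and certificate paths are placeholders, and the server must be speaking HTTP/2 for the trailer to be sent:

```go
package main

import (
	"io"
	"net/http"
)

func handler(w http.ResponseWriter, r *http.Request) {
	w.WriteHeader(http.StatusOK)
	io.WriteString(w, "streamed body bytes...")
	if f, ok := w.(http.Flusher); ok {
		f.Flush() // headers are on the wire; predeclaring trailers is too late now
	}
	// Keys with the "Trailer:" prefix are promoted to real trailers
	// after the handler returns (the gRPC-style use case above).
	w.Header().Set("Trailer:Grpc-Status", "0")
}

func main() {
	http.HandleFunc("/", handler)
	// HTTP/2 with net/http requires TLS; cert.pem/key.pem are placeholders.
	http.ListenAndServeTLS(":8443", "cert.pem", "key.pem", nil)
}
```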
-func (w *responseWriter) write(lenData int, dataB []byte, dataS string) (n int, err error) { - rws := w.rws - if rws == nil { - panic("Write called after Handler finished") - } - if !rws.wroteHeader { - w.WriteHeader(200) - } - if !bodyAllowedForStatus(rws.status) { - return 0, http.ErrBodyNotAllowed - } - rws.wroteBytes += int64(len(dataB)) + int64(len(dataS)) // only one can be set - if rws.sentContentLen != 0 && rws.wroteBytes > rws.sentContentLen { - // TODO: send a RST_STREAM - return 0, errors.New("http2: handler wrote more than declared Content-Length") - } - - if dataB != nil { - return rws.bw.Write(dataB) - } else { - return rws.bw.WriteString(dataS) - } -} - -func (w *responseWriter) handlerDone() { - rws := w.rws - rws.handlerDone = true - w.Flush() - w.rws = nil - responseWriterStatePool.Put(rws) -} - -// Push errors. -var ( - ErrRecursivePush = errors.New("http2: recursive push not allowed") - ErrPushLimitReached = errors.New("http2: push would exceed peer's SETTINGS_MAX_CONCURRENT_STREAMS") -) - -// pushOptions is the internal version of http.PushOptions, which we -// cannot include here because it's only defined in Go 1.8 and later. -type pushOptions struct { - Method string - Header http.Header -} - -func (w *responseWriter) push(target string, opts pushOptions) error { - st := w.rws.stream - sc := st.sc - sc.serveG.checkNotOn() - - // No recursive pushes: "PUSH_PROMISE frames MUST only be sent on a peer-initiated stream." - // http://tools.ietf.org/html/rfc7540#section-6.6 - if st.isPushed() { - return ErrRecursivePush - } - - // Default options. - if opts.Method == "" { - opts.Method = "GET" - } - if opts.Header == nil { - opts.Header = http.Header{} - } - wantScheme := "http" - if w.rws.req.TLS != nil { - wantScheme = "https" - } - - // Validate the request. - u, err := url.Parse(target) - if err != nil { - return err - } - if u.Scheme == "" { - if !strings.HasPrefix(target, "/") { - return fmt.Errorf("target must be an absolute URL or an absolute path: %q", target) - } - u.Scheme = wantScheme - u.Host = w.rws.req.Host - } else { - if u.Scheme != wantScheme { - return fmt.Errorf("cannot push URL with scheme %q from request with scheme %q", u.Scheme, wantScheme) - } - if u.Host == "" { - return errors.New("URL must have a host") - } - } - for k := range opts.Header { - if strings.HasPrefix(k, ":") { - return fmt.Errorf("promised request headers cannot include pseudo header %q", k) - } - // These headers are meaningful only if the request has a body, - // but PUSH_PROMISE requests cannot have a body. - // http://tools.ietf.org/html/rfc7540#section-8.2 - // Also disallow Host, since the promised URL must be absolute. 
- switch strings.ToLower(k) { - case "content-length", "content-encoding", "trailer", "te", "expect", "host": - return fmt.Errorf("promised request headers cannot include %q", k) - } - } - if err := checkValidHTTP2RequestHeaders(opts.Header); err != nil { - return err - } - - // The RFC effectively limits promised requests to GET and HEAD: - // "Promised requests MUST be cacheable [GET, HEAD, or POST], and MUST be safe [GET or HEAD]" - // http://tools.ietf.org/html/rfc7540#section-8.2 - if opts.Method != "GET" && opts.Method != "HEAD" { - return fmt.Errorf("method %q must be GET or HEAD", opts.Method) - } - - msg := startPushRequest{ - parent: st, - method: opts.Method, - url: u, - header: cloneHeader(opts.Header), - done: errChanPool.Get().(chan error), - } - - select { - case <-sc.doneServing: - return errClientDisconnected - case <-st.cw: - return errStreamClosed - case sc.wantStartPushCh <- msg: - } - - select { - case <-sc.doneServing: - return errClientDisconnected - case <-st.cw: - return errStreamClosed - case err := <-msg.done: - errChanPool.Put(msg.done) - return err - } -} - -type startPushRequest struct { - parent *stream - method string - url *url.URL - header http.Header - done chan error -} - -func (sc *serverConn) startPush(msg startPushRequest) { - sc.serveG.check() - - // http://tools.ietf.org/html/rfc7540#section-6.6. - // PUSH_PROMISE frames MUST only be sent on a peer-initiated stream that - // is in either the "open" or "half-closed (remote)" state. - if msg.parent.state != stateOpen && msg.parent.state != stateHalfClosedRemote { - // responseWriter.Push checks that the stream is peer-initiated. - msg.done <- errStreamClosed - return - } - - // http://tools.ietf.org/html/rfc7540#section-6.6. - if !sc.pushEnabled { - msg.done <- http.ErrNotSupported - return - } - - // PUSH_PROMISE frames must be sent in increasing order by stream ID, so - // we allocate an ID for the promised stream lazily, when the PUSH_PROMISE - // is written. Once the ID is allocated, we start the request handler. - allocatePromisedID := func() (uint32, error) { - sc.serveG.check() - - // Check this again, just in case. Technically, we might have received - // an updated SETTINGS by the time we got around to writing this frame. - if !sc.pushEnabled { - return 0, http.ErrNotSupported - } - // http://tools.ietf.org/html/rfc7540#section-6.5.2. - if sc.curPushedStreams+1 > sc.clientMaxStreams { - return 0, ErrPushLimitReached - } - - // http://tools.ietf.org/html/rfc7540#section-5.1.1. - // Streams initiated by the server MUST use even-numbered identifiers. - // A server that is unable to establish a new stream identifier can send a GOAWAY - // frame so that the client is forced to open a new connection for new streams. - if sc.maxPushPromiseID+2 >= 1<<31 { - sc.startGracefulShutdown() - return 0, ErrPushLimitReached - } - sc.maxPushPromiseID += 2 - promisedID := sc.maxPushPromiseID - - // http://tools.ietf.org/html/rfc7540#section-8.2. - // Strictly speaking, the new stream should start in "reserved (local)", then - // transition to "half closed (remote)" after sending the initial HEADERS, but - // we start in "half closed (remote)" for simplicity. - // See further comments at the definition of stateHalfClosedRemote. 
- promised := sc.newStream(promisedID, msg.parent.id, stateHalfClosedRemote) - rw, req, err := sc.newWriterAndRequestNoBody(promised, requestParam{ - method: msg.method, - scheme: msg.url.Scheme, - authority: msg.url.Host, - path: msg.url.RequestURI(), - header: cloneHeader(msg.header), // clone since handler runs concurrently with writing the PUSH_PROMISE - }) - if err != nil { - // Should not happen, since we've already validated msg.url. - panic(fmt.Sprintf("newWriterAndRequestNoBody(%+v): %v", msg.url, err)) - } - - go sc.runHandler(rw, req, sc.handler.ServeHTTP) - return promisedID, nil - } - - sc.writeFrame(FrameWriteRequest{ - write: &writePushPromise{ - streamID: msg.parent.id, - method: msg.method, - url: msg.url, - h: msg.header, - allocatePromisedID: allocatePromisedID, - }, - stream: msg.parent, - done: msg.done, - }) -} - -// foreachHeaderElement splits v according to the "#rule" construction -// in RFC 2616 section 2.1 and calls fn for each non-empty element. -func foreachHeaderElement(v string, fn func(string)) { - v = textproto.TrimString(v) - if v == "" { - return - } - if !strings.Contains(v, ",") { - fn(v) - return - } - for _, f := range strings.Split(v, ",") { - if f = textproto.TrimString(f); f != "" { - fn(f) - } - } -} - -// From http://httpwg.org/specs/rfc7540.html#rfc.section.8.1.2.2 -var connHeaders = []string{ - "Connection", - "Keep-Alive", - "Proxy-Connection", - "Transfer-Encoding", - "Upgrade", -} - -// checkValidHTTP2RequestHeaders checks whether h is a valid HTTP/2 request, -// per RFC 7540 Section 8.1.2.2. -// The returned error is reported to users. -func checkValidHTTP2RequestHeaders(h http.Header) error { - for _, k := range connHeaders { - if _, ok := h[k]; ok { - return fmt.Errorf("request header %q is not valid in HTTP/2", k) - } - } - te := h["Te"] - if len(te) > 0 && (len(te) > 1 || (te[0] != "trailers" && te[0] != "")) { - return errors.New(`request header "TE" may only be "trailers" in HTTP/2`) - } - return nil -} - -func new400Handler(err error) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - http.Error(w, err.Error(), http.StatusBadRequest) - } -} - -// ValidTrailerHeader reports whether name is a valid header field name to appear -// in trailers. -// See: http://tools.ietf.org/html/rfc7230#section-4.1.2 -func ValidTrailerHeader(name string) bool { - name = http.CanonicalHeaderKey(name) - if strings.HasPrefix(name, "If-") || badTrailer[name] { - return false - } - return true -} - -var badTrailer = map[string]bool{ - "Authorization": true, - "Cache-Control": true, - "Connection": true, - "Content-Encoding": true, - "Content-Length": true, - "Content-Range": true, - "Content-Type": true, - "Expect": true, - "Host": true, - "Keep-Alive": true, - "Max-Forwards": true, - "Pragma": true, - "Proxy-Authenticate": true, - "Proxy-Authorization": true, - "Proxy-Connection": true, - "Range": true, - "Realm": true, - "Te": true, - "Trailer": true, - "Transfer-Encoding": true, - "Www-Authenticate": true, -} - -// h1ServerShutdownChan returns a channel that will be closed when the -// provided *http.Server wants to shut down. -// -// This is a somewhat hacky way to get at http1 innards. It works -// when the http2 code is bundled into the net/http package in the -// standard library. The alternatives ended up making the cmd/go tool -// depend on http Servers. This is the lightest option for now. -// This is tested via the TestServeShutdown* tests in net/http. 
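The push machinery above (push, startPush, and the header validation around them) is what backs the http.Pusher interface that handlers see on Go 1.8+. A minimal sketch of a handler driving it; the asset path and certificate filenames are placeholders, and Push is best-effort by design:

    package main

    import (
        "io"
        "log"
        "net/http"
    )

    func serveIndex(w http.ResponseWriter, r *http.Request) {
        if pusher, ok := w.(http.Pusher); ok {
            // Fails with ErrRecursivePush on pushed streams, and with
            // ErrPushLimitReached when the peer's stream limit is hit.
            if err := pusher.Push("/static/app.css", nil); err != nil {
                log.Printf("push failed: %v", err)
            }
        }
        io.WriteString(w, `<html><head><link rel="stylesheet" href="/static/app.css"></head></html>`)
    }

    func main() {
        http.HandleFunc("/", serveIndex)
        log.Fatal(http.ListenAndServeTLS(":8443", "cert.pem", "key.pem", nil))
    }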
-func h1ServerShutdownChan(hs *http.Server) <-chan struct{} { - if fn := testh1ServerShutdownChan; fn != nil { - return fn(hs) - } - var x interface{} = hs - type I interface { - getDoneChan() <-chan struct{} - } - if hs, ok := x.(I); ok { - return hs.getDoneChan() - } - return nil -} - -// optional test hook for h1ServerShutdownChan. -var testh1ServerShutdownChan func(hs *http.Server) <-chan struct{} - -// h1ServerKeepAlivesDisabled reports whether hs has its keep-alives -// disabled. See comments on h1ServerShutdownChan above for why -// the code is written this way. -func h1ServerKeepAlivesDisabled(hs *http.Server) bool { - var x interface{} = hs - type I interface { - doKeepAlives() bool - } - if hs, ok := x.(I); ok { - return !hs.doKeepAlives() - } - return false -} diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go deleted file mode 100644 index 0c7e859db1ed..000000000000 --- a/vendor/golang.org/x/net/http2/transport.go +++ /dev/null @@ -1,2129 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Transport code. - -package http2 - -import ( - "bufio" - "bytes" - "compress/gzip" - "crypto/rand" - "crypto/tls" - "errors" - "fmt" - "io" - "io/ioutil" - "log" - "math" - "net" - "net/http" - "sort" - "strconv" - "strings" - "sync" - "time" - - "golang.org/x/net/http2/hpack" - "golang.org/x/net/idna" - "golang.org/x/net/lex/httplex" -) - -const ( - // transportDefaultConnFlow is how many connection-level flow control - // tokens we give the server at start-up, past the default 64k. - transportDefaultConnFlow = 1 << 30 - - // transportDefaultStreamFlow is how many stream-level flow - // control tokens we announce to the peer, and how many bytes - // we buffer per stream. - transportDefaultStreamFlow = 4 << 20 - - // transportDefaultStreamMinRefresh is the minimum number of bytes we'll send - // a stream-level WINDOW_UPDATE for at a time. - transportDefaultStreamMinRefresh = 4 << 10 - - defaultUserAgent = "Go-http-client/2.0" -) - -// Transport is an HTTP/2 Transport. -// -// A Transport internally caches connections to servers. It is safe -// for concurrent use by multiple goroutines. -type Transport struct { - // DialTLS specifies an optional dial function for creating - // TLS connections for requests. - // - // If DialTLS is nil, tls.Dial is used. - // - // If the returned net.Conn has a ConnectionState method like tls.Conn, - // it will be used to set http.Response.TLS. - DialTLS func(network, addr string, cfg *tls.Config) (net.Conn, error) - - // TLSClientConfig specifies the TLS configuration to use with - // tls.Client. If nil, the default configuration is used. - TLSClientConfig *tls.Config - - // ConnPool optionally specifies an alternate connection pool to use. - // If nil, the default is used. - ConnPool ClientConnPool - - // DisableCompression, if true, prevents the Transport from - // requesting compression with an "Accept-Encoding: gzip" - // request header when the Request contains no existing - // Accept-Encoding value. If the Transport requests gzip on - // its own and gets a gzipped response, it's transparently - // decoded in the Response.Body. However, if the user - // explicitly requested gzip it is not automatically - // uncompressed. - DisableCompression bool - - // AllowHTTP, if true, permits HTTP/2 requests using the insecure, - // plain-text "http" scheme. 
Note that this does not enable h2c support. - AllowHTTP bool - - // MaxHeaderListSize is the http2 SETTINGS_MAX_HEADER_LIST_SIZE to - // send in the initial settings frame. It is how many bytes - // of response headers are allowed. Unlike the http2 spec, zero here - // means to use a default limit (currently 10MB). If you actually - // want to advertise an unlimited value to the peer, Transport - // interprets the highest possible value here (0xffffffff or 1<<32-1) - // to mean no limit. - MaxHeaderListSize uint32 - - // t1, if non-nil, is the standard library Transport using - // this transport. Its settings are used (but not its - // RoundTrip method, etc). - t1 *http.Transport - - connPoolOnce sync.Once - connPoolOrDef ClientConnPool // non-nil version of ConnPool -} - -func (t *Transport) maxHeaderListSize() uint32 { - if t.MaxHeaderListSize == 0 { - return 10 << 20 - } - if t.MaxHeaderListSize == 0xffffffff { - return 0 - } - return t.MaxHeaderListSize -} - -func (t *Transport) disableCompression() bool { - return t.DisableCompression || (t.t1 != nil && t.t1.DisableCompression) -} - -var errTransportVersion = errors.New("http2: ConfigureTransport is only supported starting at Go 1.6") - -// ConfigureTransport configures a net/http HTTP/1 Transport to use HTTP/2. -// It requires Go 1.6 or later and returns an error if the net/http package is too old -// or if t1 has already been HTTP/2-enabled. -func ConfigureTransport(t1 *http.Transport) error { - _, err := configureTransport(t1) // in configure_transport.go (go1.6) or not_go16.go - return err -} - -func (t *Transport) connPool() ClientConnPool { - t.connPoolOnce.Do(t.initConnPool) - return t.connPoolOrDef -} - -func (t *Transport) initConnPool() { - if t.ConnPool != nil { - t.connPoolOrDef = t.ConnPool - } else { - t.connPoolOrDef = &clientConnPool{t: t} - } -} - -// ClientConn is the state of a single HTTP/2 client connection to an -// HTTP/2 server. -type ClientConn struct { - t *Transport - tconn net.Conn // usually *tls.Conn, except specialized impls - tlsState *tls.ConnectionState // nil only for specialized impls - singleUse bool // whether being used for a single http.Request - - // readLoop goroutine fields: - readerDone chan struct{} // closed on error - readerErr error // set before readerDone is closed - - idleTimeout time.Duration // or 0 for never - idleTimer *time.Timer - - mu sync.Mutex // guards following - cond *sync.Cond // hold mu; broadcast on flow/closed changes - flow flow // our conn-level flow control quota (cs.flow is per stream) - inflow flow // peer's conn-level flow control - closed bool - wantSettingsAck bool // we sent a SETTINGS frame and haven't heard back - goAway *GoAwayFrame // if non-nil, the GoAwayFrame we received - goAwayDebug string // goAway frame's debug data, retained as a string - streams map[uint32]*clientStream // client-initiated - nextStreamID uint32 - pings map[[8]byte]chan struct{} // in flight ping data to notification channel - bw *bufio.Writer - br *bufio.Reader - fr *Framer - lastActive time.Time - // Settings from peer: (also guarded by mu) - maxFrameSize uint32 - maxConcurrentStreams uint32 - initialWindowSize uint32 - - hbuf bytes.Buffer // HPACK encoder writes into this - henc *hpack.Encoder - freeBuf [][]byte - - wmu sync.Mutex // held while writing; acquire AFTER mu if holding both - werr error // first write error that has occurred -} - -// clientStream is the state for a single HTTP/2 stream. One of these -// is created for each Transport.RoundTrip call.
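Both ways of reaching this Transport are visible in the fields above: wrap an existing HTTP/1 transport via ConfigureTransport, or use an http2.Transport directly. A short sketch of each, assuming nothing beyond net/http and this package:

    package main

    import (
        "log"
        "net/http"

        "golang.org/x/net/http2"
    )

    func main() {
        // Upgrade an HTTP/1 transport in place; HTTP/2 is then used
        // whenever the server negotiates "h2" via ALPN (Go 1.6+).
        t1 := &http.Transport{}
        if err := http2.ConfigureTransport(t1); err != nil {
            log.Fatal(err)
        }
        upgraded := &http.Client{Transport: t1}

        // Or drive HTTP/2 directly; HTTPS-only unless AllowHTTP is set
        // and DialTLS is overridden to skip TLS.
        direct := &http.Client{Transport: &http2.Transport{}}

        _, _ = upgraded, direct
    }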
-type clientStream struct { - cc *ClientConn - req *http.Request - trace *clientTrace // or nil - ID uint32 - resc chan resAndError - bufPipe pipe // buffered pipe with the flow-controlled response payload - startedWrite bool // started request body write; guarded by cc.mu - requestedGzip bool - on100 func() // optional code to run if we get a 100 continue response - - flow flow // guarded by cc.mu - inflow flow // guarded by cc.mu - bytesRemain int64 // -1 means unknown; owned by transportResponseBody.Read - readErr error // sticky read error; owned by transportResponseBody.Read - stopReqBody error // if non-nil, stop writing req body; guarded by cc.mu - didReset bool // whether we sent a RST_STREAM to the server; guarded by cc.mu - - peerReset chan struct{} // closed on peer reset - resetErr error // populated before peerReset is closed - - done chan struct{} // closed when stream removed from cc.streams map; close calls guarded by cc.mu - - // owned by clientConnReadLoop: - firstByte bool // got the first response byte - pastHeaders bool // got first MetaHeadersFrame (actual headers) - pastTrailers bool // got optional second MetaHeadersFrame (trailers) - - trailer http.Header // accumulated trailers - resTrailer *http.Header // client's Response.Trailer -} - -// awaitRequestCancel runs in its own goroutine and waits for the user -// to cancel a RoundTrip request, its context to expire, or for the -// request to be done (any way it might be removed from the cc.streams -// map: peer reset, successful completion, TCP connection breakage, -// etc) -func (cs *clientStream) awaitRequestCancel(req *http.Request) { - ctx := reqContext(req) - if req.Cancel == nil && ctx.Done() == nil { - return - } - select { - case <-req.Cancel: - cs.cancelStream() - cs.bufPipe.CloseWithError(errRequestCanceled) - case <-ctx.Done(): - cs.cancelStream() - cs.bufPipe.CloseWithError(ctx.Err()) - case <-cs.done: - } -} - -func (cs *clientStream) cancelStream() { - cs.cc.mu.Lock() - didReset := cs.didReset - cs.didReset = true - cs.cc.mu.Unlock() - - if !didReset { - cs.cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) - } -} - -// checkResetOrDone reports any error sent in a RST_STREAM frame by the -// server, or errStreamClosed if the stream is complete. -func (cs *clientStream) checkResetOrDone() error { - select { - case <-cs.peerReset: - return cs.resetErr - case <-cs.done: - return errStreamClosed - default: - return nil - } -} - -func (cs *clientStream) abortRequestBodyWrite(err error) { - if err == nil { - panic("nil error") - } - cc := cs.cc - cc.mu.Lock() - cs.stopReqBody = err - cc.cond.Broadcast() - cc.mu.Unlock() -} - -type stickyErrWriter struct { - w io.Writer - err *error -} - -func (sew stickyErrWriter) Write(p []byte) (n int, err error) { - if *sew.err != nil { - return 0, *sew.err - } - n, err = sew.w.Write(p) - *sew.err = err - return -} - -var ErrNoCachedConn = errors.New("http2: no cached connection was available") - -// RoundTripOpt are options for the Transport.RoundTripOpt method. -type RoundTripOpt struct { - // OnlyCachedConn controls whether RoundTripOpt may - // create a new TCP connection. If set true and - // no cached connection is available, RoundTripOpt - // will return ErrNoCachedConn. - OnlyCachedConn bool -} - -func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { - return t.RoundTripOpt(req, RoundTripOpt{}) -} - -// authorityAddr takes a given authority (a host/IP, or host:port / ip:port) -// and returns a host:port. The port 443 is added if needed.
-func authorityAddr(scheme string, authority string) (addr string) { - host, port, err := net.SplitHostPort(authority) - if err != nil { // authority didn't have a port - port = "443" - if scheme == "http" { - port = "80" - } - host = authority - } - if a, err := idna.ToASCII(host); err == nil { - host = a - } - // IPv6 address literal, without a port: - if strings.HasPrefix(host, "[") && strings.HasSuffix(host, "]") { - return host + ":" + port - } - return net.JoinHostPort(host, port) -} - -// RoundTripOpt is like RoundTrip, but takes options. -func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Response, error) { - if !(req.URL.Scheme == "https" || (req.URL.Scheme == "http" && t.AllowHTTP)) { - return nil, errors.New("http2: unsupported scheme") - } - - addr := authorityAddr(req.URL.Scheme, req.URL.Host) - for { - cc, err := t.connPool().GetClientConn(req, addr) - if err != nil { - t.vlogf("http2: Transport failed to get client conn for %s: %v", addr, err) - return nil, err - } - traceGotConn(req, cc) - res, err := cc.RoundTrip(req) - if err != nil { - if req, err = shouldRetryRequest(req, err); err == nil { - continue - } - } - if err != nil { - t.vlogf("RoundTrip failure: %v", err) - return nil, err - } - return res, nil - } -} - -// CloseIdleConnections closes any connections which were previously -// connected from previous requests but are now sitting idle. -// It does not interrupt any connections currently in use. -func (t *Transport) CloseIdleConnections() { - if cp, ok := t.connPool().(clientConnPoolIdleCloser); ok { - cp.closeIdleConnections() - } -} - -var ( - errClientConnClosed = errors.New("http2: client conn is closed") - errClientConnUnusable = errors.New("http2: client conn not usable") - - errClientConnGotGoAway = errors.New("http2: Transport received Server's graceful shutdown GOAWAY") - errClientConnGotGoAwayAfterSomeReqBody = errors.New("http2: Transport received Server's graceful shutdown GOAWAY; some request body already written") -) - -// shouldRetryRequest is called by RoundTrip when a request fails to get -// response headers. It is always called with a non-nil error. -// It returns either a request to retry (either the same request, or a -// modified clone), or an error if the request can't be replayed. -func shouldRetryRequest(req *http.Request, err error) (*http.Request, error) { - switch err { - default: - return nil, err - case errClientConnUnusable, errClientConnGotGoAway: - return req, nil - case errClientConnGotGoAwayAfterSomeReqBody: - // If the Body is nil (or http.NoBody), it's safe to reuse - // this request and its Body. - if req.Body == nil || reqBodyIsNoBody(req.Body) { - return req, nil - } - // Otherwise we depend on the Request having its GetBody - // func defined. 
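The retry path above can only replay a request body through Request.GetBody. On Go 1.8+, http.NewRequest fills GetBody in automatically for *bytes.Buffer, *bytes.Reader, and *strings.Reader bodies; for any other reader the caller has to supply it. A minimal sketch with a placeholder URL and payload (the explicit GetBody is redundant for *strings.Reader and is shown only for the custom-reader shape):

    package main

    import (
        "io"
        "io/ioutil"
        "net/http"
        "strings"
    )

    func newReplayableRequest() (*http.Request, error) {
        const payload = `{"k":"v"}`
        req, err := http.NewRequest("POST", "https://example.com/api", strings.NewReader(payload))
        if err != nil {
            return nil, err
        }
        // Return a fresh ReadCloser over the same bytes each time the
        // transport needs to replay the body after a graceful GOAWAY.
        req.GetBody = func() (io.ReadCloser, error) {
            return ioutil.NopCloser(strings.NewReader(payload)), nil
        }
        return req, nil
    }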
- getBody := reqGetBody(req) // Go 1.8: getBody = req.GetBody - if getBody == nil { - return nil, errors.New("http2: Transport: peer server initiated graceful shutdown after some of Request.Body was written; define Request.GetBody to avoid this error") - } - body, err := getBody() - if err != nil { - return nil, err - } - newReq := *req - newReq.Body = body - return &newReq, nil - } -} - -func (t *Transport) dialClientConn(addr string, singleUse bool) (*ClientConn, error) { - host, _, err := net.SplitHostPort(addr) - if err != nil { - return nil, err - } - tconn, err := t.dialTLS()("tcp", addr, t.newTLSConfig(host)) - if err != nil { - return nil, err - } - return t.newClientConn(tconn, singleUse) -} - -func (t *Transport) newTLSConfig(host string) *tls.Config { - cfg := new(tls.Config) - if t.TLSClientConfig != nil { - *cfg = *cloneTLSConfig(t.TLSClientConfig) - } - if !strSliceContains(cfg.NextProtos, NextProtoTLS) { - cfg.NextProtos = append([]string{NextProtoTLS}, cfg.NextProtos...) - } - if cfg.ServerName == "" { - cfg.ServerName = host - } - return cfg -} - -func (t *Transport) dialTLS() func(string, string, *tls.Config) (net.Conn, error) { - if t.DialTLS != nil { - return t.DialTLS - } - return t.dialTLSDefault -} - -func (t *Transport) dialTLSDefault(network, addr string, cfg *tls.Config) (net.Conn, error) { - cn, err := tls.Dial(network, addr, cfg) - if err != nil { - return nil, err - } - if err := cn.Handshake(); err != nil { - return nil, err - } - if !cfg.InsecureSkipVerify { - if err := cn.VerifyHostname(cfg.ServerName); err != nil { - return nil, err - } - } - state := cn.ConnectionState() - if p := state.NegotiatedProtocol; p != NextProtoTLS { - return nil, fmt.Errorf("http2: unexpected ALPN protocol %q; want %q", p, NextProtoTLS) - } - if !state.NegotiatedProtocolIsMutual { - return nil, errors.New("http2: could not negotiate protocol mutually") - } - return cn, nil -} - -// disableKeepAlives reports whether connections should be closed as -// soon as possible after handling the first request. -func (t *Transport) disableKeepAlives() bool { - return t.t1 != nil && t.t1.DisableKeepAlives -} - -func (t *Transport) expectContinueTimeout() time.Duration { - if t.t1 == nil { - return 0 - } - return transportExpectContinueTimeout(t.t1) -} - -func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) { - return t.newClientConn(c, false) -} - -func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, error) { - cc := &ClientConn{ - t: t, - tconn: c, - readerDone: make(chan struct{}), - nextStreamID: 1, - maxFrameSize: 16 << 10, // spec default - initialWindowSize: 65535, // spec default - maxConcurrentStreams: 1000, // "infinite", per spec. 1000 seems good enough. - streams: make(map[uint32]*clientStream), - singleUse: singleUse, - wantSettingsAck: true, - pings: make(map[[8]byte]chan struct{}), - } - if d := t.idleConnTimeout(); d != 0 { - cc.idleTimeout = d - cc.idleTimer = time.AfterFunc(d, cc.onIdleTimeout) - } - if VerboseLogs { - t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr()) - } - - cc.cond = sync.NewCond(&cc.mu) - cc.flow.add(int32(initialWindowSize)) - - // TODO: adjust this writer size to account for frame size + - // MTU + crypto/tls record padding. 
- cc.bw = bufio.NewWriter(stickyErrWriter{c, &cc.werr}) - cc.br = bufio.NewReader(c) - cc.fr = NewFramer(cc.bw, cc.br) - cc.fr.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil) - cc.fr.MaxHeaderListSize = t.maxHeaderListSize() - - // TODO: SetMaxDynamicTableSize, SetMaxDynamicTableSizeLimit on - // henc in response to SETTINGS frames? - cc.henc = hpack.NewEncoder(&cc.hbuf) - - if cs, ok := c.(connectionStater); ok { - state := cs.ConnectionState() - cc.tlsState = &state - } - - initialSettings := []Setting{ - {ID: SettingEnablePush, Val: 0}, - {ID: SettingInitialWindowSize, Val: transportDefaultStreamFlow}, - } - if max := t.maxHeaderListSize(); max != 0 { - initialSettings = append(initialSettings, Setting{ID: SettingMaxHeaderListSize, Val: max}) - } - - cc.bw.Write(clientPreface) - cc.fr.WriteSettings(initialSettings...) - cc.fr.WriteWindowUpdate(0, transportDefaultConnFlow) - cc.inflow.add(transportDefaultConnFlow + initialWindowSize) - cc.bw.Flush() - if cc.werr != nil { - return nil, cc.werr - } - - go cc.readLoop() - return cc, nil -} - -func (cc *ClientConn) setGoAway(f *GoAwayFrame) { - cc.mu.Lock() - defer cc.mu.Unlock() - - old := cc.goAway - cc.goAway = f - - // Merge the previous and current GoAway error frames. - if cc.goAwayDebug == "" { - cc.goAwayDebug = string(f.DebugData()) - } - if old != nil && old.ErrCode != ErrCodeNo { - cc.goAway.ErrCode = old.ErrCode - } - last := f.LastStreamID - for streamID, cs := range cc.streams { - if streamID > last { - select { - case cs.resc <- resAndError{err: errClientConnGotGoAway}: - default: - } - } - } -} - -func (cc *ClientConn) CanTakeNewRequest() bool { - cc.mu.Lock() - defer cc.mu.Unlock() - return cc.canTakeNewRequestLocked() -} - -func (cc *ClientConn) canTakeNewRequestLocked() bool { - if cc.singleUse && cc.nextStreamID > 1 { - return false - } - return cc.goAway == nil && !cc.closed && - int64(len(cc.streams)+1) < int64(cc.maxConcurrentStreams) && - cc.nextStreamID < math.MaxInt32 -} - -// onIdleTimeout is called from a time.AfterFunc goroutine. It will -// only be called when we're idle, but because we're coming from a new -// goroutine, there could be a new request coming in at the same time, -// so this simply calls the synchronized closeIfIdle to shut down this -// connection. The timer could just call closeIfIdle, but this is more -// clear. -func (cc *ClientConn) onIdleTimeout() { - cc.closeIfIdle() -} - -func (cc *ClientConn) closeIfIdle() { - cc.mu.Lock() - if len(cc.streams) > 0 { - cc.mu.Unlock() - return - } - cc.closed = true - nextID := cc.nextStreamID - // TODO: do clients send GOAWAY too? maybe? Just Close: - cc.mu.Unlock() - - if VerboseLogs { - cc.vlogf("http2: Transport closing idle conn %p (forSingleUse=%v, maxStream=%v)", cc, cc.singleUse, nextID-2) - } - cc.tconn.Close() -} - -const maxAllocFrameSize = 512 << 10 - -// frameScratchBuffer returns a scratch buffer suitable for writing DATA frames. -// They're capped at the min of the peer's max frame size or 512KB -// (kinda arbitrarily), but definitely capped so we don't allocate 4GB -// buffers.
-func (cc *ClientConn) frameScratchBuffer() []byte { - cc.mu.Lock() - size := cc.maxFrameSize - if size > maxAllocFrameSize { - size = maxAllocFrameSize - } - for i, buf := range cc.freeBuf { - if len(buf) >= int(size) { - cc.freeBuf[i] = nil - cc.mu.Unlock() - return buf[:size] - } - } - cc.mu.Unlock() - return make([]byte, size) -} - -func (cc *ClientConn) putFrameScratchBuffer(buf []byte) { - cc.mu.Lock() - defer cc.mu.Unlock() - const maxBufs = 4 // arbitrary; 4 concurrent requests per conn? investigate. - if len(cc.freeBuf) < maxBufs { - cc.freeBuf = append(cc.freeBuf, buf) - return - } - for i, old := range cc.freeBuf { - if old == nil { - cc.freeBuf[i] = buf - return - } - } - // forget about it. -} - -// errRequestCanceled is a copy of net/http's errRequestCanceled because it's not -// exported. At least they'll be DeepEqual for h1-vs-h2 comparisons tests. -var errRequestCanceled = errors.New("net/http: request canceled") - -func commaSeparatedTrailers(req *http.Request) (string, error) { - keys := make([]string, 0, len(req.Trailer)) - for k := range req.Trailer { - k = http.CanonicalHeaderKey(k) - switch k { - case "Transfer-Encoding", "Trailer", "Content-Length": - return "", &badStringError{"invalid Trailer key", k} - } - keys = append(keys, k) - } - if len(keys) > 0 { - sort.Strings(keys) - return strings.Join(keys, ","), nil - } - return "", nil -} - -func (cc *ClientConn) responseHeaderTimeout() time.Duration { - if cc.t.t1 != nil { - return cc.t.t1.ResponseHeaderTimeout - } - // No way to do this (yet?) with just an http2.Transport. Probably - // no need. Request.Cancel this is the new way. We only need to support - // this for compatibility with the old http.Transport fields when - // we're doing transparent http2. - return 0 -} - -// checkConnHeaders checks whether req has any invalid connection-level headers. -// per RFC 7540 section 8.1.2.2: Connection-Specific Header Fields. -// Certain headers are special-cased as okay but not transmitted later. -func checkConnHeaders(req *http.Request) error { - if v := req.Header.Get("Upgrade"); v != "" { - return fmt.Errorf("http2: invalid Upgrade request header: %q", req.Header["Upgrade"]) - } - if vv := req.Header["Transfer-Encoding"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "chunked") { - return fmt.Errorf("http2: invalid Transfer-Encoding request header: %q", vv) - } - if vv := req.Header["Connection"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "close" && vv[0] != "keep-alive") { - return fmt.Errorf("http2: invalid Connection request header: %q", vv) - } - return nil -} - -// actualContentLength returns a sanitized version of -// req.ContentLength, where 0 actually means zero (not unknown) and -1 -// means unknown. -func actualContentLength(req *http.Request) int64 { - if req.Body == nil { - return 0 - } - if req.ContentLength != 0 { - return req.ContentLength - } - return -1 -} - -func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { - if err := checkConnHeaders(req); err != nil { - return nil, err - } - if cc.idleTimer != nil { - cc.idleTimer.Stop() - } - - trailers, err := commaSeparatedTrailers(req) - if err != nil { - return nil, err - } - hasTrailers := trailers != "" - - cc.mu.Lock() - cc.lastActive = time.Now() - if cc.closed || !cc.canTakeNewRequestLocked() { - cc.mu.Unlock() - return nil, errClientConnUnusable - } - - body := req.Body - hasBody := body != nil - contentLen := actualContentLength(req) - - // TODO(bradfitz): this is a copy of the logic in net/http. 
Unify somewhere? - var requestedGzip bool - if !cc.t.disableCompression() && - req.Header.Get("Accept-Encoding") == "" && - req.Header.Get("Range") == "" && - req.Method != "HEAD" { - // Request gzip only, not deflate. Deflate is ambiguous and - // not as universally supported anyway. - // See: http://www.gzip.org/zlib/zlib_faq.html#faq38 - // - // Note that we don't request this for HEAD requests, - // due to a bug in nginx: - // http://trac.nginx.org/nginx/ticket/358 - // https://golang.org/issue/5522 - // - // We don't request gzip if the request is for a range, since - // auto-decoding a portion of a gzipped document will just fail - // anyway. See https://golang.org/issue/8923 - requestedGzip = true - } - - // we send: HEADERS{1}, CONTINUATION{0,} + DATA{0,} (DATA is - // sent by writeRequestBody below, along with any Trailers, - // again in form HEADERS{1}, CONTINUATION{0,}) - hdrs, err := cc.encodeHeaders(req, requestedGzip, trailers, contentLen) - if err != nil { - cc.mu.Unlock() - return nil, err - } - - cs := cc.newStream() - cs.req = req - cs.trace = requestTrace(req) - cs.requestedGzip = requestedGzip - bodyWriter := cc.t.getBodyWriterState(cs, body) - cs.on100 = bodyWriter.on100 - - cc.wmu.Lock() - endStream := !hasBody && !hasTrailers - werr := cc.writeHeaders(cs.ID, endStream, hdrs) - cc.wmu.Unlock() - traceWroteHeaders(cs.trace) - cc.mu.Unlock() - - if werr != nil { - if hasBody { - req.Body.Close() // per RoundTripper contract - bodyWriter.cancel() - } - cc.forgetStreamID(cs.ID) - // Don't bother sending a RST_STREAM (our write already failed; - // no need to keep writing) - traceWroteRequest(cs.trace, werr) - return nil, werr - } - - var respHeaderTimer <-chan time.Time - if hasBody { - bodyWriter.scheduleBodyWrite() - } else { - traceWroteRequest(cs.trace, nil) - if d := cc.responseHeaderTimeout(); d != 0 { - timer := time.NewTimer(d) - defer timer.Stop() - respHeaderTimer = timer.C - } - } - - readLoopResCh := cs.resc - bodyWritten := false - ctx := reqContext(req) - - handleReadLoopResponse := func(re resAndError) (*http.Response, error) { - res := re.res - if re.err != nil || res.StatusCode > 299 { - // On error or status code 3xx, 4xx, 5xx, etc abort any - // ongoing write, assuming that the server doesn't care - // about our request body. If the server replied with 1xx or - // 2xx, however, then assume the server DOES potentially - // want our body (e.g. full-duplex streaming: - // golang.org/issue/13444). If it turns out the server - // doesn't, they'll RST_STREAM us soon enough. This is a - // heuristic to avoid adding knobs to Transport. Hopefully - // we can keep it. 
- bodyWriter.cancel() - cs.abortRequestBodyWrite(errStopReqBodyWrite) - } - if re.err != nil { - if re.err == errClientConnGotGoAway { - cc.mu.Lock() - if cs.startedWrite { - re.err = errClientConnGotGoAwayAfterSomeReqBody - } - cc.mu.Unlock() - } - cc.forgetStreamID(cs.ID) - return nil, re.err - } - res.Request = req - res.TLS = cc.tlsState - return res, nil - } - - for { - select { - case re := <-readLoopResCh: - return handleReadLoopResponse(re) - case <-respHeaderTimer: - cc.forgetStreamID(cs.ID) - if !hasBody || bodyWritten { - cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) - } else { - bodyWriter.cancel() - cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel) - } - return nil, errTimeout - case <-ctx.Done(): - cc.forgetStreamID(cs.ID) - if !hasBody || bodyWritten { - cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) - } else { - bodyWriter.cancel() - cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel) - } - return nil, ctx.Err() - case <-req.Cancel: - cc.forgetStreamID(cs.ID) - if !hasBody || bodyWritten { - cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) - } else { - bodyWriter.cancel() - cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel) - } - return nil, errRequestCanceled - case <-cs.peerReset: - // processResetStream already removed the - // stream from the streams map; no need for - // forgetStreamID. - return nil, cs.resetErr - case err := <-bodyWriter.resc: - // Prefer the read loop's response, if available. Issue 16102. - select { - case re := <-readLoopResCh: - return handleReadLoopResponse(re) - default: - } - if err != nil { - return nil, err - } - bodyWritten = true - if d := cc.responseHeaderTimeout(); d != 0 { - timer := time.NewTimer(d) - defer timer.Stop() - respHeaderTimer = timer.C - } - } - } -} - -// requires cc.wmu be held -func (cc *ClientConn) writeHeaders(streamID uint32, endStream bool, hdrs []byte) error { - first := true // first frame written (HEADERS is first, then CONTINUATION) - frameSize := int(cc.maxFrameSize) - for len(hdrs) > 0 && cc.werr == nil { - chunk := hdrs - if len(chunk) > frameSize { - chunk = chunk[:frameSize] - } - hdrs = hdrs[len(chunk):] - endHeaders := len(hdrs) == 0 - if first { - cc.fr.WriteHeaders(HeadersFrameParam{ - StreamID: streamID, - BlockFragment: chunk, - EndStream: endStream, - EndHeaders: endHeaders, - }) - first = false - } else { - cc.fr.WriteContinuation(streamID, endHeaders, chunk) - } - } - // TODO(bradfitz): this Flush could potentially block (as - // could the WriteHeaders call(s) above), which means they - // wouldn't respond to Request.Cancel being readable. That's - // rare, but this should probably be in a goroutine. - cc.bw.Flush() - return cc.werr -} - -// internal error values; they don't escape to callers -var ( - // abort request body write; don't send cancel - errStopReqBodyWrite = errors.New("http2: aborting request body write") - - // abort request body write, but send stream reset of cancel. 
- errStopReqBodyWriteAndCancel = errors.New("http2: canceling request") -) - -func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) (err error) { - cc := cs.cc - sentEnd := false // whether we sent the final DATA frame w/ END_STREAM - buf := cc.frameScratchBuffer() - defer cc.putFrameScratchBuffer(buf) - - defer func() { - traceWroteRequest(cs.trace, err) - // TODO: write h12Compare test showing whether - // Request.Body is closed by the Transport, - // and in multiple cases: server replies <=299 and >299 - // while still writing request body - cerr := bodyCloser.Close() - if err == nil { - err = cerr - } - }() - - req := cs.req - hasTrailers := req.Trailer != nil - - var sawEOF bool - for !sawEOF { - n, err := body.Read(buf) - if err == io.EOF { - sawEOF = true - err = nil - } else if err != nil { - return err - } - - remain := buf[:n] - for len(remain) > 0 && err == nil { - var allowed int32 - allowed, err = cs.awaitFlowControl(len(remain)) - switch { - case err == errStopReqBodyWrite: - return err - case err == errStopReqBodyWriteAndCancel: - cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) - return err - case err != nil: - return err - } - cc.wmu.Lock() - data := remain[:allowed] - remain = remain[allowed:] - sentEnd = sawEOF && len(remain) == 0 && !hasTrailers - err = cc.fr.WriteData(cs.ID, sentEnd, data) - if err == nil { - // TODO(bradfitz): this flush is for latency, not bandwidth. - // Most requests won't need this. Make this opt-in or - // opt-out? Use some heuristic on the body type? Nagle-like - // timers? Based on 'n'? Only last chunk of this for loop, - // unless flow control tokens are low? For now, always. - // If we change this, see comment below. - err = cc.bw.Flush() - } - cc.wmu.Unlock() - } - if err != nil { - return err - } - } - - if sentEnd { - // Already sent END_STREAM (which implies we have no - // trailers) and flushed, because currently all - // WriteData frames above get a flush. So we're done. - return nil - } - - var trls []byte - if hasTrailers { - cc.mu.Lock() - defer cc.mu.Unlock() - trls = cc.encodeTrailers(req) - } - - cc.wmu.Lock() - defer cc.wmu.Unlock() - - // Two ways to send END_STREAM: either with trailers, or - // with an empty DATA frame. - if len(trls) > 0 { - err = cc.writeHeaders(cs.ID, true, trls) - } else { - err = cc.fr.WriteData(cs.ID, true, nil) - } - if ferr := cc.bw.Flush(); ferr != nil && err == nil { - err = ferr - } - return err -} - -// awaitFlowControl waits for [1, min(maxBytes, cc.cs.maxFrameSize)] flow -// control tokens from the server. -// It returns either the non-zero number of tokens taken or an error -// if the stream is dead. -func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error) { - cc := cs.cc - cc.mu.Lock() - defer cc.mu.Unlock() - for { - if cc.closed { - return 0, errClientConnClosed - } - if cs.stopReqBody != nil { - return 0, cs.stopReqBody - } - if err := cs.checkResetOrDone(); err != nil { - return 0, err - } - if a := cs.flow.available(); a > 0 { - take := a - if int(take) > maxBytes { - - take = int32(maxBytes) // can't truncate int; take is int32 - } - if take > int32(cc.maxFrameSize) { - take = int32(cc.maxFrameSize) - } - cs.flow.take(take) - return take, nil - } - cc.cond.Wait() - } -} - -type badStringError struct { - what string - str string -} - -func (e *badStringError) Error() string { return fmt.Sprintf("%s %q", e.what, e.str) } - -// requires cc.mu be held.
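Request trailers flow through commaSeparatedTrailers earlier in this file, the "trailer" header written by encodeHeaders below, and encodeTrailers at the end of the body write. On the caller's side that corresponds to the standard net/http contract: declare trailer keys before RoundTrip, fill values before the body reaches EOF. A hedged sketch; "Checksum" and the URL are illustrative, not anything this package defines:

    package main

    import (
        "net/http"
        "strings"
    )

    func newTrailedRequest(sum string) (*http.Request, error) {
        req, err := http.NewRequest("PUT", "https://example.com/upload", strings.NewReader("payload"))
        if err != nil {
            return nil, err
        }
        req.Trailer = http.Header{}      // keys must exist before RoundTrip
        req.Trailer.Set("Checksum", sum) // value may be set until body EOF
        return req, nil
    }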
-func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trailers string, contentLength int64) ([]byte, error) { - cc.hbuf.Reset() - - host := req.Host - if host == "" { - host = req.URL.Host - } - host, err := httplex.PunycodeHostPort(host) - if err != nil { - return nil, err - } - - var path string - if req.Method != "CONNECT" { - path = req.URL.RequestURI() - if !validPseudoPath(path) { - orig := path - path = strings.TrimPrefix(path, req.URL.Scheme+"://"+host) - if !validPseudoPath(path) { - if req.URL.Opaque != "" { - return nil, fmt.Errorf("invalid request :path %q from URL.Opaque = %q", orig, req.URL.Opaque) - } else { - return nil, fmt.Errorf("invalid request :path %q", orig) - } - } - } - } - - // Check for any invalid headers and return an error before we - // potentially pollute our hpack state. (We want to be able to - // continue to reuse the hpack encoder for future requests) - for k, vv := range req.Header { - if !httplex.ValidHeaderFieldName(k) { - return nil, fmt.Errorf("invalid HTTP header name %q", k) - } - for _, v := range vv { - if !httplex.ValidHeaderFieldValue(v) { - return nil, fmt.Errorf("invalid HTTP header value %q for header %q", v, k) - } - } - } - - // 8.1.2.3 Request Pseudo-Header Fields - // The :path pseudo-header field includes the path and query parts of the - // target URI (the path-absolute production and optionally a '?' character - // followed by the query production (see Sections 3.3 and 3.4 of - // [RFC3986]). - cc.writeHeader(":authority", host) - cc.writeHeader(":method", req.Method) - if req.Method != "CONNECT" { - cc.writeHeader(":path", path) - cc.writeHeader(":scheme", req.URL.Scheme) - } - if trailers != "" { - cc.writeHeader("trailer", trailers) - } - - var didUA bool - for k, vv := range req.Header { - lowKey := strings.ToLower(k) - switch lowKey { - case "host", "content-length": - // Host is :authority, already sent. - // Content-Length is automatic, set below. - continue - case "connection", "proxy-connection", "transfer-encoding", "upgrade", "keep-alive": - // Per 8.1.2.2 Connection-Specific Header - // Fields, don't send connection-specific - // fields. We have already checked if any - // are error-worthy so just ignore the rest. - continue - case "user-agent": - // Match Go's http1 behavior: at most one - // User-Agent. If set to nil or empty string, - // then omit it. Otherwise if not mentioned, - // include the default (below). - didUA = true - if len(vv) < 1 { - continue - } - vv = vv[:1] - if vv[0] == "" { - continue - } - } - for _, v := range vv { - cc.writeHeader(lowKey, v) - } - } - if shouldSendReqContentLength(req.Method, contentLength) { - cc.writeHeader("content-length", strconv.FormatInt(contentLength, 10)) - } - if addGzipHeader { - cc.writeHeader("accept-encoding", "gzip") - } - if !didUA { - cc.writeHeader("user-agent", defaultUserAgent) - } - return cc.hbuf.Bytes(), nil -} - -// shouldSendReqContentLength reports whether the http2.Transport should send -// a "content-length" request header. This logic is basically a copy of the net/http -// transferWriter.shouldSendContentLength. -// The contentLength is the corrected contentLength (so 0 means actually 0, not unknown). -// -1 means unknown. -func shouldSendReqContentLength(method string, contentLength int64) bool { - if contentLength > 0 { - return true - } - if contentLength < 0 { - return false - } - // For zero bodies, whether we send a content-length depends on the method. - // It also kinda doesn't matter for http2 either way, with END_STREAM. 
- switch method { - case "POST", "PUT", "PATCH": - return true - default: - return false - } -} - -// requires cc.mu be held. -func (cc *ClientConn) encodeTrailers(req *http.Request) []byte { - cc.hbuf.Reset() - for k, vv := range req.Trailer { - // Transfer-Encoding, etc.. have already been filtered at the - // start of RoundTrip - lowKey := strings.ToLower(k) - for _, v := range vv { - cc.writeHeader(lowKey, v) - } - } - return cc.hbuf.Bytes() -} - -func (cc *ClientConn) writeHeader(name, value string) { - if VerboseLogs { - log.Printf("http2: Transport encoding header %q = %q", name, value) - } - cc.henc.WriteField(hpack.HeaderField{Name: name, Value: value}) -} - -type resAndError struct { - res *http.Response - err error -} - -// requires cc.mu be held. -func (cc *ClientConn) newStream() *clientStream { - cs := &clientStream{ - cc: cc, - ID: cc.nextStreamID, - resc: make(chan resAndError, 1), - peerReset: make(chan struct{}), - done: make(chan struct{}), - } - cs.flow.add(int32(cc.initialWindowSize)) - cs.flow.setConnFlow(&cc.flow) - cs.inflow.add(transportDefaultStreamFlow) - cs.inflow.setConnFlow(&cc.inflow) - cc.nextStreamID += 2 - cc.streams[cs.ID] = cs - return cs -} - -func (cc *ClientConn) forgetStreamID(id uint32) { - cc.streamByID(id, true) -} - -func (cc *ClientConn) streamByID(id uint32, andRemove bool) *clientStream { - cc.mu.Lock() - defer cc.mu.Unlock() - cs := cc.streams[id] - if andRemove && cs != nil && !cc.closed { - cc.lastActive = time.Now() - delete(cc.streams, id) - if len(cc.streams) == 0 && cc.idleTimer != nil { - cc.idleTimer.Reset(cc.idleTimeout) - } - close(cs.done) - cc.cond.Broadcast() // wake up checkResetOrDone via clientStream.awaitFlowControl - } - return cs -} - -// clientConnReadLoop is the state owned by the clientConn's frame-reading readLoop. -type clientConnReadLoop struct { - cc *ClientConn - activeRes map[uint32]*clientStream // keyed by streamID - closeWhenIdle bool -} - -// readLoop runs in its own goroutine and reads and dispatches frames. -func (cc *ClientConn) readLoop() { - rl := &clientConnReadLoop{ - cc: cc, - activeRes: make(map[uint32]*clientStream), - } - - defer rl.cleanup() - cc.readerErr = rl.run() - if ce, ok := cc.readerErr.(ConnectionError); ok { - cc.wmu.Lock() - cc.fr.WriteGoAway(0, ErrCode(ce), nil) - cc.wmu.Unlock() - } -} - -// GoAwayError is returned by the Transport when the server closes the -// TCP connection after sending a GOAWAY frame. -type GoAwayError struct { - LastStreamID uint32 - ErrCode ErrCode - DebugData string -} - -func (e GoAwayError) Error() string { - return fmt.Sprintf("http2: server sent GOAWAY and closed the connection; LastStreamID=%v, ErrCode=%v, debug=%q", - e.LastStreamID, e.ErrCode, e.DebugData) -} - -func isEOFOrNetReadError(err error) bool { - if err == io.EOF { - return true - } - ne, ok := err.(*net.OpError) - return ok && ne.Op == "read" -} - -func (rl *clientConnReadLoop) cleanup() { - cc := rl.cc - defer cc.tconn.Close() - defer cc.t.connPool().MarkDead(cc) - defer close(cc.readerDone) - - if cc.idleTimer != nil { - cc.idleTimer.Stop() - } - - // Close any response bodies if the server closes prematurely. - // TODO: also do this if we've written the headers but not - // gotten a response yet.
- err := cc.readerErr - cc.mu.Lock() - if cc.goAway != nil && isEOFOrNetReadError(err) { - err = GoAwayError{ - LastStreamID: cc.goAway.LastStreamID, - ErrCode: cc.goAway.ErrCode, - DebugData: cc.goAwayDebug, - } - } else if err == io.EOF { - err = io.ErrUnexpectedEOF - } - for _, cs := range rl.activeRes { - cs.bufPipe.CloseWithError(err) - } - for _, cs := range cc.streams { - select { - case cs.resc <- resAndError{err: err}: - default: - } - close(cs.done) - } - cc.closed = true - cc.cond.Broadcast() - cc.mu.Unlock() -} - -func (rl *clientConnReadLoop) run() error { - cc := rl.cc - rl.closeWhenIdle = cc.t.disableKeepAlives() || cc.singleUse - gotReply := false // ever saw a HEADERS reply - gotSettings := false - for { - f, err := cc.fr.ReadFrame() - if err != nil { - cc.vlogf("http2: Transport readFrame error on conn %p: (%T) %v", cc, err, err) - } - if se, ok := err.(StreamError); ok { - if cs := cc.streamByID(se.StreamID, true /*ended; remove it*/); cs != nil { - cs.cc.writeStreamReset(cs.ID, se.Code, err) - if se.Cause == nil { - se.Cause = cc.fr.errDetail - } - rl.endStreamError(cs, se) - } - continue - } else if err != nil { - return err - } - if VerboseLogs { - cc.vlogf("http2: Transport received %s", summarizeFrame(f)) - } - if !gotSettings { - if _, ok := f.(*SettingsFrame); !ok { - cc.logf("protocol error: received %T before a SETTINGS frame", f) - return ConnectionError(ErrCodeProtocol) - } - gotSettings = true - } - maybeIdle := false // whether frame might transition us to idle - - switch f := f.(type) { - case *MetaHeadersFrame: - err = rl.processHeaders(f) - maybeIdle = true - gotReply = true - case *DataFrame: - err = rl.processData(f) - maybeIdle = true - case *GoAwayFrame: - err = rl.processGoAway(f) - maybeIdle = true - case *RSTStreamFrame: - err = rl.processResetStream(f) - maybeIdle = true - case *SettingsFrame: - err = rl.processSettings(f) - case *PushPromiseFrame: - err = rl.processPushPromise(f) - case *WindowUpdateFrame: - err = rl.processWindowUpdate(f) - case *PingFrame: - err = rl.processPing(f) - default: - cc.logf("Transport: unhandled response frame type %T", f) - } - if err != nil { - if VerboseLogs { - cc.vlogf("http2: Transport conn %p received error from processing frame %v: %v", cc, summarizeFrame(f), err) - } - return err - } - if rl.closeWhenIdle && gotReply && maybeIdle && len(rl.activeRes) == 0 { - cc.closeIfIdle() - } - } -} - -func (rl *clientConnReadLoop) processHeaders(f *MetaHeadersFrame) error { - cc := rl.cc - cs := cc.streamByID(f.StreamID, f.StreamEnded()) - if cs == nil { - // We'd get here if we canceled a request while the - // server had its response still in flight. So if this - // was just something we canceled, ignore it. - return nil - } - if !cs.firstByte { - if cs.trace != nil { - // TODO(bradfitz): move first response byte earlier, - // when we first read the 9 byte header, not waiting - // until all the HEADERS+CONTINUATION frames have been - // merged. This works for now. - traceFirstResponseByte(cs.trace) - } - cs.firstByte = true - } - if !cs.pastHeaders { - cs.pastHeaders = true - } else { - return rl.processTrailers(cs, f) - } - - res, err := rl.handleResponse(cs, f) - if err != nil { - if _, ok := err.(ConnectionError); ok { - return err - } - // Any other error type is a stream error. - cs.cc.writeStreamReset(f.StreamID, ErrCodeProtocol, err) - cs.resc <- resAndError{err: err} - return nil // return nil from process* funcs to keep conn alive - } - if res == nil { - // (nil, nil) special case. See handleResponse docs. 
- return nil - } - if res.Body != noBody { - rl.activeRes[cs.ID] = cs - } - cs.resTrailer = &res.Trailer - cs.resc <- resAndError{res: res} - return nil -} - -// may return error types nil, or ConnectionError. Any other error value -// is a StreamError of type ErrCodeProtocol. The returned error in that case -// is the detail. -// -// As a special case, handleResponse may return (nil, nil) to skip the -// frame (currently only used for 100 expect continue). This special -// case is going away after Issue 13851 is fixed. -func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFrame) (*http.Response, error) { - if f.Truncated { - return nil, errResponseHeaderListSize - } - - status := f.PseudoValue("status") - if status == "" { - return nil, errors.New("missing status pseudo header") - } - statusCode, err := strconv.Atoi(status) - if err != nil { - return nil, errors.New("malformed non-numeric status pseudo header") - } - - if statusCode == 100 { - traceGot100Continue(cs.trace) - if cs.on100 != nil { - cs.on100() // forces any write delay timer to fire - } - cs.pastHeaders = false // do it all again - return nil, nil - } - - header := make(http.Header) - res := &http.Response{ - Proto: "HTTP/2.0", - ProtoMajor: 2, - Header: header, - StatusCode: statusCode, - Status: status + " " + http.StatusText(statusCode), - } - for _, hf := range f.RegularFields() { - key := http.CanonicalHeaderKey(hf.Name) - if key == "Trailer" { - t := res.Trailer - if t == nil { - t = make(http.Header) - res.Trailer = t - } - foreachHeaderElement(hf.Value, func(v string) { - t[http.CanonicalHeaderKey(v)] = nil - }) - } else { - header[key] = append(header[key], hf.Value) - } - } - - streamEnded := f.StreamEnded() - isHead := cs.req.Method == "HEAD" - if !streamEnded || isHead { - res.ContentLength = -1 - if clens := res.Header["Content-Length"]; len(clens) == 1 { - if clen64, err := strconv.ParseInt(clens[0], 10, 64); err == nil { - res.ContentLength = clen64 - } else { - // TODO: care? unlike http/1, it won't mess up our framing, so it's - // more safe smuggling-wise to ignore. - } - } else if len(clens) > 1 { - // TODO: care? unlike http/1, it won't mess up our framing, so it's - // more safe smuggling-wise to ignore. - } - } - - if streamEnded || isHead { - res.Body = noBody - return res, nil - } - - buf := new(bytes.Buffer) // TODO(bradfitz): recycle this garbage - cs.bufPipe = pipe{b: buf} - cs.bytesRemain = res.ContentLength - res.Body = transportResponseBody{cs} - go cs.awaitRequestCancel(cs.req) - - if cs.requestedGzip && res.Header.Get("Content-Encoding") == "gzip" { - res.Header.Del("Content-Encoding") - res.Header.Del("Content-Length") - res.ContentLength = -1 - res.Body = &gzipReader{body: res.Body} - setResponseUncompressed(res) - } - return res, nil -} - -func (rl *clientConnReadLoop) processTrailers(cs *clientStream, f *MetaHeadersFrame) error { - if cs.pastTrailers { - // Too many HEADERS frames for this stream. - return ConnectionError(ErrCodeProtocol) - } - cs.pastTrailers = true - if !f.StreamEnded() { - // We expect that any headers for trailers also - // have END_STREAM. - return ConnectionError(ErrCodeProtocol) - } - if len(f.PseudoFields()) > 0 { - // No pseudo header fields are defined for trailers. - // TODO: ConnectionError might be overly harsh? Check.
- return ConnectionError(ErrCodeProtocol) - } - - trailer := make(http.Header) - for _, hf := range f.RegularFields() { - key := http.CanonicalHeaderKey(hf.Name) - trailer[key] = append(trailer[key], hf.Value) - } - cs.trailer = trailer - - rl.endStream(cs) - return nil -} - -// transportResponseBody is the concrete type of Transport.RoundTrip's -// Response.Body. It is an io.ReadCloser. On Read, it reads from cs.body. -// On Close it sends RST_STREAM if EOF wasn't already seen. -type transportResponseBody struct { - cs *clientStream -} - -func (b transportResponseBody) Read(p []byte) (n int, err error) { - cs := b.cs - cc := cs.cc - - if cs.readErr != nil { - return 0, cs.readErr - } - n, err = b.cs.bufPipe.Read(p) - if cs.bytesRemain != -1 { - if int64(n) > cs.bytesRemain { - n = int(cs.bytesRemain) - if err == nil { - err = errors.New("net/http: server replied with more than declared Content-Length; truncated") - cc.writeStreamReset(cs.ID, ErrCodeProtocol, err) - } - cs.readErr = err - return int(cs.bytesRemain), err - } - cs.bytesRemain -= int64(n) - if err == io.EOF && cs.bytesRemain > 0 { - err = io.ErrUnexpectedEOF - cs.readErr = err - return n, err - } - } - if n == 0 { - // No flow control tokens to send back. - return - } - - cc.mu.Lock() - defer cc.mu.Unlock() - - var connAdd, streamAdd int32 - // Check the conn-level first, before the stream-level. - if v := cc.inflow.available(); v < transportDefaultConnFlow/2 { - connAdd = transportDefaultConnFlow - v - cc.inflow.add(connAdd) - } - if err == nil { // No need to refresh if the stream is over or failed. - // Consider any buffered body data (read from the conn but not - // consumed by the client) when computing flow control for this - // stream. - v := int(cs.inflow.available()) + cs.bufPipe.Len() - if v < transportDefaultStreamFlow-transportDefaultStreamMinRefresh { - streamAdd = int32(transportDefaultStreamFlow - v) - cs.inflow.add(streamAdd) - } - } - if connAdd != 0 || streamAdd != 0 { - cc.wmu.Lock() - defer cc.wmu.Unlock() - if connAdd != 0 { - cc.fr.WriteWindowUpdate(0, mustUint31(connAdd)) - } - if streamAdd != 0 { - cc.fr.WriteWindowUpdate(cs.ID, mustUint31(streamAdd)) - } - cc.bw.Flush() - } - return -} - -var errClosedResponseBody = errors.New("http2: response body closed") - -func (b transportResponseBody) Close() error { - cs := b.cs - cc := cs.cc - - serverSentStreamEnd := cs.bufPipe.Err() == io.EOF - unread := cs.bufPipe.Len() - - if unread > 0 || !serverSentStreamEnd { - cc.mu.Lock() - cc.wmu.Lock() - if !serverSentStreamEnd { - cc.fr.WriteRSTStream(cs.ID, ErrCodeCancel) - } - // Return connection-level flow control. - if unread > 0 { - cc.inflow.add(int32(unread)) - cc.fr.WriteWindowUpdate(0, uint32(unread)) - } - cc.bw.Flush() - cc.wmu.Unlock() - cc.mu.Unlock() - } - - cs.bufPipe.BreakWithError(errClosedResponseBody) - return nil -} - -func (rl *clientConnReadLoop) processData(f *DataFrame) error { - cc := rl.cc - cs := cc.streamByID(f.StreamID, f.StreamEnded()) - data := f.Data() - if cs == nil { - cc.mu.Lock() - neverSent := cc.nextStreamID - cc.mu.Unlock() - if f.StreamID >= neverSent { - // We never asked for this. - cc.logf("http2: Transport received unsolicited DATA frame; closing connection") - return ConnectionError(ErrCodeProtocol) - } - // We probably did ask for this, but canceled. Just ignore it. - // TODO: be stricter here? only silently ignore things which - // we canceled, but not things which were closed normally - // by the peer? Tough without accumulating too much state. 
- - // But at least return their flow control: - if f.Length > 0 { - cc.mu.Lock() - cc.inflow.add(int32(f.Length)) - cc.mu.Unlock() - - cc.wmu.Lock() - cc.fr.WriteWindowUpdate(0, uint32(f.Length)) - cc.bw.Flush() - cc.wmu.Unlock() - } - return nil - } - if f.Length > 0 { - if len(data) > 0 && cs.bufPipe.b == nil { - // Data frame after it's already closed? - cc.logf("http2: Transport received DATA frame for closed stream; closing connection") - return ConnectionError(ErrCodeProtocol) - } - - // Check connection-level flow control. - cc.mu.Lock() - if cs.inflow.available() >= int32(f.Length) { - cs.inflow.take(int32(f.Length)) - } else { - cc.mu.Unlock() - return ConnectionError(ErrCodeFlowControl) - } - // Return any padded flow control now, since we won't - // refund it later on body reads. - if pad := int32(f.Length) - int32(len(data)); pad > 0 { - cs.inflow.add(pad) - cc.inflow.add(pad) - cc.wmu.Lock() - cc.fr.WriteWindowUpdate(0, uint32(pad)) - cc.fr.WriteWindowUpdate(cs.ID, uint32(pad)) - cc.bw.Flush() - cc.wmu.Unlock() - } - didReset := cs.didReset - cc.mu.Unlock() - - if len(data) > 0 && !didReset { - if _, err := cs.bufPipe.Write(data); err != nil { - rl.endStreamError(cs, err) - return err - } - } - } - - if f.StreamEnded() { - rl.endStream(cs) - } - return nil -} - -var errInvalidTrailers = errors.New("http2: invalid trailers") - -func (rl *clientConnReadLoop) endStream(cs *clientStream) { - // TODO: check that any declared content-length matches, like - // server.go's (*stream).endStream method. - rl.endStreamError(cs, nil) -} - -func (rl *clientConnReadLoop) endStreamError(cs *clientStream, err error) { - var code func() - if err == nil { - err = io.EOF - code = cs.copyTrailers - } - cs.bufPipe.closeWithErrorAndCode(err, code) - delete(rl.activeRes, cs.ID) - if isConnectionCloseRequest(cs.req) { - rl.closeWhenIdle = true - } - - select { - case cs.resc <- resAndError{err: err}: - default: - } -} - -func (cs *clientStream) copyTrailers() { - for k, vv := range cs.trailer { - t := cs.resTrailer - if *t == nil { - *t = make(http.Header) - } - (*t)[k] = vv - } -} - -func (rl *clientConnReadLoop) processGoAway(f *GoAwayFrame) error { - cc := rl.cc - cc.t.connPool().MarkDead(cc) - if f.ErrCode != 0 { - // TODO: deal with GOAWAY more. particularly the error code - cc.vlogf("transport got GOAWAY with error code = %v", f.ErrCode) - } - cc.setGoAway(f) - return nil -} - -func (rl *clientConnReadLoop) processSettings(f *SettingsFrame) error { - cc := rl.cc - cc.mu.Lock() - defer cc.mu.Unlock() - - if f.IsAck() { - if cc.wantSettingsAck { - cc.wantSettingsAck = false - return nil - } - return ConnectionError(ErrCodeProtocol) - } - - err := f.ForeachSetting(func(s Setting) error { - switch s.ID { - case SettingMaxFrameSize: - cc.maxFrameSize = s.Val - case SettingMaxConcurrentStreams: - cc.maxConcurrentStreams = s.Val - case SettingInitialWindowSize: - // Values above the maximum flow-control - // window size of 2^31-1 MUST be treated as a - // connection error (Section 5.4.1) of type - // FLOW_CONTROL_ERROR. - if s.Val > math.MaxInt32 { - return ConnectionError(ErrCodeFlowControl) - } - - // Adjust flow control of currently-open - // frames by the difference of the old initial - // window size and this one. - delta := int32(s.Val) - int32(cc.initialWindowSize) - for _, cs := range cc.streams { - cs.flow.add(delta) - } - cc.cond.Broadcast() - - cc.initialWindowSize = s.Val - default: - // TODO(bradfitz): handle more settings? SETTINGS_HEADER_TABLE_SIZE probably. 
- cc.vlogf("Unhandled Setting: %v", s) - } - return nil - }) - if err != nil { - return err - } - - cc.wmu.Lock() - defer cc.wmu.Unlock() - - cc.fr.WriteSettingsAck() - cc.bw.Flush() - return cc.werr -} - -func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error { - cc := rl.cc - cs := cc.streamByID(f.StreamID, false) - if f.StreamID != 0 && cs == nil { - return nil - } - - cc.mu.Lock() - defer cc.mu.Unlock() - - fl := &cc.flow - if cs != nil { - fl = &cs.flow - } - if !fl.add(int32(f.Increment)) { - return ConnectionError(ErrCodeFlowControl) - } - cc.cond.Broadcast() - return nil -} - -func (rl *clientConnReadLoop) processResetStream(f *RSTStreamFrame) error { - cs := rl.cc.streamByID(f.StreamID, true) - if cs == nil { - // TODO: return error if server tries to RST_STREAM an idle stream - return nil - } - select { - case <-cs.peerReset: - // Already reset. - // This is the only goroutine - // which closes this, so there - // isn't a race. - default: - err := streamError(cs.ID, f.ErrCode) - cs.resetErr = err - close(cs.peerReset) - cs.bufPipe.CloseWithError(err) - cs.cc.cond.Broadcast() // wake up checkResetOrDone via clientStream.awaitFlowControl - } - delete(rl.activeRes, cs.ID) - return nil -} - -// Ping sends a PING frame to the server and waits for the ack. -// Public implementation is in go17.go and not_go17.go -func (cc *ClientConn) ping(ctx contextContext) error { - c := make(chan struct{}) - // Generate a random payload - var p [8]byte - for { - if _, err := rand.Read(p[:]); err != nil { - return err - } - cc.mu.Lock() - // check for dup before insert - if _, found := cc.pings[p]; !found { - cc.pings[p] = c - cc.mu.Unlock() - break - } - cc.mu.Unlock() - } - cc.wmu.Lock() - if err := cc.fr.WritePing(false, p); err != nil { - cc.wmu.Unlock() - return err - } - if err := cc.bw.Flush(); err != nil { - cc.wmu.Unlock() - return err - } - cc.wmu.Unlock() - select { - case <-c: - return nil - case <-ctx.Done(): - return ctx.Err() - case <-cc.readerDone: - // connection closed - return cc.readerErr - } -} - -func (rl *clientConnReadLoop) processPing(f *PingFrame) error { - if f.IsAck() { - cc := rl.cc - cc.mu.Lock() - defer cc.mu.Unlock() - // If ack, notify listener if any - if c, ok := cc.pings[f.Data]; ok { - close(c) - delete(cc.pings, f.Data) - } - return nil - } - cc := rl.cc - cc.wmu.Lock() - defer cc.wmu.Unlock() - if err := cc.fr.WritePing(true, f.Data); err != nil { - return err - } - return cc.bw.Flush() -} - -func (rl *clientConnReadLoop) processPushPromise(f *PushPromiseFrame) error { - // We told the peer we don't want them. - // Spec says: - // "PUSH_PROMISE MUST NOT be sent if the SETTINGS_ENABLE_PUSH - // setting of the peer endpoint is set to 0. An endpoint that - // has set this setting and has received acknowledgement MUST - // treat the receipt of a PUSH_PROMISE frame as a connection - // error (Section 5.4.1) of type PROTOCOL_ERROR." - return ConnectionError(ErrCodeProtocol) -} - -func (cc *ClientConn) writeStreamReset(streamID uint32, code ErrCode, err error) { - // TODO: map err to more interesting error codes, once the - // HTTP community comes up with some. But currently for - // RST_STREAM there's no equivalent to GOAWAY frame's debug - // data, and the error codes are all pretty vague ("cancel").
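The unexported ping above is wrapped by an exported ClientConn.Ping on Go 1.7+ builds (per the go17.go/not_go17.go note in its comment), so a connection health check looks roughly like this; the host is a placeholder and error handling is minimal:

    package main

    import (
        "context"
        "crypto/tls"
        "log"

        "golang.org/x/net/http2"
    )

    func main() {
        conn, err := tls.Dial("tcp", "example.com:443", &tls.Config{
            NextProtos: []string{"h2"},
        })
        if err != nil {
            log.Fatal(err)
        }
        t := &http2.Transport{}
        cc, err := t.NewClientConn(conn)
        if err != nil {
            log.Fatal(err)
        }
        // Ping writes a PING frame with a random payload and waits for
        // the matching ack from the read loop.
        if err := cc.Ping(context.Background()); err != nil {
            log.Fatal(err)
        }
        log.Println("PING acked")
    }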
- cc.wmu.Lock() - cc.fr.WriteRSTStream(streamID, code) - cc.bw.Flush() - cc.wmu.Unlock() -} - -var ( - errResponseHeaderListSize = errors.New("http2: response header list larger than advertised limit") - errPseudoTrailers = errors.New("http2: invalid pseudo header in trailers") -) - -func (cc *ClientConn) logf(format string, args ...interface{}) { - cc.t.logf(format, args...) -} - -func (cc *ClientConn) vlogf(format string, args ...interface{}) { - cc.t.vlogf(format, args...) -} - -func (t *Transport) vlogf(format string, args ...interface{}) { - if VerboseLogs { - t.logf(format, args...) - } -} - -func (t *Transport) logf(format string, args ...interface{}) { - log.Printf(format, args...) -} - -var noBody io.ReadCloser = ioutil.NopCloser(bytes.NewReader(nil)) - -func strSliceContains(ss []string, s string) bool { - for _, v := range ss { - if v == s { - return true - } - } - return false -} - -type erringRoundTripper struct{ err error } - -func (rt erringRoundTripper) RoundTrip(*http.Request) (*http.Response, error) { return nil, rt.err } - -// gzipReader wraps a response body so it can lazily -// call gzip.NewReader on the first call to Read -type gzipReader struct { - body io.ReadCloser // underlying Response.Body - zr *gzip.Reader // lazily-initialized gzip reader - zerr error // sticky error -} - -func (gz *gzipReader) Read(p []byte) (n int, err error) { - if gz.zerr != nil { - return 0, gz.zerr - } - if gz.zr == nil { - gz.zr, err = gzip.NewReader(gz.body) - if err != nil { - gz.zerr = err - return 0, err - } - } - return gz.zr.Read(p) -} - -func (gz *gzipReader) Close() error { - return gz.body.Close() -} - -type errorReader struct{ err error } - -func (r errorReader) Read(p []byte) (int, error) { return 0, r.err } - -// bodyWriterState encapsulates various state around the Transport's writing -// of the request body, particularly regarding doing delayed writes of the body -// when the request contains "Expect: 100-continue". -type bodyWriterState struct { - cs *clientStream - timer *time.Timer // if non-nil, we're doing a delayed write - fnonce *sync.Once // to call fn with - fn func() // the code to run in the goroutine, writing the body - resc chan error // result of fn's execution - delay time.Duration // how long we should delay a delayed write for -} - -func (t *Transport) getBodyWriterState(cs *clientStream, body io.Reader) (s bodyWriterState) { - s.cs = cs - if body == nil { - return - } - resc := make(chan error, 1) - s.resc = resc - s.fn = func() { - cs.cc.mu.Lock() - cs.startedWrite = true - cs.cc.mu.Unlock() - resc <- cs.writeRequestBody(body, cs.req.Body) - } - s.delay = t.expectContinueTimeout() - if s.delay == 0 || - !httplex.HeaderValuesContainsToken( - cs.req.Header["Expect"], - "100-continue") { - return - } - s.fnonce = new(sync.Once) - - // Arm the timer with a very large duration, which we'll - // intentionally lower later. It has to be large now because - // we need a handle to it before writing the headers, but the - // s.delay value is defined to not start until after the - // request headers were written. - const hugeDuration = 365 * 24 * time.Hour - s.timer = time.AfterFunc(hugeDuration, func() { - s.fnonce.Do(s.fn) - }) - return -} - -func (s bodyWriterState) cancel() { - if s.timer != nil { - s.timer.Stop() - } -} - -func (s bodyWriterState) on100() { - if s.timer == nil { - // If we didn't do a delayed write, ignore the server's - // bogus 100 continue response. 
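// A distilled sketch of the "Expect: 100-continue" machinery around this point:
// the body write is guarded by a sync.Once and armed on a timer, so whichever
// of "100 Continue arrived" or "timeout elapsed" happens first triggers exactly
// one write. The 50ms delay below is illustrative, not the library default.
package main

import (
	"fmt"
	"sync"
	"time"
)

func main() {
	var once sync.Once
	write := func() { fmt.Println("writing request body") }

	// Arm far in the future; lowered only after headers are on the wire.
	timer := time.AfterFunc(365*24*time.Hour, func() { once.Do(write) })
	timer.Stop()
	timer.Reset(50 * time.Millisecond) // scheduleBodyWrite's role

	// Simulate the server's 100 Continue arriving first (on100's role).
	timer.Stop()
	go func() { once.Do(write) }()

	time.Sleep(100 * time.Millisecond) // give the goroutine time to run
}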
- return - } - s.timer.Stop() - go func() { s.fnonce.Do(s.fn) }() -} - -// scheduleBodyWrite starts writing the body, either immediately (in -// the common case) or after the delay timeout. It should not be -// called until after the headers have been written. -func (s bodyWriterState) scheduleBodyWrite() { - if s.timer == nil { - // We're not doing a delayed write (see - // getBodyWriterState), so just start the writing - // goroutine immediately. - go s.fn() - return - } - traceWait100Continue(s.cs.trace) - if s.timer.Stop() { - s.timer.Reset(s.delay) - } -} - -// isConnectionCloseRequest reports whether req should use its own -// connection for a single request and then close the connection. -func isConnectionCloseRequest(req *http.Request) bool { - return req.Close || httplex.HeaderValuesContainsToken(req.Header["Connection"], "close") -} diff --git a/vendor/golang.org/x/net/http2/write.go b/vendor/golang.org/x/net/http2/write.go deleted file mode 100644 index 6b0dfae319a4..000000000000 --- a/vendor/golang.org/x/net/http2/write.go +++ /dev/null @@ -1,370 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package http2 - -import ( - "bytes" - "fmt" - "log" - "net/http" - "net/url" - "time" - - "golang.org/x/net/http2/hpack" - "golang.org/x/net/lex/httplex" -) - -// writeFramer is implemented by any type that is used to write frames. -type writeFramer interface { - writeFrame(writeContext) error - - // staysWithinBuffer reports whether this writer promises that - // it will only write less than or equal to size bytes, and it - // won't Flush the write context. - staysWithinBuffer(size int) bool -} - -// writeContext is the interface needed by the various frame writer -// types below. All the writeFrame methods below are scheduled via the -// frame writing scheduler (see writeScheduler in writesched.go). -// -// This interface is implemented by *serverConn. -// -// TODO: decide whether to a) use this in the client code (which didn't -// end up using this yet, because it has a simpler design, not -// currently implementing priorities), or b) delete this and -// make the server code a bit more concrete. -type writeContext interface { - Framer() *Framer - Flush() error - CloseConn() error - // HeaderEncoder returns an HPACK encoder that writes to the - // returned buffer. - HeaderEncoder() (*hpack.Encoder, *bytes.Buffer) -} - -// writeEndsStream reports whether w writes a frame that will transition -// the stream to a half-closed local state. This returns false for RST_STREAM, -// which closes the entire stream (not just the local half). -func writeEndsStream(w writeFramer) bool { - switch v := w.(type) { - case *writeData: - return v.endStream - case *writeResHeaders: - return v.endStream - case nil: - // This can only happen if the caller reuses w after it's - // been intentionally nil'ed out to prevent use. Keep this - // here to catch future refactoring breaking it. 
- panic("writeEndsStream called on nil writeFramer") - } - return false -} - -type flushFrameWriter struct{} - -func (flushFrameWriter) writeFrame(ctx writeContext) error { - return ctx.Flush() -} - -func (flushFrameWriter) staysWithinBuffer(max int) bool { return false } - -type writeSettings []Setting - -func (s writeSettings) staysWithinBuffer(max int) bool { - const settingSize = 6 // uint16 + uint32 - return frameHeaderLen+settingSize*len(s) <= max - -} - -func (s writeSettings) writeFrame(ctx writeContext) error { - return ctx.Framer().WriteSettings([]Setting(s)...) -} - -type writeGoAway struct { - maxStreamID uint32 - code ErrCode -} - -func (p *writeGoAway) writeFrame(ctx writeContext) error { - err := ctx.Framer().WriteGoAway(p.maxStreamID, p.code, nil) - if p.code != 0 { - ctx.Flush() // ignore error: we're hanging up on them anyway - time.Sleep(50 * time.Millisecond) - ctx.CloseConn() - } - return err -} - -func (*writeGoAway) staysWithinBuffer(max int) bool { return false } // flushes - -type writeData struct { - streamID uint32 - p []byte - endStream bool -} - -func (w *writeData) String() string { - return fmt.Sprintf("writeData(stream=%d, p=%d, endStream=%v)", w.streamID, len(w.p), w.endStream) -} - -func (w *writeData) writeFrame(ctx writeContext) error { - return ctx.Framer().WriteData(w.streamID, w.endStream, w.p) -} - -func (w *writeData) staysWithinBuffer(max int) bool { - return frameHeaderLen+len(w.p) <= max -} - -// handlerPanicRST is the message sent from handler goroutines when -// the handler panics. -type handlerPanicRST struct { - StreamID uint32 -} - -func (hp handlerPanicRST) writeFrame(ctx writeContext) error { - return ctx.Framer().WriteRSTStream(hp.StreamID, ErrCodeInternal) -} - -func (hp handlerPanicRST) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max } - -func (se StreamError) writeFrame(ctx writeContext) error { - return ctx.Framer().WriteRSTStream(se.StreamID, se.Code) -} - -func (se StreamError) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max } - -type writePingAck struct{ pf *PingFrame } - -func (w writePingAck) writeFrame(ctx writeContext) error { - return ctx.Framer().WritePing(true, w.pf.Data) -} - -func (w writePingAck) staysWithinBuffer(max int) bool { return frameHeaderLen+len(w.pf.Data) <= max } - -type writeSettingsAck struct{} - -func (writeSettingsAck) writeFrame(ctx writeContext) error { - return ctx.Framer().WriteSettingsAck() -} - -func (writeSettingsAck) staysWithinBuffer(max int) bool { return frameHeaderLen <= max } - -// splitHeaderBlock splits headerBlock into fragments so that each fragment fits -// in a single frame, then calls fn for each fragment. firstFrag/lastFrag are true -// for the first/last fragment, respectively. -func splitHeaderBlock(ctx writeContext, headerBlock []byte, fn func(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error) error { - // For now we're lazy and just pick the minimum MAX_FRAME_SIZE - // that all peers must support (16KB). Later we could care - // more and send larger frames if the peer advertised it, but - // there's little point. Most headers are small anyway (so we - // generally won't have CONTINUATION frames), and extra frames - // only waste 9 bytes anyway. 
- const maxFrameSize = 16384 - - first := true - for len(headerBlock) > 0 { - frag := headerBlock - if len(frag) > maxFrameSize { - frag = frag[:maxFrameSize] - } - headerBlock = headerBlock[len(frag):] - if err := fn(ctx, frag, first, len(headerBlock) == 0); err != nil { - return err - } - first = false - } - return nil -} - -// writeResHeaders is a request to write a HEADERS and 0+ CONTINUATION frames -// for HTTP response headers or trailers from a server handler. -type writeResHeaders struct { - streamID uint32 - httpResCode int // 0 means no ":status" line - h http.Header // may be nil - trailers []string // if non-nil, which keys of h to write. nil means all. - endStream bool - - date string - contentType string - contentLength string -} - -func encKV(enc *hpack.Encoder, k, v string) { - if VerboseLogs { - log.Printf("http2: server encoding header %q = %q", k, v) - } - enc.WriteField(hpack.HeaderField{Name: k, Value: v}) -} - -func (w *writeResHeaders) staysWithinBuffer(max int) bool { - // TODO: this is a common one. It'd be nice to return true - // here and get into the fast path if we could be clever and - // calculate the size fast enough, or at least a conservative - // uppper bound that usually fires. (Maybe if w.h and - // w.trailers are nil, so we don't need to enumerate it.) - // Otherwise I'm afraid that just calculating the length to - // answer this question would be slower than the ~2µs benefit. - return false -} - -func (w *writeResHeaders) writeFrame(ctx writeContext) error { - enc, buf := ctx.HeaderEncoder() - buf.Reset() - - if w.httpResCode != 0 { - encKV(enc, ":status", httpCodeString(w.httpResCode)) - } - - encodeHeaders(enc, w.h, w.trailers) - - if w.contentType != "" { - encKV(enc, "content-type", w.contentType) - } - if w.contentLength != "" { - encKV(enc, "content-length", w.contentLength) - } - if w.date != "" { - encKV(enc, "date", w.date) - } - - headerBlock := buf.Bytes() - if len(headerBlock) == 0 && w.trailers == nil { - panic("unexpected empty hpack") - } - - return splitHeaderBlock(ctx, headerBlock, w.writeHeaderBlock) -} - -func (w *writeResHeaders) writeHeaderBlock(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error { - if firstFrag { - return ctx.Framer().WriteHeaders(HeadersFrameParam{ - StreamID: w.streamID, - BlockFragment: frag, - EndStream: w.endStream, - EndHeaders: lastFrag, - }) - } else { - return ctx.Framer().WriteContinuation(w.streamID, lastFrag, frag) - } -} - -// writePushPromise is a request to write a PUSH_PROMISE and 0+ CONTINUATION frames. -type writePushPromise struct { - streamID uint32 // pusher stream - method string // for :method - url *url.URL // for :scheme, :authority, :path - h http.Header - - // Creates an ID for a pushed stream. This runs on serveG just before - // the frame is written. The returned ID is copied to promisedID. 
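// A usage sketch of the hpack encoder that encKV above wraps, using the real
// golang.org/x/net/http2/hpack API against a local buffer.
package main

import (
	"bytes"
	"fmt"

	"golang.org/x/net/http2/hpack"
)

func main() {
	var buf bytes.Buffer
	enc := hpack.NewEncoder(&buf)
	for _, f := range []hpack.HeaderField{
		{Name: ":status", Value: "200"},
		{Name: "content-type", Value: "text/plain"},
		{Name: "content-length", Value: "5"},
	} {
		if err := enc.WriteField(f); err != nil {
			panic(err)
		}
	}
	fmt.Printf("header block: %d bytes, ready for splitHeaderBlock\n", buf.Len())
}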
- allocatePromisedID func() (uint32, error) - promisedID uint32 -} - -func (w *writePushPromise) staysWithinBuffer(max int) bool { - // TODO: see writeResHeaders.staysWithinBuffer - return false -} - -func (w *writePushPromise) writeFrame(ctx writeContext) error { - enc, buf := ctx.HeaderEncoder() - buf.Reset() - - encKV(enc, ":method", w.method) - encKV(enc, ":scheme", w.url.Scheme) - encKV(enc, ":authority", w.url.Host) - encKV(enc, ":path", w.url.RequestURI()) - encodeHeaders(enc, w.h, nil) - - headerBlock := buf.Bytes() - if len(headerBlock) == 0 { - panic("unexpected empty hpack") - } - - return splitHeaderBlock(ctx, headerBlock, w.writeHeaderBlock) -} - -func (w *writePushPromise) writeHeaderBlock(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error { - if firstFrag { - return ctx.Framer().WritePushPromise(PushPromiseParam{ - StreamID: w.streamID, - PromiseID: w.promisedID, - BlockFragment: frag, - EndHeaders: lastFrag, - }) - } else { - return ctx.Framer().WriteContinuation(w.streamID, lastFrag, frag) - } -} - -type write100ContinueHeadersFrame struct { - streamID uint32 -} - -func (w write100ContinueHeadersFrame) writeFrame(ctx writeContext) error { - enc, buf := ctx.HeaderEncoder() - buf.Reset() - encKV(enc, ":status", "100") - return ctx.Framer().WriteHeaders(HeadersFrameParam{ - StreamID: w.streamID, - BlockFragment: buf.Bytes(), - EndStream: false, - EndHeaders: true, - }) -} - -func (w write100ContinueHeadersFrame) staysWithinBuffer(max int) bool { - // Sloppy but conservative: - return 9+2*(len(":status")+len("100")) <= max -} - -type writeWindowUpdate struct { - streamID uint32 // or 0 for conn-level - n uint32 -} - -func (wu writeWindowUpdate) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max } - -func (wu writeWindowUpdate) writeFrame(ctx writeContext) error { - return ctx.Framer().WriteWindowUpdate(wu.streamID, wu.n) -} - -// encodeHeaders encodes an http.Header. If keys is not nil, then (k, h[k]) -// is encoded only only if k is in keys. -func encodeHeaders(enc *hpack.Encoder, h http.Header, keys []string) { - if keys == nil { - sorter := sorterPool.Get().(*sorter) - // Using defer here, since the returned keys from the - // sorter.Keys method is only valid until the sorter - // is returned: - defer sorterPool.Put(sorter) - keys = sorter.Keys(h) - } - for _, k := range keys { - vv := h[k] - k = lowerHeader(k) - if !validWireHeaderFieldName(k) { - // Skip it as backup paranoia. Per - // golang.org/issue/14048, these should - // already be rejected at a higher level. - continue - } - isTE := k == "transfer-encoding" - for _, v := range vv { - if !httplex.ValidHeaderFieldValue(v) { - // TODO: return an error? golang.org/issue/14048 - // For now just omit it. - continue - } - // TODO: more of "8.1.2.2 Connection-Specific Header Fields" - if isTE && v != "trailers" { - continue - } - encKV(enc, k, v) - } - } -} diff --git a/vendor/golang.org/x/net/http2/writesched.go b/vendor/golang.org/x/net/http2/writesched.go deleted file mode 100644 index 4fe307307351..000000000000 --- a/vendor/golang.org/x/net/http2/writesched.go +++ /dev/null @@ -1,242 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package http2 - -import "fmt" - -// WriteScheduler is the interface implemented by HTTP/2 write schedulers. -// Methods are never called concurrently. -type WriteScheduler interface { - // OpenStream opens a new stream in the write scheduler. 
- // It is illegal to call this with streamID=0 or with a streamID that is - // already open -- the call may panic. - OpenStream(streamID uint32, options OpenStreamOptions) - - // CloseStream closes a stream in the write scheduler. Any frames queued on - // this stream should be discarded. It is illegal to call this on a stream - // that is not open -- the call may panic. - CloseStream(streamID uint32) - - // AdjustStream adjusts the priority of the given stream. This may be called - // on a stream that has not yet been opened or has been closed. Note that - // RFC 7540 allows PRIORITY frames to be sent on streams in any state. See: - // https://tools.ietf.org/html/rfc7540#section-5.1 - AdjustStream(streamID uint32, priority PriorityParam) - - // Push queues a frame in the scheduler. In most cases, this will not be - // called with wr.StreamID()!=0 unless that stream is currently open. The one - // exception is RST_STREAM frames, which may be sent on idle or closed streams. - Push(wr FrameWriteRequest) - - // Pop dequeues the next frame to write. Returns false if no frames can - // be written. Frames with a given wr.StreamID() are Pop'd in the same - // order they are Push'd. - Pop() (wr FrameWriteRequest, ok bool) -} - -// OpenStreamOptions specifies extra options for WriteScheduler.OpenStream. -type OpenStreamOptions struct { - // PusherID is zero if the stream was initiated by the client. Otherwise, - // PusherID names the stream that pushed the newly opened stream. - PusherID uint32 -} - -// FrameWriteRequest is a request to write a frame. -type FrameWriteRequest struct { - // write is the interface value that does the writing, once the - // WriteScheduler has selected this frame to write. The write - // functions are all defined in write.go. - write writeFramer - - // stream is the stream on which this frame will be written. - // nil for non-stream frames like PING and SETTINGS. - stream *stream - - // done, if non-nil, must be a buffered channel with space for - // 1 message and is sent the return value from write (or an - // earlier error) when the frame has been written. - done chan error -} - -// StreamID returns the id of the stream this frame will be written to. -// 0 is used for non-stream frames such as PING and SETTINGS. -func (wr FrameWriteRequest) StreamID() uint32 { - if wr.stream == nil { - if se, ok := wr.write.(StreamError); ok { - // (*serverConn).resetStream doesn't set - // stream because it doesn't necessarily have - // one. So special case this type of write - // message. - return se.StreamID - } - return 0 - } - return wr.stream.id -} - -// DataSize returns the number of flow control bytes that must be consumed -// to write this entire frame. This is 0 for non-DATA frames. -func (wr FrameWriteRequest) DataSize() int { - if wd, ok := wr.write.(*writeData); ok { - return len(wd.p) - } - return 0 -} - -// Consume consumes min(n, available) bytes from this frame, where available -// is the number of flow control bytes available on the stream. Consume returns -// 0, 1, or 2 frames, where the integer return value gives the number of frames -// returned. -// -// If flow control prevents consuming any bytes, this returns (_, _, 0). If -// the entire frame was consumed, this returns (wr, _, 1). Otherwise, this -// returns (consumed, rest, 2), where 'consumed' contains the consumed bytes and -// 'rest' contains the remaining bytes. The consumed bytes are deducted from the -// underlying stream's flow control budget. 
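// A standalone sketch of the arithmetic Consume (below) implements: the write
// is capped by the requested bytes n, the stream's flow-control credit, and
// SETTINGS_MAX_FRAME_SIZE; an oversized DATA payload splits into a consumed
// prefix and a rest that stays queued.
package main

import "fmt"

func split(p []byte, n, flowAvail, maxFrameSize int32) (consumed, rest []byte) {
	allowed := flowAvail
	if n < allowed {
		allowed = n
	}
	if maxFrameSize < allowed {
		allowed = maxFrameSize
	}
	if allowed <= 0 {
		return nil, p // flow control blocks the whole frame
	}
	if int32(len(p)) > allowed {
		return p[:allowed], p[allowed:]
	}
	return p, nil
}

func main() {
	p := make([]byte, 30000)
	c, r := split(p, 1<<31-1, 20000, 16384)
	fmt.Printf("consumed=%d rest=%d\n", len(c), len(r)) // consumed=16384 rest=13616
}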
-func (wr FrameWriteRequest) Consume(n int32) (FrameWriteRequest, FrameWriteRequest, int) { - var empty FrameWriteRequest - - // Non-DATA frames are always consumed whole. - wd, ok := wr.write.(*writeData) - if !ok || len(wd.p) == 0 { - return wr, empty, 1 - } - - // Might need to split after applying limits. - allowed := wr.stream.flow.available() - if n < allowed { - allowed = n - } - if wr.stream.sc.maxFrameSize < allowed { - allowed = wr.stream.sc.maxFrameSize - } - if allowed <= 0 { - return empty, empty, 0 - } - if len(wd.p) > int(allowed) { - wr.stream.flow.take(allowed) - consumed := FrameWriteRequest{ - stream: wr.stream, - write: &writeData{ - streamID: wd.streamID, - p: wd.p[:allowed], - // Even if the original had endStream set, there - // are bytes remaining because len(wd.p) > allowed, - // so we know endStream is false. - endStream: false, - }, - // Our caller is blocking on the final DATA frame, not - // this intermediate frame, so no need to wait. - done: nil, - } - rest := FrameWriteRequest{ - stream: wr.stream, - write: &writeData{ - streamID: wd.streamID, - p: wd.p[allowed:], - endStream: wd.endStream, - }, - done: wr.done, - } - return consumed, rest, 2 - } - - // The frame is consumed whole. - // NB: This cast cannot overflow because allowed is <= math.MaxInt32. - wr.stream.flow.take(int32(len(wd.p))) - return wr, empty, 1 -} - -// String is for debugging only. -func (wr FrameWriteRequest) String() string { - var des string - if s, ok := wr.write.(fmt.Stringer); ok { - des = s.String() - } else { - des = fmt.Sprintf("%T", wr.write) - } - return fmt.Sprintf("[FrameWriteRequest stream=%d, ch=%v, writer=%v]", wr.StreamID(), wr.done != nil, des) -} - -// replyToWriter sends err to wr.done and panics if the send must block -// This does nothing if wr.done is nil. -func (wr *FrameWriteRequest) replyToWriter(err error) { - if wr.done == nil { - return - } - select { - case wr.done <- err: - default: - panic(fmt.Sprintf("unbuffered done channel passed in for type %T", wr.write)) - } - wr.write = nil // prevent use (assume it's tainted after wr.done send) -} - -// writeQueue is used by implementations of WriteScheduler. -type writeQueue struct { - s []FrameWriteRequest -} - -func (q *writeQueue) empty() bool { return len(q.s) == 0 } - -func (q *writeQueue) push(wr FrameWriteRequest) { - q.s = append(q.s, wr) -} - -func (q *writeQueue) shift() FrameWriteRequest { - if len(q.s) == 0 { - panic("invalid use of queue") - } - wr := q.s[0] - // TODO: less copy-happy queue. - copy(q.s, q.s[1:]) - q.s[len(q.s)-1] = FrameWriteRequest{} - q.s = q.s[:len(q.s)-1] - return wr -} - -// consume consumes up to n bytes from q.s[0]. If the frame is -// entirely consumed, it is removed from the queue. If the frame -// is partially consumed, the frame is kept with the consumed -// bytes removed. Returns true iff any bytes were consumed. -func (q *writeQueue) consume(n int32) (FrameWriteRequest, bool) { - if len(q.s) == 0 { - return FrameWriteRequest{}, false - } - consumed, rest, numresult := q.s[0].Consume(n) - switch numresult { - case 0: - return FrameWriteRequest{}, false - case 1: - q.shift() - case 2: - q.s[0] = rest - } - return consumed, true -} - -type writeQueuePool []*writeQueue - -// put inserts an unused writeQueue into the pool. -func (p *writeQueuePool) put(q *writeQueue) { - for i := range q.s { - q.s[i] = FrameWriteRequest{} - } - q.s = q.s[:0] - *p = append(*p, q) -} - -// get returns an empty writeQueue. 
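// put (above) and get (below) share a garbage-collection idiom worth noting:
// zero each element before truncating or popping so the pool doesn't pin old
// FrameWriteRequests. A standalone sketch with a local stand-in type:
package main

import "fmt"

type item struct{ payload []byte }

type pool []*[]item

func (p *pool) put(s *[]item) {
	for i := range *s {
		(*s)[i] = item{} // drop references before pooling
	}
	*s = (*s)[:0]
	*p = append(*p, s)
}

func (p *pool) get() *[]item {
	if len(*p) == 0 {
		return new([]item)
	}
	last := len(*p) - 1
	s := (*p)[last]
	(*p)[last] = nil // same idiom: don't pin the popped queue
	*p = (*p)[:last]
	return s
}

func main() {
	var p pool
	q := p.get()
	*q = append(*q, item{payload: make([]byte, 1024)})
	p.put(q)
	fmt.Println("pooled queues:", len(p))
}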
-func (p *writeQueuePool) get() *writeQueue { - ln := len(*p) - if ln == 0 { - return new(writeQueue) - } - x := ln - 1 - q := (*p)[x] - (*p)[x] = nil - *p = (*p)[:x] - return q -} diff --git a/vendor/golang.org/x/net/http2/writesched_priority.go b/vendor/golang.org/x/net/http2/writesched_priority.go deleted file mode 100644 index 01132721bddd..000000000000 --- a/vendor/golang.org/x/net/http2/writesched_priority.go +++ /dev/null @@ -1,452 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package http2 - -import ( - "fmt" - "math" - "sort" -) - -// RFC 7540, Section 5.3.5: the default weight is 16. -const priorityDefaultWeight = 15 // 16 = 15 + 1 - -// PriorityWriteSchedulerConfig configures a priorityWriteScheduler. -type PriorityWriteSchedulerConfig struct { - // MaxClosedNodesInTree controls the maximum number of closed streams to - // retain in the priority tree. Setting this to zero saves a small amount - // of memory at the cost of performance. - // - // See RFC 7540, Section 5.3.4: - // "It is possible for a stream to become closed while prioritization - // information ... is in transit. ... This potentially creates suboptimal - // prioritization, since the stream could be given a priority that is - // different from what is intended. To avoid these problems, an endpoint - // SHOULD retain stream prioritization state for a period after streams - // become closed. The longer state is retained, the lower the chance that - // streams are assigned incorrect or default priority values." - MaxClosedNodesInTree int - - // MaxIdleNodesInTree controls the maximum number of idle streams to - // retain in the priority tree. Setting this to zero saves a small amount - // of memory at the cost of performance. - // - // See RFC 7540, Section 5.3.4: - // Similarly, streams that are in the "idle" state can be assigned - // priority or become a parent of other streams. This allows for the - // creation of a grouping node in the dependency tree, which enables - // more flexible expressions of priority. Idle streams begin with a - // default priority (Section 5.3.5). - MaxIdleNodesInTree int - - // ThrottleOutOfOrderWrites enables write throttling to help ensure that - // data is delivered in priority order. This works around a race where - // stream B depends on stream A and both streams are about to call Write - // to queue DATA frames. If B wins the race, a naive scheduler would eagerly - // write as much data from B as possible, but this is suboptimal because A - // is a higher-priority stream. With throttling enabled, we write a small - // amount of data from B to minimize the amount of bandwidth that B can - // steal from A. - ThrottleOutOfOrderWrites bool -} - -// NewPriorityWriteScheduler constructs a WriteScheduler that schedules -// frames by following HTTP/2 priorities as described in RFC 7340 Section 5.3. -// If cfg is nil, default options are used. 
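// A usage sketch for the constructor documented above: both it and the config
// type are exported, so a server can opt into priority scheduling through the
// http2.Server.NewWriteScheduler hook. TLS material is assumed to exist.
package main

import (
	"net/http"

	"golang.org/x/net/http2"
)

func main() {
	srv := &http.Server{Addr: ":8443", Handler: http.DefaultServeMux}
	err := http2.ConfigureServer(srv, &http2.Server{
		NewWriteScheduler: func() http2.WriteScheduler {
			return http2.NewPriorityWriteScheduler(&http2.PriorityWriteSchedulerConfig{
				MaxClosedNodesInTree:     10,
				MaxIdleNodesInTree:       10,
				ThrottleOutOfOrderWrites: true,
			})
		},
	})
	if err != nil {
		panic(err)
	}
	// srv.ListenAndServeTLS("cert.pem", "key.pem") // paths are placeholders
	_ = srv
}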
-func NewPriorityWriteScheduler(cfg *PriorityWriteSchedulerConfig) WriteScheduler { - if cfg == nil { - // For justification of these defaults, see: - // https://docs.google.com/document/d/1oLhNg1skaWD4_DtaoCxdSRN5erEXrH-KnLrMwEpOtFY - cfg = &PriorityWriteSchedulerConfig{ - MaxClosedNodesInTree: 10, - MaxIdleNodesInTree: 10, - ThrottleOutOfOrderWrites: false, - } - } - - ws := &priorityWriteScheduler{ - nodes: make(map[uint32]*priorityNode), - maxClosedNodesInTree: cfg.MaxClosedNodesInTree, - maxIdleNodesInTree: cfg.MaxIdleNodesInTree, - enableWriteThrottle: cfg.ThrottleOutOfOrderWrites, - } - ws.nodes[0] = &ws.root - if cfg.ThrottleOutOfOrderWrites { - ws.writeThrottleLimit = 1024 - } else { - ws.writeThrottleLimit = math.MaxInt32 - } - return ws -} - -type priorityNodeState int - -const ( - priorityNodeOpen priorityNodeState = iota - priorityNodeClosed - priorityNodeIdle -) - -// priorityNode is a node in an HTTP/2 priority tree. -// Each node is associated with a single stream ID. -// See RFC 7540, Section 5.3. -type priorityNode struct { - q writeQueue // queue of pending frames to write - id uint32 // id of the stream, or 0 for the root of the tree - weight uint8 // the actual weight is weight+1, so the value is in [1,256] - state priorityNodeState // open | closed | idle - bytes int64 // number of bytes written by this node, or 0 if closed - subtreeBytes int64 // sum(node.bytes) of all nodes in this subtree - - // These links form the priority tree. - parent *priorityNode - kids *priorityNode // start of the kids list - prev, next *priorityNode // doubly-linked list of siblings -} - -func (n *priorityNode) setParent(parent *priorityNode) { - if n == parent { - panic("setParent to self") - } - if n.parent == parent { - return - } - // Unlink from current parent. - if parent := n.parent; parent != nil { - if n.prev == nil { - parent.kids = n.next - } else { - n.prev.next = n.next - } - if n.next != nil { - n.next.prev = n.prev - } - } - // Link to new parent. - // If parent=nil, remove n from the tree. - // Always insert at the head of parent.kids (this is assumed by walkReadyInOrder). - n.parent = parent - if parent == nil { - n.next = nil - n.prev = nil - } else { - n.next = parent.kids - n.prev = nil - if n.next != nil { - n.next.prev = n - } - parent.kids = n - } -} - -func (n *priorityNode) addBytes(b int64) { - n.bytes += b - for ; n != nil; n = n.parent { - n.subtreeBytes += b - } -} - -// walkReadyInOrder iterates over the tree in priority order, calling f for each node -// with a non-empty write queue. When f returns true, this funcion returns true and the -// walk halts. tmp is used as scratch space for sorting. -// -// f(n, openParent) takes two arguments: the node to visit, n, and a bool that is true -// if any ancestor p of n is still open (ignoring the root node). -func (n *priorityNode) walkReadyInOrder(openParent bool, tmp *[]*priorityNode, f func(*priorityNode, bool) bool) bool { - if !n.q.empty() && f(n, openParent) { - return true - } - if n.kids == nil { - return false - } - - // Don't consider the root "open" when updating openParent since - // we can't send data frames on the root stream (only control frames). - if n.id != 0 { - openParent = openParent || (n.state == priorityNodeOpen) - } - - // Common case: only one kid or all kids have the same weight. - // Some clients don't use weights; other clients (like web browsers) - // use mostly-linear priority trees. 
- w := n.kids.weight - needSort := false - for k := n.kids.next; k != nil; k = k.next { - if k.weight != w { - needSort = true - break - } - } - if !needSort { - for k := n.kids; k != nil; k = k.next { - if k.walkReadyInOrder(openParent, tmp, f) { - return true - } - } - return false - } - - // Uncommon case: sort the child nodes. We remove the kids from the parent, - // then re-insert after sorting so we can reuse tmp for future sort calls. - *tmp = (*tmp)[:0] - for n.kids != nil { - *tmp = append(*tmp, n.kids) - n.kids.setParent(nil) - } - sort.Sort(sortPriorityNodeSiblings(*tmp)) - for i := len(*tmp) - 1; i >= 0; i-- { - (*tmp)[i].setParent(n) // setParent inserts at the head of n.kids - } - for k := n.kids; k != nil; k = k.next { - if k.walkReadyInOrder(openParent, tmp, f) { - return true - } - } - return false -} - -type sortPriorityNodeSiblings []*priorityNode - -func (z sortPriorityNodeSiblings) Len() int { return len(z) } -func (z sortPriorityNodeSiblings) Swap(i, k int) { z[i], z[k] = z[k], z[i] } -func (z sortPriorityNodeSiblings) Less(i, k int) bool { - // Prefer the subtree that has sent fewer bytes relative to its weight. - // See sections 5.3.2 and 5.3.4. - wi, bi := float64(z[i].weight+1), float64(z[i].subtreeBytes) - wk, bk := float64(z[k].weight+1), float64(z[k].subtreeBytes) - if bi == 0 && bk == 0 { - return wi >= wk - } - if bk == 0 { - return false - } - return bi/bk <= wi/wk -} - -type priorityWriteScheduler struct { - // root is the root of the priority tree, where root.id = 0. - // The root queues control frames that are not associated with any stream. - root priorityNode - - // nodes maps stream ids to priority tree nodes. - nodes map[uint32]*priorityNode - - // maxID is the maximum stream id in nodes. - maxID uint32 - - // lists of nodes that have been closed or are idle, but are kept in - // the tree for improved prioritization. When the lengths exceed either - // maxClosedNodesInTree or maxIdleNodesInTree, old nodes are discarded. - closedNodes, idleNodes []*priorityNode - - // From the config. - maxClosedNodesInTree int - maxIdleNodesInTree int - writeThrottleLimit int32 - enableWriteThrottle bool - - // tmp is scratch space for priorityNode.walkReadyInOrder to reduce allocations. - tmp []*priorityNode - - // pool of empty queues for reuse. - queuePool writeQueuePool -} - -func (ws *priorityWriteScheduler) OpenStream(streamID uint32, options OpenStreamOptions) { - // The stream may be currently idle but cannot be opened or closed. - if curr := ws.nodes[streamID]; curr != nil { - if curr.state != priorityNodeIdle { - panic(fmt.Sprintf("stream %d already opened", streamID)) - } - curr.state = priorityNodeOpen - return - } - - // RFC 7540, Section 5.3.5: - // "All streams are initially assigned a non-exclusive dependency on stream 0x0. - // Pushed streams initially depend on their associated stream. In both cases, - // streams are assigned a default weight of 16." 
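// A standalone sketch of the weight bookkeeping quoted above: the wire carries
// weight-1 in a uint8 (0..255), so the default actual weight 16 is stored as
// 15, and sibling bandwidth is split proportionally to (stored+1).
package main

import "fmt"

func main() {
	const priorityDefaultWeight = 15             // actual weight 16
	stored := []uint8{priorityDefaultWeight, 31} // actual weights 16 and 32
	total := 0
	for _, w := range stored {
		total += int(w) + 1
	}
	for i, w := range stored {
		share := 100 * float64(int(w)+1) / float64(total)
		fmt.Printf("sibling %d: actual weight %d, bandwidth share %.0f%%\n", i, int(w)+1, share)
	}
}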
- parent := ws.nodes[options.PusherID] - if parent == nil { - parent = &ws.root - } - n := &priorityNode{ - q: *ws.queuePool.get(), - id: streamID, - weight: priorityDefaultWeight, - state: priorityNodeOpen, - } - n.setParent(parent) - ws.nodes[streamID] = n - if streamID > ws.maxID { - ws.maxID = streamID - } -} - -func (ws *priorityWriteScheduler) CloseStream(streamID uint32) { - if streamID == 0 { - panic("violation of WriteScheduler interface: cannot close stream 0") - } - if ws.nodes[streamID] == nil { - panic(fmt.Sprintf("violation of WriteScheduler interface: unknown stream %d", streamID)) - } - if ws.nodes[streamID].state != priorityNodeOpen { - panic(fmt.Sprintf("violation of WriteScheduler interface: stream %d already closed", streamID)) - } - - n := ws.nodes[streamID] - n.state = priorityNodeClosed - n.addBytes(-n.bytes) - - q := n.q - ws.queuePool.put(&q) - n.q.s = nil - if ws.maxClosedNodesInTree > 0 { - ws.addClosedOrIdleNode(&ws.closedNodes, ws.maxClosedNodesInTree, n) - } else { - ws.removeNode(n) - } -} - -func (ws *priorityWriteScheduler) AdjustStream(streamID uint32, priority PriorityParam) { - if streamID == 0 { - panic("adjustPriority on root") - } - - // If streamID does not exist, there are two cases: - // - A closed stream that has been removed (this will have ID <= maxID) - // - An idle stream that is being used for "grouping" (this will have ID > maxID) - n := ws.nodes[streamID] - if n == nil { - if streamID <= ws.maxID || ws.maxIdleNodesInTree == 0 { - return - } - ws.maxID = streamID - n = &priorityNode{ - q: *ws.queuePool.get(), - id: streamID, - weight: priorityDefaultWeight, - state: priorityNodeIdle, - } - n.setParent(&ws.root) - ws.nodes[streamID] = n - ws.addClosedOrIdleNode(&ws.idleNodes, ws.maxIdleNodesInTree, n) - } - - // Section 5.3.1: A dependency on a stream that is not currently in the tree - // results in that stream being given a default priority (Section 5.3.5). - parent := ws.nodes[priority.StreamDep] - if parent == nil { - n.setParent(&ws.root) - n.weight = priorityDefaultWeight - return - } - - // Ignore if the client tries to make a node its own parent. - if n == parent { - return - } - - // Section 5.3.3: - // "If a stream is made dependent on one of its own dependencies, the - // formerly dependent stream is first moved to be dependent on the - // reprioritized stream's previous parent. The moved dependency retains - // its weight." - // - // That is: if parent depends on n, move parent to depend on n.parent. - for x := parent.parent; x != nil; x = x.parent { - if x == n { - parent.setParent(n.parent) - break - } - } - - // Section 5.3.3: The exclusive flag causes the stream to become the sole - // dependency of its parent stream, causing other dependencies to become - // dependent on the exclusive stream. - if priority.Exclusive { - k := parent.kids - for k != nil { - next := k.next - if k != n { - k.setParent(n) - } - k = next - } - } - - n.setParent(parent) - n.weight = priority.Weight -} - -func (ws *priorityWriteScheduler) Push(wr FrameWriteRequest) { - var n *priorityNode - if id := wr.StreamID(); id == 0 { - n = &ws.root - } else { - n = ws.nodes[id] - if n == nil { - // id is an idle or closed stream. wr should not be a HEADERS or - // DATA frame. However, wr can be a RST_STREAM. In this case, we - // push wr onto the root, rather than creating a new priorityNode, - // since RST_STREAM is tiny and the stream's priority is unknown - // anyway. See issue #17919. 
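// A standalone sketch of the Section 5.3.3 exclusive-flag move AdjustStream
// (above) performs: making n the exclusive child of a parent re-parents the
// parent's other children under n. Nodes here are simplified to child slices
// rather than the intrusive linked lists used by priorityNode.
package main

import "fmt"

type node struct {
	id       uint32
	parent   *node
	children []*node
}

func setParent(n, parent *node) {
	if n.parent != nil {
		kids := n.parent.children
		for i, k := range kids {
			if k == n {
				n.parent.children = append(kids[:i], kids[i+1:]...)
				break
			}
		}
	}
	n.parent = parent
	if parent != nil {
		parent.children = append(parent.children, n)
	}
}

func main() {
	root := &node{id: 0}
	a, b, n := &node{id: 3}, &node{id: 5}, &node{id: 7}
	setParent(a, root)
	setParent(b, root)
	setParent(n, root)

	// Exclusive: every other child of root becomes a child of n.
	for _, k := range append([]*node(nil), root.children...) {
		if k != n {
			setParent(k, n)
		}
	}
	for _, k := range n.children {
		fmt.Printf("stream %d now depends on stream %d\n", k.id, n.id)
	}
}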
- if wr.DataSize() > 0 { - panic("add DATA on non-open stream") - } - n = &ws.root - } - } - n.q.push(wr) -} - -func (ws *priorityWriteScheduler) Pop() (wr FrameWriteRequest, ok bool) { - ws.root.walkReadyInOrder(false, &ws.tmp, func(n *priorityNode, openParent bool) bool { - limit := int32(math.MaxInt32) - if openParent { - limit = ws.writeThrottleLimit - } - wr, ok = n.q.consume(limit) - if !ok { - return false - } - n.addBytes(int64(wr.DataSize())) - // If B depends on A and B continuously has data available but A - // does not, gradually increase the throttling limit to allow B to - // steal more and more bandwidth from A. - if openParent { - ws.writeThrottleLimit += 1024 - if ws.writeThrottleLimit < 0 { - ws.writeThrottleLimit = math.MaxInt32 - } - } else if ws.enableWriteThrottle { - ws.writeThrottleLimit = 1024 - } - return true - }) - return wr, ok -} - -func (ws *priorityWriteScheduler) addClosedOrIdleNode(list *[]*priorityNode, maxSize int, n *priorityNode) { - if maxSize == 0 { - return - } - if len(*list) == maxSize { - // Remove the oldest node, then shift left. - ws.removeNode((*list)[0]) - x := (*list)[1:] - copy(*list, x) - *list = (*list)[:len(x)] - } - *list = append(*list, n) -} - -func (ws *priorityWriteScheduler) removeNode(n *priorityNode) { - for k := n.kids; k != nil; k = k.next { - k.setParent(n.parent) - } - n.setParent(nil) - delete(ws.nodes, n.id) -} diff --git a/vendor/golang.org/x/net/http2/writesched_random.go b/vendor/golang.org/x/net/http2/writesched_random.go deleted file mode 100644 index 36d7919f16ac..000000000000 --- a/vendor/golang.org/x/net/http2/writesched_random.go +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package http2 - -import "math" - -// NewRandomWriteScheduler constructs a WriteScheduler that ignores HTTP/2 -// priorities. Control frames like SETTINGS and PING are written before DATA -// frames, but if no control frames are queued and multiple streams have queued -// HEADERS or DATA frames, Pop selects a ready stream arbitrarily. -func NewRandomWriteScheduler() WriteScheduler { - return &randomWriteScheduler{sq: make(map[uint32]*writeQueue)} -} - -type randomWriteScheduler struct { - // zero are frames not associated with a specific stream. - zero writeQueue - - // sq contains the stream-specific queues, keyed by stream ID. - // When a stream is idle or closed, it's deleted from the map. - sq map[uint32]*writeQueue - - // pool of empty queues for reuse. - queuePool writeQueuePool -} - -func (ws *randomWriteScheduler) OpenStream(streamID uint32, options OpenStreamOptions) { - // no-op: idle streams are not tracked -} - -func (ws *randomWriteScheduler) CloseStream(streamID uint32) { - q, ok := ws.sq[streamID] - if !ok { - return - } - delete(ws.sq, streamID) - ws.queuePool.put(q) -} - -func (ws *randomWriteScheduler) AdjustStream(streamID uint32, priority PriorityParam) { - // no-op: priorities are ignored -} - -func (ws *randomWriteScheduler) Push(wr FrameWriteRequest) { - id := wr.StreamID() - if id == 0 { - ws.zero.push(wr) - return - } - q, ok := ws.sq[id] - if !ok { - q = ws.queuePool.get() - ws.sq[id] = q - } - q.push(wr) -} - -func (ws *randomWriteScheduler) Pop() (FrameWriteRequest, bool) { - // Control frames first. - if !ws.zero.empty() { - return ws.zero.shift(), true - } - // Iterate over all non-idle streams until finding one that can be consumed. 
- for _, q := range ws.sq { - if wr, ok := q.consume(math.MaxInt32); ok { - return wr, true - } - } - return FrameWriteRequest{}, false -} diff --git a/vendor/golang.org/x/net/idna/BUILD b/vendor/golang.org/x/net/idna/BUILD deleted file mode 100644 index 7cf6df24bc3f..000000000000 --- a/vendor/golang.org/x/net/idna/BUILD +++ /dev/null @@ -1,28 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "idna.go", - "punycode.go", - ], - importpath = "golang.org/x/net/idna", -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], -) diff --git a/vendor/golang.org/x/net/idna/idna.go b/vendor/golang.org/x/net/idna/idna.go deleted file mode 100644 index 35ff39d81b15..000000000000 --- a/vendor/golang.org/x/net/idna/idna.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package idna implements IDNA2008 (Internationalized Domain Names for -// Applications), defined in RFC 5890, RFC 5891, RFC 5892, RFC 5893 and -// RFC 5894. -package idna - -import ( - "strings" - "unicode/utf8" -) - -// TODO(nigeltao): specify when errors occur. For example, is ToASCII(".") or -// ToASCII("foo\x00") an error? See also http://www.unicode.org/faq/idn.html#11 - -// acePrefix is the ASCII Compatible Encoding prefix. -const acePrefix = "xn--" - -// ToASCII converts a domain or domain label to its ASCII form. For example, -// ToASCII("bücher.example.com") is "xn--bcher-kva.example.com", and -// ToASCII("golang") is "golang". -func ToASCII(s string) (string, error) { - if ascii(s) { - return s, nil - } - labels := strings.Split(s, ".") - for i, label := range labels { - if !ascii(label) { - a, err := encode(acePrefix, label) - if err != nil { - return "", err - } - labels[i] = a - } - } - return strings.Join(labels, "."), nil -} - -// ToUnicode converts a domain or domain label to its Unicode form. For example, -// ToUnicode("xn--bcher-kva.example.com") is "bücher.example.com", and -// ToUnicode("golang") is "golang". -func ToUnicode(s string) (string, error) { - if !strings.Contains(s, acePrefix) { - return s, nil - } - labels := strings.Split(s, ".") - for i, label := range labels { - if strings.HasPrefix(label, acePrefix) { - u, err := decode(label[len(acePrefix):]) - if err != nil { - return "", err - } - labels[i] = u - } - } - return strings.Join(labels, "."), nil -} - -func ascii(s string) bool { - for i := 0; i < len(s); i++ { - if s[i] >= utf8.RuneSelf { - return false - } - } - return true -} diff --git a/vendor/golang.org/x/net/idna/punycode.go b/vendor/golang.org/x/net/idna/punycode.go deleted file mode 100644 index 92e733f6a7b1..000000000000 --- a/vendor/golang.org/x/net/idna/punycode.go +++ /dev/null @@ -1,200 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package idna - -// This file implements the Punycode algorithm from RFC 3492. - -import ( - "fmt" - "math" - "strings" - "unicode/utf8" -) - -// These parameter values are specified in section 5. 
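// A usage sketch of the two exported entry points above; this vendored copy of
// the package still exposes the simple two-function API (no Profiles yet).
package main

import (
	"fmt"

	"golang.org/x/net/idna"
)

func main() {
	a, err := idna.ToASCII("bücher.example.com")
	if err != nil {
		panic(err)
	}
	u, err := idna.ToUnicode(a)
	if err != nil {
		panic(err)
	}
	fmt.Println(a) // xn--bcher-kva.example.com
	fmt.Println(u) // bücher.example.com
}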
-// -// All computation is done with int32s, so that overflow behavior is identical -// regardless of whether int is 32-bit or 64-bit. -const ( - base int32 = 36 - damp int32 = 700 - initialBias int32 = 72 - initialN int32 = 128 - skew int32 = 38 - tmax int32 = 26 - tmin int32 = 1 -) - -// decode decodes a string as specified in section 6.2. -func decode(encoded string) (string, error) { - if encoded == "" { - return "", nil - } - pos := 1 + strings.LastIndex(encoded, "-") - if pos == 1 { - return "", fmt.Errorf("idna: invalid label %q", encoded) - } - if pos == len(encoded) { - return encoded[:len(encoded)-1], nil - } - output := make([]rune, 0, len(encoded)) - if pos != 0 { - for _, r := range encoded[:pos-1] { - output = append(output, r) - } - } - i, n, bias := int32(0), initialN, initialBias - for pos < len(encoded) { - oldI, w := i, int32(1) - for k := base; ; k += base { - if pos == len(encoded) { - return "", fmt.Errorf("idna: invalid label %q", encoded) - } - digit, ok := decodeDigit(encoded[pos]) - if !ok { - return "", fmt.Errorf("idna: invalid label %q", encoded) - } - pos++ - i += digit * w - if i < 0 { - return "", fmt.Errorf("idna: invalid label %q", encoded) - } - t := k - bias - if t < tmin { - t = tmin - } else if t > tmax { - t = tmax - } - if digit < t { - break - } - w *= base - t - if w >= math.MaxInt32/base { - return "", fmt.Errorf("idna: invalid label %q", encoded) - } - } - x := int32(len(output) + 1) - bias = adapt(i-oldI, x, oldI == 0) - n += i / x - i %= x - if n > utf8.MaxRune || len(output) >= 1024 { - return "", fmt.Errorf("idna: invalid label %q", encoded) - } - output = append(output, 0) - copy(output[i+1:], output[i:]) - output[i] = n - i++ - } - return string(output), nil -} - -// encode encodes a string as specified in section 6.3 and prepends prefix to -// the result. -// -// The "while h < length(input)" line in the specification becomes "for -// remaining != 0" in the Go code, because len(s) in Go is in bytes, not runes. 
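// A standalone sketch of the Punycode digit alphabet used by encodeDigit and
// decodeDigit below: values 0..25 map to 'a'..'z' and 26..35 map to '0'..'9'.
package main

import "fmt"

func encodeDigit(d int32) byte {
	switch {
	case 0 <= d && d < 26:
		return byte(d + 'a')
	case 26 <= d && d < 36:
		return byte(d + '0' - 26)
	}
	panic("digit out of range")
}

func main() {
	for _, d := range []int32{0, 25, 26, 35} {
		fmt.Printf("digit %2d -> %q\n", d, encodeDigit(d)) // 'a', 'z', '0', '9'
	}
}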
-func encode(prefix, s string) (string, error) { - output := make([]byte, len(prefix), len(prefix)+1+2*len(s)) - copy(output, prefix) - delta, n, bias := int32(0), initialN, initialBias - b, remaining := int32(0), int32(0) - for _, r := range s { - if r < 0x80 { - b++ - output = append(output, byte(r)) - } else { - remaining++ - } - } - h := b - if b > 0 { - output = append(output, '-') - } - for remaining != 0 { - m := int32(0x7fffffff) - for _, r := range s { - if m > r && r >= n { - m = r - } - } - delta += (m - n) * (h + 1) - if delta < 0 { - return "", fmt.Errorf("idna: invalid label %q", s) - } - n = m - for _, r := range s { - if r < n { - delta++ - if delta < 0 { - return "", fmt.Errorf("idna: invalid label %q", s) - } - continue - } - if r > n { - continue - } - q := delta - for k := base; ; k += base { - t := k - bias - if t < tmin { - t = tmin - } else if t > tmax { - t = tmax - } - if q < t { - break - } - output = append(output, encodeDigit(t+(q-t)%(base-t))) - q = (q - t) / (base - t) - } - output = append(output, encodeDigit(q)) - bias = adapt(delta, h+1, h == b) - delta = 0 - h++ - remaining-- - } - delta++ - n++ - } - return string(output), nil -} - -func decodeDigit(x byte) (digit int32, ok bool) { - switch { - case '0' <= x && x <= '9': - return int32(x - ('0' - 26)), true - case 'A' <= x && x <= 'Z': - return int32(x - 'A'), true - case 'a' <= x && x <= 'z': - return int32(x - 'a'), true - } - return 0, false -} - -func encodeDigit(digit int32) byte { - switch { - case 0 <= digit && digit < 26: - return byte(digit + 'a') - case 26 <= digit && digit < 36: - return byte(digit + ('0' - 26)) - } - panic("idna: internal error in punycode encoding") -} - -// adapt is the bias adaptation function specified in section 6.1. -func adapt(delta, numPoints int32, firstTime bool) int32 { - if firstTime { - delta /= damp - } else { - delta /= 2 - } - delta += delta / numPoints - k := int32(0) - for delta > ((base-tmin)*tmax)/2 { - delta /= base - tmin - k += base - } - return k + (base-tmin+1)*delta/(delta+skew) -} diff --git a/vendor/golang.org/x/net/internal/timeseries/BUILD b/vendor/golang.org/x/net/internal/timeseries/BUILD deleted file mode 100644 index b70f95f8c39b..000000000000 --- a/vendor/golang.org/x/net/internal/timeseries/BUILD +++ /dev/null @@ -1,25 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) - -go_library( - name = "go_default_library", - srcs = ["timeseries.go"], - importpath = "golang.org/x/net/internal/timeseries", -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], -) diff --git a/vendor/golang.org/x/net/internal/timeseries/timeseries.go b/vendor/golang.org/x/net/internal/timeseries/timeseries.go deleted file mode 100644 index 3f90b7300d45..000000000000 --- a/vendor/golang.org/x/net/internal/timeseries/timeseries.go +++ /dev/null @@ -1,525 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package timeseries implements a time series structure for stats collection. 
-package timeseries - -import ( - "fmt" - "log" - "time" -) - -const ( - timeSeriesNumBuckets = 64 - minuteHourSeriesNumBuckets = 60 -) - -var timeSeriesResolutions = []time.Duration{ - 1 * time.Second, - 10 * time.Second, - 1 * time.Minute, - 10 * time.Minute, - 1 * time.Hour, - 6 * time.Hour, - 24 * time.Hour, // 1 day - 7 * 24 * time.Hour, // 1 week - 4 * 7 * 24 * time.Hour, // 4 weeks - 16 * 7 * 24 * time.Hour, // 16 weeks -} - -var minuteHourSeriesResolutions = []time.Duration{ - 1 * time.Second, - 1 * time.Minute, -} - -// An Observable is a kind of data that can be aggregated in a time series. -type Observable interface { - Multiply(ratio float64) // Multiplies the data in self by a given ratio - Add(other Observable) // Adds the data from a different observation to self - Clear() // Clears the observation so it can be reused. - CopyFrom(other Observable) // Copies the contents of a given observation to self -} - -// Float attaches the methods of Observable to a float64. -type Float float64 - -// NewFloat returns a Float. -func NewFloat() Observable { - f := Float(0) - return &f -} - -// String returns the float as a string. -func (f *Float) String() string { return fmt.Sprintf("%g", f.Value()) } - -// Value returns the float's value. -func (f *Float) Value() float64 { return float64(*f) } - -func (f *Float) Multiply(ratio float64) { *f *= Float(ratio) } - -func (f *Float) Add(other Observable) { - o := other.(*Float) - *f += *o -} - -func (f *Float) Clear() { *f = 0 } - -func (f *Float) CopyFrom(other Observable) { - o := other.(*Float) - *f = *o -} - -// A Clock tells the current time. -type Clock interface { - Time() time.Time -} - -type defaultClock int - -var defaultClockInstance defaultClock - -func (defaultClock) Time() time.Time { return time.Now() } - -// Information kept per level. Each level consists of a circular list of -// observations. The start of the level may be derived from end and the -// len(buckets) * sizeInMillis. -type tsLevel struct { - oldest int // index to oldest bucketed Observable - newest int // index to newest bucketed Observable - end time.Time // end timestamp for this level - size time.Duration // duration of the bucketed Observable - buckets []Observable // collections of observations - provider func() Observable // used for creating new Observable -} - -func (l *tsLevel) Clear() { - l.oldest = 0 - l.newest = len(l.buckets) - 1 - l.end = time.Time{} - for i := range l.buckets { - if l.buckets[i] != nil { - l.buckets[i].Clear() - l.buckets[i] = nil - } - } -} - -func (l *tsLevel) InitLevel(size time.Duration, numBuckets int, f func() Observable) { - l.size = size - l.provider = f - l.buckets = make([]Observable, numBuckets) -} - -// Keeps a sequence of levels. Each level is responsible for storing data at -// a given resolution. For example, the first level stores data at a one -// minute resolution while the second level stores data at a one hour -// resolution. - -// Each level is represented by a sequence of buckets. Each bucket spans an -// interval equal to the resolution of the level. New observations are added -// to the last bucket. 
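// A sketch of a custom Observable satisfying the interface above: a sum/count
// pair whose averages survive the Multiply-based scaling applied when buckets
// only partially overlap a query range. SumCount is a hypothetical example type.
package main

import "fmt"

// Observable mirrors the interface defined above so the sketch compiles alone.
type Observable interface {
	Multiply(ratio float64)
	Add(other Observable)
	Clear()
	CopyFrom(other Observable)
}

type SumCount struct {
	Sum, Count float64
}

func (s *SumCount) Multiply(ratio float64) { s.Sum *= ratio; s.Count *= ratio }
func (s *SumCount) Add(other Observable) {
	o := other.(*SumCount)
	s.Sum += o.Sum
	s.Count += o.Count
}
func (s *SumCount) Clear()                    { *s = SumCount{} }
func (s *SumCount) CopyFrom(other Observable) { *s = *other.(*SumCount) }

func main() {
	a := &SumCount{Sum: 10, Count: 2}
	a.Add(&SumCount{Sum: 5, Count: 1})
	fmt.Printf("avg=%.2f over %.0f observations\n", a.Sum/a.Count, a.Count)
}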
-type timeSeries struct { - provider func() Observable // make more Observable - numBuckets int // number of buckets in each level - levels []*tsLevel // levels of bucketed Observable - lastAdd time.Time // time of last Observable tracked - total Observable // convenient aggregation of all Observable - clock Clock // Clock for getting current time - pending Observable // observations not yet bucketed - pendingTime time.Time // what time are we keeping in pending - dirty bool // if there are pending observations -} - -// init initializes a level according to the supplied criteria. -func (ts *timeSeries) init(resolutions []time.Duration, f func() Observable, numBuckets int, clock Clock) { - ts.provider = f - ts.numBuckets = numBuckets - ts.clock = clock - ts.levels = make([]*tsLevel, len(resolutions)) - - for i := range resolutions { - if i > 0 && resolutions[i-1] >= resolutions[i] { - log.Print("timeseries: resolutions must be monotonically increasing") - break - } - newLevel := new(tsLevel) - newLevel.InitLevel(resolutions[i], ts.numBuckets, ts.provider) - ts.levels[i] = newLevel - } - - ts.Clear() -} - -// Clear removes all observations from the time series. -func (ts *timeSeries) Clear() { - ts.lastAdd = time.Time{} - ts.total = ts.resetObservation(ts.total) - ts.pending = ts.resetObservation(ts.pending) - ts.pendingTime = time.Time{} - ts.dirty = false - - for i := range ts.levels { - ts.levels[i].Clear() - } -} - -// Add records an observation at the current time. -func (ts *timeSeries) Add(observation Observable) { - ts.AddWithTime(observation, ts.clock.Time()) -} - -// AddWithTime records an observation at the specified time. -func (ts *timeSeries) AddWithTime(observation Observable, t time.Time) { - - smallBucketDuration := ts.levels[0].size - - if t.After(ts.lastAdd) { - ts.lastAdd = t - } - - if t.After(ts.pendingTime) { - ts.advance(t) - ts.mergePendingUpdates() - ts.pendingTime = ts.levels[0].end - ts.pending.CopyFrom(observation) - ts.dirty = true - } else if t.After(ts.pendingTime.Add(-1 * smallBucketDuration)) { - // The observation is close enough to go into the pending bucket. - // This compensates for clock skewing and small scheduling delays - // by letting the update stay in the fast path. - ts.pending.Add(observation) - ts.dirty = true - } else { - ts.mergeValue(observation, t) - } -} - -// mergeValue inserts the observation at the specified time in the past into all levels. -func (ts *timeSeries) mergeValue(observation Observable, t time.Time) { - for _, level := range ts.levels { - index := (ts.numBuckets - 1) - int(level.end.Sub(t)/level.size) - if 0 <= index && index < ts.numBuckets { - bucketNumber := (level.oldest + index) % ts.numBuckets - if level.buckets[bucketNumber] == nil { - level.buckets[bucketNumber] = level.provider() - } - level.buckets[bucketNumber].Add(observation) - } - } - ts.total.Add(observation) -} - -// mergePendingUpdates applies the pending updates into all levels. -func (ts *timeSeries) mergePendingUpdates() { - if ts.dirty { - ts.mergeValue(ts.pending, ts.pendingTime) - ts.pending = ts.resetObservation(ts.pending) - ts.dirty = false - } -} - -// advance cycles the buckets at each level until the latest bucket in -// each level can hold the time specified. -func (ts *timeSeries) advance(t time.Time) { - if !t.After(ts.levels[0].end) { - return - } - for i := 0; i < len(ts.levels); i++ { - level := ts.levels[i] - if !level.end.Before(t) { - break - } - - // If the time is sufficiently far, just clear the level and advance - // directly. 
- if !t.Before(level.end.Add(level.size * time.Duration(ts.numBuckets))) { - for _, b := range level.buckets { - ts.resetObservation(b) - } - level.end = time.Unix(0, (t.UnixNano()/level.size.Nanoseconds())*level.size.Nanoseconds()) - } - - for t.After(level.end) { - level.end = level.end.Add(level.size) - level.newest = level.oldest - level.oldest = (level.oldest + 1) % ts.numBuckets - ts.resetObservation(level.buckets[level.newest]) - } - - t = level.end - } -} - -// Latest returns the sum of the num latest buckets from the level. -func (ts *timeSeries) Latest(level, num int) Observable { - now := ts.clock.Time() - if ts.levels[0].end.Before(now) { - ts.advance(now) - } - - ts.mergePendingUpdates() - - result := ts.provider() - l := ts.levels[level] - index := l.newest - - for i := 0; i < num; i++ { - if l.buckets[index] != nil { - result.Add(l.buckets[index]) - } - if index == 0 { - index = ts.numBuckets - } - index-- - } - - return result -} - -// LatestBuckets returns a copy of the num latest buckets from level. -func (ts *timeSeries) LatestBuckets(level, num int) []Observable { - if level < 0 || level > len(ts.levels) { - log.Print("timeseries: bad level argument: ", level) - return nil - } - if num < 0 || num >= ts.numBuckets { - log.Print("timeseries: bad num argument: ", num) - return nil - } - - results := make([]Observable, num) - now := ts.clock.Time() - if ts.levels[0].end.Before(now) { - ts.advance(now) - } - - ts.mergePendingUpdates() - - l := ts.levels[level] - index := l.newest - - for i := 0; i < num; i++ { - result := ts.provider() - results[i] = result - if l.buckets[index] != nil { - result.CopyFrom(l.buckets[index]) - } - - if index == 0 { - index = ts.numBuckets - } - index -= 1 - } - return results -} - -// ScaleBy updates observations by scaling by factor. -func (ts *timeSeries) ScaleBy(factor float64) { - for _, l := range ts.levels { - for i := 0; i < ts.numBuckets; i++ { - l.buckets[i].Multiply(factor) - } - } - - ts.total.Multiply(factor) - ts.pending.Multiply(factor) -} - -// Range returns the sum of observations added over the specified time range. -// If start or finish times don't fall on bucket boundaries of the same -// level, then return values are approximate answers. -func (ts *timeSeries) Range(start, finish time.Time) Observable { - return ts.ComputeRange(start, finish, 1)[0] -} - -// Recent returns the sum of observations from the last delta. -func (ts *timeSeries) Recent(delta time.Duration) Observable { - now := ts.clock.Time() - return ts.Range(now.Add(-delta), now) -} - -// Total returns the total of all observations. -func (ts *timeSeries) Total() Observable { - ts.mergePendingUpdates() - return ts.total -} - -// ComputeRange computes a specified number of values into a slice using -// the observations recorded over the specified time period. The return -// values are approximate if the start or finish times don't fall on the -// bucket boundaries at the same level or if the number of buckets spanning -// the range is not an integral multiple of num. 
-func (ts *timeSeries) ComputeRange(start, finish time.Time, num int) []Observable { - if start.After(finish) { - log.Printf("timeseries: start > finish, %v>%v", start, finish) - return nil - } - - if num < 0 { - log.Printf("timeseries: num < 0, %v", num) - return nil - } - - results := make([]Observable, num) - - for _, l := range ts.levels { - if !start.Before(l.end.Add(-l.size * time.Duration(ts.numBuckets))) { - ts.extract(l, start, finish, num, results) - return results - } - } - - // Failed to find a level that covers the desired range. So just - // extract from the last level, even if it doesn't cover the entire - // desired range. - ts.extract(ts.levels[len(ts.levels)-1], start, finish, num, results) - - return results -} - -// RecentList returns the specified number of values in slice over the most -// recent time period of the specified range. -func (ts *timeSeries) RecentList(delta time.Duration, num int) []Observable { - if delta < 0 { - return nil - } - now := ts.clock.Time() - return ts.ComputeRange(now.Add(-delta), now, num) -} - -// extract returns a slice of specified number of observations from a given -// level over a given range. -func (ts *timeSeries) extract(l *tsLevel, start, finish time.Time, num int, results []Observable) { - ts.mergePendingUpdates() - - srcInterval := l.size - dstInterval := finish.Sub(start) / time.Duration(num) - dstStart := start - srcStart := l.end.Add(-srcInterval * time.Duration(ts.numBuckets)) - - srcIndex := 0 - - // Where should scanning start? - if dstStart.After(srcStart) { - advance := dstStart.Sub(srcStart) / srcInterval - srcIndex += int(advance) - srcStart = srcStart.Add(advance * srcInterval) - } - - // The i'th value is computed as show below. - // interval = (finish/start)/num - // i'th value = sum of observation in range - // [ start + i * interval, - // start + (i + 1) * interval ) - for i := 0; i < num; i++ { - results[i] = ts.resetObservation(results[i]) - dstEnd := dstStart.Add(dstInterval) - for srcIndex < ts.numBuckets && srcStart.Before(dstEnd) { - srcEnd := srcStart.Add(srcInterval) - if srcEnd.After(ts.lastAdd) { - srcEnd = ts.lastAdd - } - - if !srcEnd.Before(dstStart) { - srcValue := l.buckets[(srcIndex+l.oldest)%ts.numBuckets] - if !srcStart.Before(dstStart) && !srcEnd.After(dstEnd) { - // dst completely contains src. - if srcValue != nil { - results[i].Add(srcValue) - } - } else { - // dst partially overlaps src. - overlapStart := maxTime(srcStart, dstStart) - overlapEnd := minTime(srcEnd, dstEnd) - base := srcEnd.Sub(srcStart) - fraction := overlapEnd.Sub(overlapStart).Seconds() / base.Seconds() - - used := ts.provider() - if srcValue != nil { - used.CopyFrom(srcValue) - } - used.Multiply(fraction) - results[i].Add(used) - } - - if srcEnd.After(dstEnd) { - break - } - } - srcIndex++ - srcStart = srcStart.Add(srcInterval) - } - dstStart = dstStart.Add(dstInterval) - } -} - -// resetObservation clears the content so the struct may be reused. -func (ts *timeSeries) resetObservation(observation Observable) Observable { - if observation == nil { - observation = ts.provider() - } else { - observation.Clear() - } - return observation -} - -// TimeSeries tracks data at granularities from 1 second to 16 weeks. -type TimeSeries struct { - timeSeries -} - -// NewTimeSeries creates a new TimeSeries using the function provided for creating new Observable. 
-func NewTimeSeries(f func() Observable) *TimeSeries { - return NewTimeSeriesWithClock(f, defaultClockInstance) -} - -// NewTimeSeriesWithClock creates a new TimeSeries using the function provided for creating new Observable and the clock for -// assigning timestamps. -func NewTimeSeriesWithClock(f func() Observable, clock Clock) *TimeSeries { - ts := new(TimeSeries) - ts.timeSeries.init(timeSeriesResolutions, f, timeSeriesNumBuckets, clock) - return ts -} - -// MinuteHourSeries tracks data at granularities of 1 minute and 1 hour. -type MinuteHourSeries struct { - timeSeries -} - -// NewMinuteHourSeries creates a new MinuteHourSeries using the function provided for creating new Observable. -func NewMinuteHourSeries(f func() Observable) *MinuteHourSeries { - return NewMinuteHourSeriesWithClock(f, defaultClockInstance) -} - -// NewMinuteHourSeriesWithClock creates a new MinuteHourSeries using the function provided for creating new Observable and the clock for -// assigning timestamps. -func NewMinuteHourSeriesWithClock(f func() Observable, clock Clock) *MinuteHourSeries { - ts := new(MinuteHourSeries) - ts.timeSeries.init(minuteHourSeriesResolutions, f, - minuteHourSeriesNumBuckets, clock) - return ts -} - -func (ts *MinuteHourSeries) Minute() Observable { - return ts.timeSeries.Latest(0, 60) -} - -func (ts *MinuteHourSeries) Hour() Observable { - return ts.timeSeries.Latest(1, 60) -} - -func minTime(a, b time.Time) time.Time { - if a.Before(b) { - return a - } - return b -} - -func maxTime(a, b time.Time) time.Time { - if a.After(b) { - return a - } - return b -} diff --git a/vendor/golang.org/x/net/lex/httplex/BUILD b/vendor/golang.org/x/net/lex/httplex/BUILD deleted file mode 100644 index b6b9bd83edf8..000000000000 --- a/vendor/golang.org/x/net/lex/httplex/BUILD +++ /dev/null @@ -1,26 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) - -go_library( - name = "go_default_library", - srcs = ["httplex.go"], - importpath = "golang.org/x/net/lex/httplex", - deps = ["//vendor/golang.org/x/net/idna:go_default_library"], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], -) diff --git a/vendor/golang.org/x/net/lex/httplex/httplex.go b/vendor/golang.org/x/net/lex/httplex/httplex.go deleted file mode 100644 index 20f2b8940bae..000000000000 --- a/vendor/golang.org/x/net/lex/httplex/httplex.go +++ /dev/null @@ -1,351 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package httplex contains rules around lexical matters of various -// HTTP-related specifications. -// -// This package is shared by the standard library (which vendors it) -// and x/net/http2. It comes with no API stability promise. 
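// The timeseries package above is easiest to understand from the caller's
// side. Below is a minimal sketch (not part of the vendored files) of an
// Observable and a TimeSeries built from it; "counter" and "observe" are
// illustrative names, and the import assumes the vendored path shown in this
// diff (the package is internal to x/net, so external callers must copy or
// vendor it).
package main

import (
	"fmt"
	"time"

	"golang.org/x/net/internal/timeseries"
)

// counter is a toy Observable: a running float64 sum.
type counter float64

func (c *counter) Multiply(ratio float64)               { *c *= counter(ratio) }
func (c *counter) Add(other timeseries.Observable)      { *c += *(other.(*counter)) }
func (c *counter) Clear()                               { *c = 0 }
func (c *counter) CopyFrom(other timeseries.Observable) { *c = *(other.(*counter)) }

// observe wraps a value in an Observable for recording.
func observe(v float64) timeseries.Observable { c := counter(v); return &c }

func main() {
	ts := timeseries.NewTimeSeries(func() timeseries.Observable { return new(counter) })
	ts.Add(observe(1)) // recorded at the current time via the default clock
	ts.Add(observe(2))
	fmt.Println(*ts.Recent(time.Minute).(*counter)) // sum over the last minute: 3
	fmt.Println(*ts.Total().(*counter))             // sum over all time: 3
}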
-package httplex - -import ( - "net" - "strings" - "unicode/utf8" - - "golang.org/x/net/idna" -) - -var isTokenTable = [127]bool{ - '!': true, - '#': true, - '$': true, - '%': true, - '&': true, - '\'': true, - '*': true, - '+': true, - '-': true, - '.': true, - '0': true, - '1': true, - '2': true, - '3': true, - '4': true, - '5': true, - '6': true, - '7': true, - '8': true, - '9': true, - 'A': true, - 'B': true, - 'C': true, - 'D': true, - 'E': true, - 'F': true, - 'G': true, - 'H': true, - 'I': true, - 'J': true, - 'K': true, - 'L': true, - 'M': true, - 'N': true, - 'O': true, - 'P': true, - 'Q': true, - 'R': true, - 'S': true, - 'T': true, - 'U': true, - 'W': true, - 'V': true, - 'X': true, - 'Y': true, - 'Z': true, - '^': true, - '_': true, - '`': true, - 'a': true, - 'b': true, - 'c': true, - 'd': true, - 'e': true, - 'f': true, - 'g': true, - 'h': true, - 'i': true, - 'j': true, - 'k': true, - 'l': true, - 'm': true, - 'n': true, - 'o': true, - 'p': true, - 'q': true, - 'r': true, - 's': true, - 't': true, - 'u': true, - 'v': true, - 'w': true, - 'x': true, - 'y': true, - 'z': true, - '|': true, - '~': true, -} - -func IsTokenRune(r rune) bool { - i := int(r) - return i < len(isTokenTable) && isTokenTable[i] -} - -func isNotToken(r rune) bool { - return !IsTokenRune(r) -} - -// HeaderValuesContainsToken reports whether any string in values -// contains the provided token, ASCII case-insensitively. -func HeaderValuesContainsToken(values []string, token string) bool { - for _, v := range values { - if headerValueContainsToken(v, token) { - return true - } - } - return false -} - -// isOWS reports whether b is an optional whitespace byte, as defined -// by RFC 7230 section 3.2.3. -func isOWS(b byte) bool { return b == ' ' || b == '\t' } - -// trimOWS returns x with all optional whitespace removes from the -// beginning and end. -func trimOWS(x string) string { - // TODO: consider using strings.Trim(x, " \t") instead, - // if and when it's fast enough. See issue 10292. - // But this ASCII-only code will probably always beat UTF-8 - // aware code. - for len(x) > 0 && isOWS(x[0]) { - x = x[1:] - } - for len(x) > 0 && isOWS(x[len(x)-1]) { - x = x[:len(x)-1] - } - return x -} - -// headerValueContainsToken reports whether v (assumed to be a -// 0#element, in the ABNF extension described in RFC 7230 section 7) -// contains token amongst its comma-separated tokens, ASCII -// case-insensitively. -func headerValueContainsToken(v string, token string) bool { - v = trimOWS(v) - if comma := strings.IndexByte(v, ','); comma != -1 { - return tokenEqual(trimOWS(v[:comma]), token) || headerValueContainsToken(v[comma+1:], token) - } - return tokenEqual(v, token) -} - -// lowerASCII returns the ASCII lowercase version of b. -func lowerASCII(b byte) byte { - if 'A' <= b && b <= 'Z' { - return b + ('a' - 'A') - } - return b -} - -// tokenEqual reports whether t1 and t2 are equal, ASCII case-insensitively. -func tokenEqual(t1, t2 string) bool { - if len(t1) != len(t2) { - return false - } - for i, b := range t1 { - if b >= utf8.RuneSelf { - // No UTF-8 or non-ASCII allowed in tokens. 
- return false - } - if lowerASCII(byte(b)) != lowerASCII(t2[i]) { - return false - } - } - return true -} - -// isLWS reports whether b is linear white space, according -// to http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2 -// LWS = [CRLF] 1*( SP | HT ) -func isLWS(b byte) bool { return b == ' ' || b == '\t' } - -// isCTL reports whether b is a control byte, according -// to http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2 -// CTL = -func isCTL(b byte) bool { - const del = 0x7f // a CTL - return b < ' ' || b == del -} - -// ValidHeaderFieldName reports whether v is a valid HTTP/1.x header name. -// HTTP/2 imposes the additional restriction that uppercase ASCII -// letters are not allowed. -// -// RFC 7230 says: -// header-field = field-name ":" OWS field-value OWS -// field-name = token -// token = 1*tchar -// tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" / "+" / "-" / "." / -// "^" / "_" / "`" / "|" / "~" / DIGIT / ALPHA -func ValidHeaderFieldName(v string) bool { - if len(v) == 0 { - return false - } - for _, r := range v { - if !IsTokenRune(r) { - return false - } - } - return true -} - -// ValidHostHeader reports whether h is a valid host header. -func ValidHostHeader(h string) bool { - // The latest spec is actually this: - // - // http://tools.ietf.org/html/rfc7230#section-5.4 - // Host = uri-host [ ":" port ] - // - // Where uri-host is: - // http://tools.ietf.org/html/rfc3986#section-3.2.2 - // - // But we're going to be much more lenient for now and just - // search for any byte that's not a valid byte in any of those - // expressions. - for i := 0; i < len(h); i++ { - if !validHostByte[h[i]] { - return false - } - } - return true -} - -// See the validHostHeader comment. -var validHostByte = [256]bool{ - '0': true, '1': true, '2': true, '3': true, '4': true, '5': true, '6': true, '7': true, - '8': true, '9': true, - - 'a': true, 'b': true, 'c': true, 'd': true, 'e': true, 'f': true, 'g': true, 'h': true, - 'i': true, 'j': true, 'k': true, 'l': true, 'm': true, 'n': true, 'o': true, 'p': true, - 'q': true, 'r': true, 's': true, 't': true, 'u': true, 'v': true, 'w': true, 'x': true, - 'y': true, 'z': true, - - 'A': true, 'B': true, 'C': true, 'D': true, 'E': true, 'F': true, 'G': true, 'H': true, - 'I': true, 'J': true, 'K': true, 'L': true, 'M': true, 'N': true, 'O': true, 'P': true, - 'Q': true, 'R': true, 'S': true, 'T': true, 'U': true, 'V': true, 'W': true, 'X': true, - 'Y': true, 'Z': true, - - '!': true, // sub-delims - '$': true, // sub-delims - '%': true, // pct-encoded (and used in IPv6 zones) - '&': true, // sub-delims - '(': true, // sub-delims - ')': true, // sub-delims - '*': true, // sub-delims - '+': true, // sub-delims - ',': true, // sub-delims - '-': true, // unreserved - '.': true, // unreserved - ':': true, // IPv6address + Host expression's optional port - ';': true, // sub-delims - '=': true, // sub-delims - '[': true, - '\'': true, // sub-delims - ']': true, - '_': true, // unreserved - '~': true, // unreserved -} - -// ValidHeaderFieldValue reports whether v is a valid "field-value" according to -// http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2 : -// -// message-header = field-name ":" [ field-value ] -// field-value = *( field-content | LWS ) -// field-content = -// -// http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2 : -// -// TEXT = -// LWS = [CRLF] 1*( SP | HT ) -// CTL = -// -// RFC 7230 says: -// field-value = *( field-content / obs-fold ) -// obj-fold = N/A to http2, and deprecated -// 
field-content = field-vchar [ 1*( SP / HTAB ) field-vchar ] -// field-vchar = VCHAR / obs-text -// obs-text = %x80-FF -// VCHAR = "any visible [USASCII] character" -// -// http2 further says: "Similarly, HTTP/2 allows header field values -// that are not valid. While most of the values that can be encoded -// will not alter header field parsing, carriage return (CR, ASCII -// 0xd), line feed (LF, ASCII 0xa), and the zero character (NUL, ASCII -// 0x0) might be exploited by an attacker if they are translated -// verbatim. Any request or response that contains a character not -// permitted in a header field value MUST be treated as malformed -// (Section 8.1.2.6). Valid characters are defined by the -// field-content ABNF rule in Section 3.2 of [RFC7230]." -// -// This function does not (yet?) properly handle the rejection of -// strings that begin or end with SP or HTAB. -func ValidHeaderFieldValue(v string) bool { - for i := 0; i < len(v); i++ { - b := v[i] - if isCTL(b) && !isLWS(b) { - return false - } - } - return true -} - -func isASCII(s string) bool { - for i := 0; i < len(s); i++ { - if s[i] >= utf8.RuneSelf { - return false - } - } - return true -} - -// PunycodeHostPort returns the IDNA Punycode version -// of the provided "host" or "host:port" string. -func PunycodeHostPort(v string) (string, error) { - if isASCII(v) { - return v, nil - } - - host, port, err := net.SplitHostPort(v) - if err != nil { - // The input 'v' argument was just a "host" argument, - // without a port. This error should not be returned - // to the caller. - host = v - port = "" - } - host, err = idna.ToASCII(host) - if err != nil { - // Non-UTF-8? Not representable in Punycode, in any - // case. - return "", err - } - if port == "" { - return host, nil - } - return net.JoinHostPort(host, port), nil -} diff --git a/vendor/golang.org/x/net/trace/BUILD b/vendor/golang.org/x/net/trace/BUILD deleted file mode 100644 index 787f9b29a861..000000000000 --- a/vendor/golang.org/x/net/trace/BUILD +++ /dev/null @@ -1,33 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "events.go", - "histogram.go", - "trace.go", - ], - importpath = "golang.org/x/net/trace", - deps = [ - "//vendor/golang.org/x/net/context:go_default_library", - "//vendor/golang.org/x/net/internal/timeseries:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], -) diff --git a/vendor/golang.org/x/net/trace/events.go b/vendor/golang.org/x/net/trace/events.go deleted file mode 100644 index d8daec1a791f..000000000000 --- a/vendor/golang.org/x/net/trace/events.go +++ /dev/null @@ -1,532 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
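// A quick sketch (not part of the vendored files) exercising the httplex
// validators defined above; the expected results in the comments follow from
// the token table, the CTL check, and the IDNA conversion in PunycodeHostPort.
package main

import (
	"fmt"

	"golang.org/x/net/lex/httplex"
)

func main() {
	fmt.Println(httplex.ValidHeaderFieldName("Content-Type")) // true: all tchars
	fmt.Println(httplex.ValidHeaderFieldName("Content Type")) // false: space is not a tchar
	fmt.Println(httplex.ValidHeaderFieldValue("ok\x00bad"))   // false: NUL is a CTL

	// Token matching is comma-aware and ASCII case-insensitive.
	fmt.Println(httplex.HeaderValuesContainsToken([]string{"gzip, Chunked"}, "chunked")) // true

	// Non-ASCII hosts are converted to Punycode; the port is preserved.
	host, err := httplex.PunycodeHostPort("bücher.example:8080")
	fmt.Println(host, err) // xn--bcher-kva.example:8080 <nil>
}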
- -package trace - -import ( - "bytes" - "fmt" - "html/template" - "io" - "log" - "net/http" - "runtime" - "sort" - "strconv" - "strings" - "sync" - "sync/atomic" - "text/tabwriter" - "time" -) - -const maxEventsPerLog = 100 - -type bucket struct { - MaxErrAge time.Duration - String string -} - -var buckets = []bucket{ - {0, "total"}, - {10 * time.Second, "errs<10s"}, - {1 * time.Minute, "errs<1m"}, - {10 * time.Minute, "errs<10m"}, - {1 * time.Hour, "errs<1h"}, - {10 * time.Hour, "errs<10h"}, - {24000 * time.Hour, "errors"}, -} - -// RenderEvents renders the HTML page typically served at /debug/events. -// It does not do any auth checking; see AuthRequest for the default auth check -// used by the handler registered on http.DefaultServeMux. -// req may be nil. -func RenderEvents(w http.ResponseWriter, req *http.Request, sensitive bool) { - now := time.Now() - data := &struct { - Families []string // family names - Buckets []bucket - Counts [][]int // eventLog count per family/bucket - - // Set when a bucket has been selected. - Family string - Bucket int - EventLogs eventLogs - Expanded bool - }{ - Buckets: buckets, - } - - data.Families = make([]string, 0, len(families)) - famMu.RLock() - for name := range families { - data.Families = append(data.Families, name) - } - famMu.RUnlock() - sort.Strings(data.Families) - - // Count the number of eventLogs in each family for each error age. - data.Counts = make([][]int, len(data.Families)) - for i, name := range data.Families { - // TODO(sameer): move this loop under the family lock. - f := getEventFamily(name) - data.Counts[i] = make([]int, len(data.Buckets)) - for j, b := range data.Buckets { - data.Counts[i][j] = f.Count(now, b.MaxErrAge) - } - } - - if req != nil { - var ok bool - data.Family, data.Bucket, ok = parseEventsArgs(req) - if !ok { - // No-op - } else { - data.EventLogs = getEventFamily(data.Family).Copy(now, buckets[data.Bucket].MaxErrAge) - } - if data.EventLogs != nil { - defer data.EventLogs.Free() - sort.Sort(data.EventLogs) - } - if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil { - data.Expanded = exp - } - } - - famMu.RLock() - defer famMu.RUnlock() - if err := eventsTmpl().Execute(w, data); err != nil { - log.Printf("net/trace: Failed executing template: %v", err) - } -} - -func parseEventsArgs(req *http.Request) (fam string, b int, ok bool) { - fam, bStr := req.FormValue("fam"), req.FormValue("b") - if fam == "" || bStr == "" { - return "", 0, false - } - b, err := strconv.Atoi(bStr) - if err != nil || b < 0 || b >= len(buckets) { - return "", 0, false - } - return fam, b, true -} - -// An EventLog provides a log of events associated with a specific object. -type EventLog interface { - // Printf formats its arguments with fmt.Sprintf and adds the - // result to the event log. - Printf(format string, a ...interface{}) - - // Errorf is like Printf, but it marks this event as an error. - Errorf(format string, a ...interface{}) - - // Finish declares that this event log is complete. - // The event log should not be used after calling this method. - Finish() -} - -// NewEventLog returns a new EventLog with the specified family name -// and title. 
-func NewEventLog(family, title string) EventLog { - el := newEventLog() - el.ref() - el.Family, el.Title = family, title - el.Start = time.Now() - el.events = make([]logEntry, 0, maxEventsPerLog) - el.stack = make([]uintptr, 32) - n := runtime.Callers(2, el.stack) - el.stack = el.stack[:n] - - getEventFamily(family).add(el) - return el -} - -func (el *eventLog) Finish() { - getEventFamily(el.Family).remove(el) - el.unref() // matches ref in New -} - -var ( - famMu sync.RWMutex - families = make(map[string]*eventFamily) // family name => family -) - -func getEventFamily(fam string) *eventFamily { - famMu.Lock() - defer famMu.Unlock() - f := families[fam] - if f == nil { - f = &eventFamily{} - families[fam] = f - } - return f -} - -type eventFamily struct { - mu sync.RWMutex - eventLogs eventLogs -} - -func (f *eventFamily) add(el *eventLog) { - f.mu.Lock() - f.eventLogs = append(f.eventLogs, el) - f.mu.Unlock() -} - -func (f *eventFamily) remove(el *eventLog) { - f.mu.Lock() - defer f.mu.Unlock() - for i, el0 := range f.eventLogs { - if el == el0 { - copy(f.eventLogs[i:], f.eventLogs[i+1:]) - f.eventLogs = f.eventLogs[:len(f.eventLogs)-1] - return - } - } -} - -func (f *eventFamily) Count(now time.Time, maxErrAge time.Duration) (n int) { - f.mu.RLock() - defer f.mu.RUnlock() - for _, el := range f.eventLogs { - if el.hasRecentError(now, maxErrAge) { - n++ - } - } - return -} - -func (f *eventFamily) Copy(now time.Time, maxErrAge time.Duration) (els eventLogs) { - f.mu.RLock() - defer f.mu.RUnlock() - els = make(eventLogs, 0, len(f.eventLogs)) - for _, el := range f.eventLogs { - if el.hasRecentError(now, maxErrAge) { - el.ref() - els = append(els, el) - } - } - return -} - -type eventLogs []*eventLog - -// Free calls unref on each element of the list. -func (els eventLogs) Free() { - for _, el := range els { - el.unref() - } -} - -// eventLogs may be sorted in reverse chronological order. -func (els eventLogs) Len() int { return len(els) } -func (els eventLogs) Less(i, j int) bool { return els[i].Start.After(els[j].Start) } -func (els eventLogs) Swap(i, j int) { els[i], els[j] = els[j], els[i] } - -// A logEntry is a timestamped log entry in an event log. -type logEntry struct { - When time.Time - Elapsed time.Duration // since previous event in log - NewDay bool // whether this event is on a different day to the previous event - What string - IsErr bool -} - -// WhenString returns a string representation of the elapsed time of the event. -// It will include the date if midnight was crossed. -func (e logEntry) WhenString() string { - if e.NewDay { - return e.When.Format("2006/01/02 15:04:05.000000") - } - return e.When.Format("15:04:05.000000") -} - -// An eventLog represents an active event log. -type eventLog struct { - // Family is the top-level grouping of event logs to which this belongs. - Family string - - // Title is the title of this event log. - Title string - - // Timing information. - Start time.Time - - // Call stack where this event log was created. - stack []uintptr - - // Append-only sequence of events. - // - // TODO(sameer): change this to a ring buffer to avoid the array copy - // when we hit maxEventsPerLog. - mu sync.RWMutex - events []logEntry - LastErrorTime time.Time - discarded int - - refs int32 // how many buckets this is in -} - -func (el *eventLog) reset() { - // Clear all but the mutex. Mutexes may not be copied, even when unlocked. 
- el.Family = "" - el.Title = "" - el.Start = time.Time{} - el.stack = nil - el.events = nil - el.LastErrorTime = time.Time{} - el.discarded = 0 - el.refs = 0 -} - -func (el *eventLog) hasRecentError(now time.Time, maxErrAge time.Duration) bool { - if maxErrAge == 0 { - return true - } - el.mu.RLock() - defer el.mu.RUnlock() - return now.Sub(el.LastErrorTime) < maxErrAge -} - -// delta returns the elapsed time since the last event or the log start, -// and whether it spans midnight. -// L >= el.mu -func (el *eventLog) delta(t time.Time) (time.Duration, bool) { - if len(el.events) == 0 { - return t.Sub(el.Start), false - } - prev := el.events[len(el.events)-1].When - return t.Sub(prev), prev.Day() != t.Day() - -} - -func (el *eventLog) Printf(format string, a ...interface{}) { - el.printf(false, format, a...) -} - -func (el *eventLog) Errorf(format string, a ...interface{}) { - el.printf(true, format, a...) -} - -func (el *eventLog) printf(isErr bool, format string, a ...interface{}) { - e := logEntry{When: time.Now(), IsErr: isErr, What: fmt.Sprintf(format, a...)} - el.mu.Lock() - e.Elapsed, e.NewDay = el.delta(e.When) - if len(el.events) < maxEventsPerLog { - el.events = append(el.events, e) - } else { - // Discard the oldest event. - if el.discarded == 0 { - // el.discarded starts at two to count for the event it - // is replacing, plus the next one that we are about to - // drop. - el.discarded = 2 - } else { - el.discarded++ - } - // TODO(sameer): if this causes allocations on a critical path, - // change eventLog.What to be a fmt.Stringer, as in trace.go. - el.events[0].What = fmt.Sprintf("(%d events discarded)", el.discarded) - // The timestamp of the discarded meta-event should be - // the time of the last event it is representing. - el.events[0].When = el.events[1].When - copy(el.events[1:], el.events[2:]) - el.events[maxEventsPerLog-1] = e - } - if e.IsErr { - el.LastErrorTime = e.When - } - el.mu.Unlock() -} - -func (el *eventLog) ref() { - atomic.AddInt32(&el.refs, 1) -} - -func (el *eventLog) unref() { - if atomic.AddInt32(&el.refs, -1) == 0 { - freeEventLog(el) - } -} - -func (el *eventLog) When() string { - return el.Start.Format("2006/01/02 15:04:05.000000") -} - -func (el *eventLog) ElapsedTime() string { - elapsed := time.Since(el.Start) - return fmt.Sprintf("%.6f", elapsed.Seconds()) -} - -func (el *eventLog) Stack() string { - buf := new(bytes.Buffer) - tw := tabwriter.NewWriter(buf, 1, 8, 1, '\t', 0) - printStackRecord(tw, el.stack) - tw.Flush() - return buf.String() -} - -// printStackRecord prints the function + source line information -// for a single stack trace. -// Adapted from runtime/pprof/pprof.go. -func printStackRecord(w io.Writer, stk []uintptr) { - for _, pc := range stk { - f := runtime.FuncForPC(pc) - if f == nil { - continue - } - file, line := f.FileLine(pc) - name := f.Name() - // Hide runtime.goexit and any runtime functions at the beginning. - if strings.HasPrefix(name, "runtime.") { - continue - } - fmt.Fprintf(w, "# %s\t%s:%d\n", name, file, line) - } -} - -func (el *eventLog) Events() []logEntry { - el.mu.RLock() - defer el.mu.RUnlock() - return el.events -} - -// freeEventLogs is a freelist of *eventLog -var freeEventLogs = make(chan *eventLog, 1000) - -// newEventLog returns a event log ready to use. -func newEventLog() *eventLog { - select { - case el := <-freeEventLogs: - return el - default: - return new(eventLog) - } -} - -// freeEventLog adds el to freeEventLogs if there's room. -// This is non-blocking. 
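// The freeEventLogs channel above is a lock-free freelist: a get attempts a
// non-blocking receive, a put attempts a non-blocking send, and both fall
// back to the allocator or the garbage collector when the channel is empty
// or full. A generic sketch of the same idiom (illustrative names, not part
// of the vendored file):
package main

import "fmt"

type buffer struct{ data []byte }

var freeBuffers = make(chan *buffer, 100)

// getBuffer reuses a pooled buffer if one is available, else allocates.
func getBuffer() *buffer {
	select {
	case b := <-freeBuffers:
		return b
	default:
		return new(buffer)
	}
}

// putBuffer resets b and returns it to the pool if there is room.
func putBuffer(b *buffer) {
	b.data = b.data[:0]
	select {
	case freeBuffers <- b:
	default: // pool full; let the GC reclaim b
	}
}

func main() {
	b := getBuffer()
	b.data = append(b.data, 'x')
	putBuffer(b)
	fmt.Println(getBuffer() == b) // true: the same buffer came back
}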
-func freeEventLog(el *eventLog) { - el.reset() - select { - case freeEventLogs <- el: - default: - } -} - -var eventsTmplCache *template.Template -var eventsTmplOnce sync.Once - -func eventsTmpl() *template.Template { - eventsTmplOnce.Do(func() { - eventsTmplCache = template.Must(template.New("events").Funcs(template.FuncMap{ - "elapsed": elapsed, - "trimSpace": strings.TrimSpace, - }).Parse(eventsHTML)) - }) - return eventsTmplCache -} - -const eventsHTML = ` - - - events - - - - -

-	<body>
-
-<h1>/debug/events</h1>
-
-<table id="req-status">
-	{{range $i, $fam := .Families}}
-	<tr>
-		<td class="family">{{$fam}}</td>
-
-		{{range $j, $bucket := $.Buckets}}
-		{{$n := index $.Counts $i $j}}
-		<td class="{{if not $bucket.MaxErrAge}}active{{end}}{{if not $n}}empty{{end}}">
-			{{if $n}}<a href="?fam={{$fam}}&b={{$j}}">{{end}}
-			[{{$n}} {{$bucket.String}}]
-			{{if $n}}</a>{{end}}
-		</td>
-		{{end}}
-
-	</tr>{{end}}
-</table>
-
-{{if $.EventLogs}}
-<hr />
-<h3>Family: {{$.Family}}</h3>
-
-{{if $.Expanded}}<a href="?fam={{$.Family}}&b={{$.Bucket}}">{{end}}
-[Summary]{{if $.Expanded}}</a>{{end}}
-
-{{if not $.Expanded}}<a href="?fam={{$.Family}}&b={{$.Bucket}}&exp=1">{{end}}
-[Expanded]{{if not $.Expanded}}</a>{{end}}
-
-<table id="reqs">
-	<tr><th>When</th><th>Elapsed</th></tr>
-	{{range $el := $.EventLogs}}
-	<tr class="first">
-		<td class="when">{{$el.When}}</td>
-		<td class="elapsed">{{$el.ElapsedTime}}</td>
-		<td>{{$el.Title}}</td>
-	</tr>
-	{{if $.Expanded}}
-	<tr>
-		<td class="when"></td>
-		<td class="elapsed"></td>
-		<td><pre>{{$el.Stack|trimSpace}}</pre></td>
-	</tr>
-	{{range $el.Events}}
-	<tr>
-		<td class="when">{{.WhenString}}</td>
-		<td class="elapsed">{{elapsed .Elapsed}}</td>
-		<td>.{{if .IsErr}}E{{else}}.{{end}}. {{.What}}</td>
-	</tr>
-	{{end}}
-	{{end}}
-	{{end}}
-</table>
-{{end}} - - -` diff --git a/vendor/golang.org/x/net/trace/histogram.go b/vendor/golang.org/x/net/trace/histogram.go deleted file mode 100644 index 9bf4286c794b..000000000000 --- a/vendor/golang.org/x/net/trace/histogram.go +++ /dev/null @@ -1,365 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package trace - -// This file implements histogramming for RPC statistics collection. - -import ( - "bytes" - "fmt" - "html/template" - "log" - "math" - "sync" - - "golang.org/x/net/internal/timeseries" -) - -const ( - bucketCount = 38 -) - -// histogram keeps counts of values in buckets that are spaced -// out in powers of 2: 0-1, 2-3, 4-7... -// histogram implements timeseries.Observable -type histogram struct { - sum int64 // running total of measurements - sumOfSquares float64 // square of running total - buckets []int64 // bucketed values for histogram - value int // holds a single value as an optimization - valueCount int64 // number of values recorded for single value -} - -// AddMeasurement records a value measurement observation to the histogram. -func (h *histogram) addMeasurement(value int64) { - // TODO: assert invariant - h.sum += value - h.sumOfSquares += float64(value) * float64(value) - - bucketIndex := getBucket(value) - - if h.valueCount == 0 || (h.valueCount > 0 && h.value == bucketIndex) { - h.value = bucketIndex - h.valueCount++ - } else { - h.allocateBuckets() - h.buckets[bucketIndex]++ - } -} - -func (h *histogram) allocateBuckets() { - if h.buckets == nil { - h.buckets = make([]int64, bucketCount) - h.buckets[h.value] = h.valueCount - h.value = 0 - h.valueCount = -1 - } -} - -func log2(i int64) int { - n := 0 - for ; i >= 0x100; i >>= 8 { - n += 8 - } - for ; i > 0; i >>= 1 { - n += 1 - } - return n -} - -func getBucket(i int64) (index int) { - index = log2(i) - 1 - if index < 0 { - index = 0 - } - if index >= bucketCount { - index = bucketCount - 1 - } - return -} - -// Total returns the number of recorded observations. -func (h *histogram) total() (total int64) { - if h.valueCount >= 0 { - total = h.valueCount - } - for _, val := range h.buckets { - total += int64(val) - } - return -} - -// Average returns the average value of recorded observations. -func (h *histogram) average() float64 { - t := h.total() - if t == 0 { - return 0 - } - return float64(h.sum) / float64(t) -} - -// Variance returns the variance of recorded observations. -func (h *histogram) variance() float64 { - t := float64(h.total()) - if t == 0 { - return 0 - } - s := float64(h.sum) / t - return h.sumOfSquares/t - s*s -} - -// StandardDeviation returns the standard deviation of recorded observations. -func (h *histogram) standardDeviation() float64 { - return math.Sqrt(h.variance()) -} - -// PercentileBoundary estimates the value that the given fraction of recorded -// observations are less than. -func (h *histogram) percentileBoundary(percentile float64) int64 { - total := h.total() - - // Corner cases (make sure result is strictly less than Total()) - if total == 0 { - return 0 - } else if total == 1 { - return int64(h.average()) - } - - percentOfTotal := round(float64(total) * percentile) - var runningTotal int64 - - for i := range h.buckets { - value := h.buckets[i] - runningTotal += value - if runningTotal == percentOfTotal { - // We hit an exact bucket boundary. If the next bucket has data, it is a - // good estimate of the value. 
If the bucket is empty, we interpolate the - // midpoint between the next bucket's boundary and the next non-zero - // bucket. If the remaining buckets are all empty, then we use the - // boundary for the next bucket as the estimate. - j := uint8(i + 1) - min := bucketBoundary(j) - if runningTotal < total { - for h.buckets[j] == 0 { - j++ - } - } - max := bucketBoundary(j) - return min + round(float64(max-min)/2) - } else if runningTotal > percentOfTotal { - // The value is in this bucket. Interpolate the value. - delta := runningTotal - percentOfTotal - percentBucket := float64(value-delta) / float64(value) - bucketMin := bucketBoundary(uint8(i)) - nextBucketMin := bucketBoundary(uint8(i + 1)) - bucketSize := nextBucketMin - bucketMin - return bucketMin + round(percentBucket*float64(bucketSize)) - } - } - return bucketBoundary(bucketCount - 1) -} - -// Median returns the estimated median of the observed values. -func (h *histogram) median() int64 { - return h.percentileBoundary(0.5) -} - -// Add adds other to h. -func (h *histogram) Add(other timeseries.Observable) { - o := other.(*histogram) - if o.valueCount == 0 { - // Other histogram is empty - } else if h.valueCount >= 0 && o.valueCount > 0 && h.value == o.value { - // Both have a single bucketed value, aggregate them - h.valueCount += o.valueCount - } else { - // Two different values necessitate buckets in this histogram - h.allocateBuckets() - if o.valueCount >= 0 { - h.buckets[o.value] += o.valueCount - } else { - for i := range h.buckets { - h.buckets[i] += o.buckets[i] - } - } - } - h.sumOfSquares += o.sumOfSquares - h.sum += o.sum -} - -// Clear resets the histogram to an empty state, removing all observed values. -func (h *histogram) Clear() { - h.buckets = nil - h.value = 0 - h.valueCount = 0 - h.sum = 0 - h.sumOfSquares = 0 -} - -// CopyFrom copies from other, which must be a *histogram, into h. -func (h *histogram) CopyFrom(other timeseries.Observable) { - o := other.(*histogram) - if o.valueCount == -1 { - h.allocateBuckets() - copy(h.buckets, o.buckets) - } - h.sum = o.sum - h.sumOfSquares = o.sumOfSquares - h.value = o.value - h.valueCount = o.valueCount -} - -// Multiply scales the histogram by the specified ratio. -func (h *histogram) Multiply(ratio float64) { - if h.valueCount == -1 { - for i := range h.buckets { - h.buckets[i] = int64(float64(h.buckets[i]) * ratio) - } - } else { - h.valueCount = int64(float64(h.valueCount) * ratio) - } - h.sum = int64(float64(h.sum) * ratio) - h.sumOfSquares = h.sumOfSquares * ratio -} - -// New creates a new histogram. -func (h *histogram) New() timeseries.Observable { - r := new(histogram) - r.Clear() - return r -} - -func (h *histogram) String() string { - return fmt.Sprintf("%d, %f, %d, %d, %v", - h.sum, h.sumOfSquares, h.value, h.valueCount, h.buckets) -} - -// round returns the closest int64 to the argument -func round(in float64) int64 { - return int64(math.Floor(in + 0.5)) -} - -// bucketBoundary returns the first value in the bucket. -func bucketBoundary(bucket uint8) int64 { - if bucket == 0 { - return 0 - } - return 1 << bucket -} - -// bucketData holds data about a specific bucket for use in distTmpl. -type bucketData struct { - Lower, Upper int64 - N int64 - Pct, CumulativePct float64 - GraphWidth int -} - -// data holds data about a Distribution for use in distTmpl. -type data struct { - Buckets []*bucketData - Count, Median int64 - Mean, StandardDeviation float64 -} - -// maxHTMLBarWidth is the maximum width of the HTML bar for visualizing buckets. 
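// The power-of-two bucketing above is compact enough to verify by hand.
// This standalone sketch (not part of the vendored file) mirrors the
// unexported log2/getBucket helpers and prints where sample values land.
package main

import "fmt"

const bucketCount = 38 // same as histogram.go

func log2(i int64) int {
	n := 0
	for ; i >= 0x100; i >>= 8 {
		n += 8
	}
	for ; i > 0; i >>= 1 {
		n++
	}
	return n
}

func getBucket(i int64) int {
	index := log2(i) - 1
	if index < 0 {
		index = 0
	}
	if index >= bucketCount {
		index = bucketCount - 1
	}
	return index
}

func main() {
	for _, v := range []int64{0, 1, 2, 3, 4, 7, 8, 1000} {
		fmt.Printf("value %4d -> bucket %d\n", v, getBucket(v))
	}
	// 0 and 1 share bucket 0; 2-3 land in bucket 1; 4-7 in bucket 2;
	// 8 starts bucket 3; 1000 lands in bucket 9, which covers [512, 1024).
}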
-const maxHTMLBarWidth = 350.0 - -// newData returns data representing h for use in distTmpl. -func (h *histogram) newData() *data { - // Force the allocation of buckets to simplify the rendering implementation - h.allocateBuckets() - // We scale the bars on the right so that the largest bar is - // maxHTMLBarWidth pixels in width. - maxBucket := int64(0) - for _, n := range h.buckets { - if n > maxBucket { - maxBucket = n - } - } - total := h.total() - barsizeMult := maxHTMLBarWidth / float64(maxBucket) - var pctMult float64 - if total == 0 { - pctMult = 1.0 - } else { - pctMult = 100.0 / float64(total) - } - - buckets := make([]*bucketData, len(h.buckets)) - runningTotal := int64(0) - for i, n := range h.buckets { - if n == 0 { - continue - } - runningTotal += n - var upperBound int64 - if i < bucketCount-1 { - upperBound = bucketBoundary(uint8(i + 1)) - } else { - upperBound = math.MaxInt64 - } - buckets[i] = &bucketData{ - Lower: bucketBoundary(uint8(i)), - Upper: upperBound, - N: n, - Pct: float64(n) * pctMult, - CumulativePct: float64(runningTotal) * pctMult, - GraphWidth: int(float64(n) * barsizeMult), - } - } - return &data{ - Buckets: buckets, - Count: total, - Median: h.median(), - Mean: h.average(), - StandardDeviation: h.standardDeviation(), - } -} - -func (h *histogram) html() template.HTML { - buf := new(bytes.Buffer) - if err := distTmpl().Execute(buf, h.newData()); err != nil { - buf.Reset() - log.Printf("net/trace: couldn't execute template: %v", err) - } - return template.HTML(buf.String()) -} - -var distTmplCache *template.Template -var distTmplOnce sync.Once - -func distTmpl() *template.Template { - distTmplOnce.Do(func() { - // Input: data - distTmplCache = template.Must(template.New("distTmpl").Parse(` - - - - - - - -
-<table>
-<tr>
-	<td style="padding:0.25em">Count: {{.Count}}</td>
-	<td style="padding:0.25em">Mean: {{printf "%.0f" .Mean}}</td>
-	<td style="padding:0.25em">StdDev: {{printf "%.0f" .StandardDeviation}}</td>
-	<td style="padding:0.25em">Median: {{.Median}}</td>
-</tr>
-</table>
-
-<hr>
-
-<table>
-{{range $b := .Buckets}}
-{{if $b}}
-	<tr>
-		<td style="text-align:right;padding:0 0.25em">[{{.Lower}},{{.Upper}})</td>
-		<td style="text-align:right;padding:0 0.25em">{{.N}}</td>
-		<td style="text-align:right;padding:0 0.25em">{{printf "%#.3f" .Pct}}%</td>
-		<td style="text-align:right;padding:0 0.25em">{{printf "%#.3f" .CumulativePct}}%</td>
-		<td><div style="background-color: blue; height: 1em; width: {{.GraphWidth}};"></div></td>
-	</tr>
-{{end}}
-{{end}}
-</table>
-`)) - }) - return distTmplCache -} diff --git a/vendor/golang.org/x/net/trace/trace.go b/vendor/golang.org/x/net/trace/trace.go deleted file mode 100644 index 9eb55e7fd449..000000000000 --- a/vendor/golang.org/x/net/trace/trace.go +++ /dev/null @@ -1,1079 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package trace implements tracing of requests and long-lived objects. -It exports HTTP interfaces on /debug/requests and /debug/events. - -A trace.Trace provides tracing for short-lived objects, usually requests. -A request handler might be implemented like this: - - func fooHandler(w http.ResponseWriter, req *http.Request) { - tr := trace.New("mypkg.Foo", req.URL.Path) - defer tr.Finish() - ... - tr.LazyPrintf("some event %q happened", str) - ... - if err := somethingImportant(); err != nil { - tr.LazyPrintf("somethingImportant failed: %v", err) - tr.SetError() - } - } - -The /debug/requests HTTP endpoint organizes the traces by family, -errors, and duration. It also provides histogram of request duration -for each family. - -A trace.EventLog provides tracing for long-lived objects, such as RPC -connections. - - // A Fetcher fetches URL paths for a single domain. - type Fetcher struct { - domain string - events trace.EventLog - } - - func NewFetcher(domain string) *Fetcher { - return &Fetcher{ - domain, - trace.NewEventLog("mypkg.Fetcher", domain), - } - } - - func (f *Fetcher) Fetch(path string) (string, error) { - resp, err := http.Get("http://" + f.domain + "/" + path) - if err != nil { - f.events.Errorf("Get(%q) = %v", path, err) - return "", err - } - f.events.Printf("Get(%q) = %s", path, resp.Status) - ... - } - - func (f *Fetcher) Close() error { - f.events.Finish() - return nil - } - -The /debug/events HTTP endpoint organizes the event logs by family and -by time since the last error. The expanded view displays recent log -entries and the log's call stack. -*/ -package trace - -import ( - "bytes" - "fmt" - "html/template" - "io" - "log" - "net" - "net/http" - "runtime" - "sort" - "strconv" - "sync" - "sync/atomic" - "time" - - "golang.org/x/net/context" - "golang.org/x/net/internal/timeseries" -) - -// DebugUseAfterFinish controls whether to debug uses of Trace values after finishing. -// FOR DEBUGGING ONLY. This will slow down the program. -var DebugUseAfterFinish = false - -// AuthRequest determines whether a specific request is permitted to load the -// /debug/requests or /debug/events pages. -// -// It returns two bools; the first indicates whether the page may be viewed at all, -// and the second indicates whether sensitive events will be shown. -// -// AuthRequest may be replaced by a program to customize its authorization requirements. -// -// The default AuthRequest function returns (true, true) if and only if the request -// comes from localhost/127.0.0.1/[::1]. -var AuthRequest = func(req *http.Request) (any, sensitive bool) { - // RemoteAddr is commonly in the form "IP" or "IP:port". - // If it is in the form "IP:port", split off the port. 
- host, _, err := net.SplitHostPort(req.RemoteAddr) - if err != nil { - host = req.RemoteAddr - } - switch host { - case "localhost", "127.0.0.1", "::1": - return true, true - default: - return false, false - } -} - -func init() { - http.HandleFunc("/debug/requests", func(w http.ResponseWriter, req *http.Request) { - any, sensitive := AuthRequest(req) - if !any { - http.Error(w, "not allowed", http.StatusUnauthorized) - return - } - w.Header().Set("Content-Type", "text/html; charset=utf-8") - Render(w, req, sensitive) - }) - http.HandleFunc("/debug/events", func(w http.ResponseWriter, req *http.Request) { - any, sensitive := AuthRequest(req) - if !any { - http.Error(w, "not allowed", http.StatusUnauthorized) - return - } - w.Header().Set("Content-Type", "text/html; charset=utf-8") - RenderEvents(w, req, sensitive) - }) -} - -// Render renders the HTML page typically served at /debug/requests. -// It does not do any auth checking; see AuthRequest for the default auth check -// used by the handler registered on http.DefaultServeMux. -// req may be nil. -func Render(w io.Writer, req *http.Request, sensitive bool) { - data := &struct { - Families []string - ActiveTraceCount map[string]int - CompletedTraces map[string]*family - - // Set when a bucket has been selected. - Traces traceList - Family string - Bucket int - Expanded bool - Traced bool - Active bool - ShowSensitive bool // whether to show sensitive events - - Histogram template.HTML - HistogramWindow string // e.g. "last minute", "last hour", "all time" - - // If non-zero, the set of traces is a partial set, - // and this is the total number. - Total int - }{ - CompletedTraces: completedTraces, - } - - data.ShowSensitive = sensitive - if req != nil { - // Allow show_sensitive=0 to force hiding of sensitive data for testing. - // This only goes one way; you can't use show_sensitive=1 to see things. - if req.FormValue("show_sensitive") == "0" { - data.ShowSensitive = false - } - - if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil { - data.Expanded = exp - } - if exp, err := strconv.ParseBool(req.FormValue("rtraced")); err == nil { - data.Traced = exp - } - } - - completedMu.RLock() - data.Families = make([]string, 0, len(completedTraces)) - for fam := range completedTraces { - data.Families = append(data.Families, fam) - } - completedMu.RUnlock() - sort.Strings(data.Families) - - // We are careful here to minimize the time spent locking activeMu, - // since that lock is required every time an RPC starts and finishes. 
- data.ActiveTraceCount = make(map[string]int, len(data.Families)) - activeMu.RLock() - for fam, s := range activeTraces { - data.ActiveTraceCount[fam] = s.Len() - } - activeMu.RUnlock() - - var ok bool - data.Family, data.Bucket, ok = parseArgs(req) - switch { - case !ok: - // No-op - case data.Bucket == -1: - data.Active = true - n := data.ActiveTraceCount[data.Family] - data.Traces = getActiveTraces(data.Family) - if len(data.Traces) < n { - data.Total = n - } - case data.Bucket < bucketsPerFamily: - if b := lookupBucket(data.Family, data.Bucket); b != nil { - data.Traces = b.Copy(data.Traced) - } - default: - if f := getFamily(data.Family, false); f != nil { - var obs timeseries.Observable - f.LatencyMu.RLock() - switch o := data.Bucket - bucketsPerFamily; o { - case 0: - obs = f.Latency.Minute() - data.HistogramWindow = "last minute" - case 1: - obs = f.Latency.Hour() - data.HistogramWindow = "last hour" - case 2: - obs = f.Latency.Total() - data.HistogramWindow = "all time" - } - f.LatencyMu.RUnlock() - if obs != nil { - data.Histogram = obs.(*histogram).html() - } - } - } - - if data.Traces != nil { - defer data.Traces.Free() - sort.Sort(data.Traces) - } - - completedMu.RLock() - defer completedMu.RUnlock() - if err := pageTmpl().ExecuteTemplate(w, "Page", data); err != nil { - log.Printf("net/trace: Failed executing template: %v", err) - } -} - -func parseArgs(req *http.Request) (fam string, b int, ok bool) { - if req == nil { - return "", 0, false - } - fam, bStr := req.FormValue("fam"), req.FormValue("b") - if fam == "" || bStr == "" { - return "", 0, false - } - b, err := strconv.Atoi(bStr) - if err != nil || b < -1 { - return "", 0, false - } - - return fam, b, true -} - -func lookupBucket(fam string, b int) *traceBucket { - f := getFamily(fam, false) - if f == nil || b < 0 || b >= len(f.Buckets) { - return nil - } - return f.Buckets[b] -} - -type contextKeyT string - -var contextKey = contextKeyT("golang.org/x/net/trace.Trace") - -// NewContext returns a copy of the parent context -// and associates it with a Trace. -func NewContext(ctx context.Context, tr Trace) context.Context { - return context.WithValue(ctx, contextKey, tr) -} - -// FromContext returns the Trace bound to the context, if any. -func FromContext(ctx context.Context) (tr Trace, ok bool) { - tr, ok = ctx.Value(contextKey).(Trace) - return -} - -// Trace represents an active request. -type Trace interface { - // LazyLog adds x to the event log. It will be evaluated each time the - // /debug/requests page is rendered. Any memory referenced by x will be - // pinned until the trace is finished and later discarded. - LazyLog(x fmt.Stringer, sensitive bool) - - // LazyPrintf evaluates its arguments with fmt.Sprintf each time the - // /debug/requests page is rendered. Any memory referenced by a will be - // pinned until the trace is finished and later discarded. - LazyPrintf(format string, a ...interface{}) - - // SetError declares that this trace resulted in an error. - SetError() - - // SetRecycler sets a recycler for the trace. - // f will be called for each event passed to LazyLog at a time when - // it is no longer required, whether while the trace is still active - // and the event is discarded, or when a completed trace is discarded. - SetRecycler(f func(interface{})) - - // SetTraceInfo sets the trace info for the trace. - // This is currently unused. - SetTraceInfo(traceID, spanID uint64) - - // SetMaxEvents sets the maximum number of events that will be stored - // in the trace. 
This has no effect if any events have already been - // added to the trace. - SetMaxEvents(m int) - - // Finish declares that this trace is complete. - // The trace should not be used after calling this method. - Finish() -} - -type lazySprintf struct { - format string - a []interface{} -} - -func (l *lazySprintf) String() string { - return fmt.Sprintf(l.format, l.a...) -} - -// New returns a new Trace with the specified family and title. -func New(family, title string) Trace { - tr := newTrace() - tr.ref() - tr.Family, tr.Title = family, title - tr.Start = time.Now() - tr.maxEvents = maxEventsPerTrace - tr.events = tr.eventsBuf[:0] - - activeMu.RLock() - s := activeTraces[tr.Family] - activeMu.RUnlock() - if s == nil { - activeMu.Lock() - s = activeTraces[tr.Family] // check again - if s == nil { - s = new(traceSet) - activeTraces[tr.Family] = s - } - activeMu.Unlock() - } - s.Add(tr) - - // Trigger allocation of the completed trace structure for this family. - // This will cause the family to be present in the request page during - // the first trace of this family. We don't care about the return value, - // nor is there any need for this to run inline, so we execute it in its - // own goroutine, but only if the family isn't allocated yet. - completedMu.RLock() - if _, ok := completedTraces[tr.Family]; !ok { - go allocFamily(tr.Family) - } - completedMu.RUnlock() - - return tr -} - -func (tr *trace) Finish() { - tr.Elapsed = time.Now().Sub(tr.Start) - if DebugUseAfterFinish { - buf := make([]byte, 4<<10) // 4 KB should be enough - n := runtime.Stack(buf, false) - tr.finishStack = buf[:n] - } - - activeMu.RLock() - m := activeTraces[tr.Family] - activeMu.RUnlock() - m.Remove(tr) - - f := getFamily(tr.Family, true) - for _, b := range f.Buckets { - if b.Cond.match(tr) { - b.Add(tr) - } - } - // Add a sample of elapsed time as microseconds to the family's timeseries - h := new(histogram) - h.addMeasurement(tr.Elapsed.Nanoseconds() / 1e3) - f.LatencyMu.Lock() - f.Latency.Add(h) - f.LatencyMu.Unlock() - - tr.unref() // matches ref in New -} - -const ( - bucketsPerFamily = 9 - tracesPerBucket = 10 - maxActiveTraces = 20 // Maximum number of active traces to show. - maxEventsPerTrace = 10 - numHistogramBuckets = 38 -) - -var ( - // The active traces. - activeMu sync.RWMutex - activeTraces = make(map[string]*traceSet) // family -> traces - - // Families of completed traces. - completedMu sync.RWMutex - completedTraces = make(map[string]*family) // family -> traces -) - -type traceSet struct { - mu sync.RWMutex - m map[*trace]bool - - // We could avoid the entire map scan in FirstN by having a slice of all the traces - // ordered by start time, and an index into that from the trace struct, with a periodic - // repack of the slice after enough traces finish; we could also use a skip list or similar. - // However, that would shift some of the expense from /debug/requests time to RPC time, - // which is probably the wrong trade-off. -} - -func (ts *traceSet) Len() int { - ts.mu.RLock() - defer ts.mu.RUnlock() - return len(ts.m) -} - -func (ts *traceSet) Add(tr *trace) { - ts.mu.Lock() - if ts.m == nil { - ts.m = make(map[*trace]bool) - } - ts.m[tr] = true - ts.mu.Unlock() -} - -func (ts *traceSet) Remove(tr *trace) { - ts.mu.Lock() - delete(ts.m, tr) - ts.mu.Unlock() -} - -// FirstN returns the first n traces ordered by time. 
-func (ts *traceSet) FirstN(n int) traceList { - ts.mu.RLock() - defer ts.mu.RUnlock() - - if n > len(ts.m) { - n = len(ts.m) - } - trl := make(traceList, 0, n) - - // Fast path for when no selectivity is needed. - if n == len(ts.m) { - for tr := range ts.m { - tr.ref() - trl = append(trl, tr) - } - sort.Sort(trl) - return trl - } - - // Pick the oldest n traces. - // This is inefficient. See the comment in the traceSet struct. - for tr := range ts.m { - // Put the first n traces into trl in the order they occur. - // When we have n, sort trl, and thereafter maintain its order. - if len(trl) < n { - tr.ref() - trl = append(trl, tr) - if len(trl) == n { - // This is guaranteed to happen exactly once during this loop. - sort.Sort(trl) - } - continue - } - if tr.Start.After(trl[n-1].Start) { - continue - } - - // Find where to insert this one. - tr.ref() - i := sort.Search(n, func(i int) bool { return trl[i].Start.After(tr.Start) }) - trl[n-1].unref() - copy(trl[i+1:], trl[i:]) - trl[i] = tr - } - - return trl -} - -func getActiveTraces(fam string) traceList { - activeMu.RLock() - s := activeTraces[fam] - activeMu.RUnlock() - if s == nil { - return nil - } - return s.FirstN(maxActiveTraces) -} - -func getFamily(fam string, allocNew bool) *family { - completedMu.RLock() - f := completedTraces[fam] - completedMu.RUnlock() - if f == nil && allocNew { - f = allocFamily(fam) - } - return f -} - -func allocFamily(fam string) *family { - completedMu.Lock() - defer completedMu.Unlock() - f := completedTraces[fam] - if f == nil { - f = newFamily() - completedTraces[fam] = f - } - return f -} - -// family represents a set of trace buckets and associated latency information. -type family struct { - // traces may occur in multiple buckets. - Buckets [bucketsPerFamily]*traceBucket - - // latency time series - LatencyMu sync.RWMutex - Latency *timeseries.MinuteHourSeries -} - -func newFamily() *family { - return &family{ - Buckets: [bucketsPerFamily]*traceBucket{ - {Cond: minCond(0)}, - {Cond: minCond(50 * time.Millisecond)}, - {Cond: minCond(100 * time.Millisecond)}, - {Cond: minCond(200 * time.Millisecond)}, - {Cond: minCond(500 * time.Millisecond)}, - {Cond: minCond(1 * time.Second)}, - {Cond: minCond(10 * time.Second)}, - {Cond: minCond(100 * time.Second)}, - {Cond: errorCond{}}, - }, - Latency: timeseries.NewMinuteHourSeries(func() timeseries.Observable { return new(histogram) }), - } -} - -// traceBucket represents a size-capped bucket of historic traces, -// along with a condition for a trace to belong to the bucket. -type traceBucket struct { - Cond cond - - // Ring buffer implementation of a fixed-size FIFO queue. - mu sync.RWMutex - buf [tracesPerBucket]*trace - start int // < tracesPerBucket - length int // <= tracesPerBucket -} - -func (b *traceBucket) Add(tr *trace) { - b.mu.Lock() - defer b.mu.Unlock() - - i := b.start + b.length - if i >= tracesPerBucket { - i -= tracesPerBucket - } - if b.length == tracesPerBucket { - // "Remove" an element from the bucket. - b.buf[i].unref() - b.start++ - if b.start == tracesPerBucket { - b.start = 0 - } - } - b.buf[i] = tr - if b.length < tracesPerBucket { - b.length++ - } - tr.ref() -} - -// Copy returns a copy of the traces in the bucket. -// If tracedOnly is true, only the traces with trace information will be returned. -// The logs will be ref'd before returning; the caller should call -// the Free method when it is done with them. -// TODO(dsymonds): keep track of traced requests in separate buckets. 
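// traceBucket's buf/start/length fields above implement a fixed-size FIFO
// ring: Add overwrites the oldest entry once the bucket is full, and readers
// walk length entries starting at start, wrapping at the capacity. A
// standalone sketch with strings in place of traces (not part of the
// vendored file):
package main

import "fmt"

const capacity = 3 // tracesPerBucket is 10 in the real code

type ring struct {
	buf    [capacity]string
	start  int // index of the oldest element; < capacity
	length int // number of live elements; <= capacity
}

func (r *ring) add(s string) {
	i := r.start + r.length
	if i >= capacity {
		i -= capacity
	}
	if r.length == capacity {
		// Full: advance start so the oldest element is overwritten.
		r.start++
		if r.start == capacity {
			r.start = 0
		}
	} else {
		r.length++
	}
	r.buf[i] = s
}

func main() {
	r := new(ring)
	for _, s := range []string{"a", "b", "c", "d", "e"} {
		r.add(s)
	}
	for i, x := 0, r.start; i < r.length; i++ {
		fmt.Print(r.buf[x], " ") // prints: c d e
		x++
		if x == capacity {
			x = 0
		}
	}
	fmt.Println()
}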
-func (b *traceBucket) Copy(tracedOnly bool) traceList { - b.mu.RLock() - defer b.mu.RUnlock() - - trl := make(traceList, 0, b.length) - for i, x := 0, b.start; i < b.length; i++ { - tr := b.buf[x] - if !tracedOnly || tr.spanID != 0 { - tr.ref() - trl = append(trl, tr) - } - x++ - if x == b.length { - x = 0 - } - } - return trl -} - -func (b *traceBucket) Empty() bool { - b.mu.RLock() - defer b.mu.RUnlock() - return b.length == 0 -} - -// cond represents a condition on a trace. -type cond interface { - match(t *trace) bool - String() string -} - -type minCond time.Duration - -func (m minCond) match(t *trace) bool { return t.Elapsed >= time.Duration(m) } -func (m minCond) String() string { return fmt.Sprintf("≥%gs", time.Duration(m).Seconds()) } - -type errorCond struct{} - -func (e errorCond) match(t *trace) bool { return t.IsError } -func (e errorCond) String() string { return "errors" } - -type traceList []*trace - -// Free calls unref on each element of the list. -func (trl traceList) Free() { - for _, t := range trl { - t.unref() - } -} - -// traceList may be sorted in reverse chronological order. -func (trl traceList) Len() int { return len(trl) } -func (trl traceList) Less(i, j int) bool { return trl[i].Start.After(trl[j].Start) } -func (trl traceList) Swap(i, j int) { trl[i], trl[j] = trl[j], trl[i] } - -// An event is a timestamped log entry in a trace. -type event struct { - When time.Time - Elapsed time.Duration // since previous event in trace - NewDay bool // whether this event is on a different day to the previous event - Recyclable bool // whether this event was passed via LazyLog - Sensitive bool // whether this event contains sensitive information - What interface{} // string or fmt.Stringer -} - -// WhenString returns a string representation of the elapsed time of the event. -// It will include the date if midnight was crossed. -func (e event) WhenString() string { - if e.NewDay { - return e.When.Format("2006/01/02 15:04:05.000000") - } - return e.When.Format("15:04:05.000000") -} - -// discarded represents a number of discarded events. -// It is stored as *discarded to make it easier to update in-place. -type discarded int - -func (d *discarded) String() string { - return fmt.Sprintf("(%d events discarded)", int(*d)) -} - -// trace represents an active or complete request, -// either sent or received by this program. -type trace struct { - // Family is the top-level grouping of traces to which this belongs. - Family string - - // Title is the title of this trace. - Title string - - // Timing information. - Start time.Time - Elapsed time.Duration // zero while active - - // Trace information if non-zero. - traceID uint64 - spanID uint64 - - // Whether this trace resulted in an error. - IsError bool - - // Append-only sequence of events (modulo discards). - mu sync.RWMutex - events []event - maxEvents int - - refs int32 // how many buckets this is in - recycler func(interface{}) - disc discarded // scratch space to avoid allocation - - finishStack []byte // where finish was called, if DebugUseAfterFinish is set - - eventsBuf [4]event // preallocated buffer in case we only log a few events -} - -func (tr *trace) reset() { - // Clear all but the mutex. Mutexes may not be copied, even when unlocked. 
- tr.Family = "" - tr.Title = "" - tr.Start = time.Time{} - tr.Elapsed = 0 - tr.traceID = 0 - tr.spanID = 0 - tr.IsError = false - tr.maxEvents = 0 - tr.events = nil - tr.refs = 0 - tr.recycler = nil - tr.disc = 0 - tr.finishStack = nil - for i := range tr.eventsBuf { - tr.eventsBuf[i] = event{} - } -} - -// delta returns the elapsed time since the last event or the trace start, -// and whether it spans midnight. -// L >= tr.mu -func (tr *trace) delta(t time.Time) (time.Duration, bool) { - if len(tr.events) == 0 { - return t.Sub(tr.Start), false - } - prev := tr.events[len(tr.events)-1].When - return t.Sub(prev), prev.Day() != t.Day() -} - -func (tr *trace) addEvent(x interface{}, recyclable, sensitive bool) { - if DebugUseAfterFinish && tr.finishStack != nil { - buf := make([]byte, 4<<10) // 4 KB should be enough - n := runtime.Stack(buf, false) - log.Printf("net/trace: trace used after finish:\nFinished at:\n%s\nUsed at:\n%s", tr.finishStack, buf[:n]) - } - - /* - NOTE TO DEBUGGERS - - If you are here because your program panicked in this code, - it is almost definitely the fault of code using this package, - and very unlikely to be the fault of this code. - - The most likely scenario is that some code elsewhere is using - a trace.Trace after its Finish method is called. - You can temporarily set the DebugUseAfterFinish var - to help discover where that is; do not leave that var set, - since it makes this package much less efficient. - */ - - e := event{When: time.Now(), What: x, Recyclable: recyclable, Sensitive: sensitive} - tr.mu.Lock() - e.Elapsed, e.NewDay = tr.delta(e.When) - if len(tr.events) < tr.maxEvents { - tr.events = append(tr.events, e) - } else { - // Discard the middle events. - di := int((tr.maxEvents - 1) / 2) - if d, ok := tr.events[di].What.(*discarded); ok { - (*d)++ - } else { - // disc starts at two to count for the event it is replacing, - // plus the next one that we are about to drop. - tr.disc = 2 - if tr.recycler != nil && tr.events[di].Recyclable { - go tr.recycler(tr.events[di].What) - } - tr.events[di].What = &tr.disc - } - // The timestamp of the discarded meta-event should be - // the time of the last event it is representing. - tr.events[di].When = tr.events[di+1].When - - if tr.recycler != nil && tr.events[di+1].Recyclable { - go tr.recycler(tr.events[di+1].What) - } - copy(tr.events[di+1:], tr.events[di+2:]) - tr.events[tr.maxEvents-1] = e - } - tr.mu.Unlock() -} - -func (tr *trace) LazyLog(x fmt.Stringer, sensitive bool) { - tr.addEvent(x, true, sensitive) -} - -func (tr *trace) LazyPrintf(format string, a ...interface{}) { - tr.addEvent(&lazySprintf{format, a}, false, false) -} - -func (tr *trace) SetError() { tr.IsError = true } - -func (tr *trace) SetRecycler(f func(interface{})) { - tr.recycler = f -} - -func (tr *trace) SetTraceInfo(traceID, spanID uint64) { - tr.traceID, tr.spanID = traceID, spanID -} - -func (tr *trace) SetMaxEvents(m int) { - // Always keep at least three events: first, discarded count, last. - if len(tr.events) == 0 && m > 3 { - tr.maxEvents = m - } -} - -func (tr *trace) ref() { - atomic.AddInt32(&tr.refs, 1) -} - -func (tr *trace) unref() { - if atomic.AddInt32(&tr.refs, -1) == 0 { - if tr.recycler != nil { - // freeTrace clears tr, so we hold tr.recycler and tr.events here. 
- go func(f func(interface{}), es []event) { - for _, e := range es { - if e.Recyclable { - f(e.What) - } - } - }(tr.recycler, tr.events) - } - - freeTrace(tr) - } -} - -func (tr *trace) When() string { - return tr.Start.Format("2006/01/02 15:04:05.000000") -} - -func (tr *trace) ElapsedTime() string { - t := tr.Elapsed - if t == 0 { - // Active trace. - t = time.Since(tr.Start) - } - return fmt.Sprintf("%.6f", t.Seconds()) -} - -func (tr *trace) Events() []event { - tr.mu.RLock() - defer tr.mu.RUnlock() - return tr.events -} - -var traceFreeList = make(chan *trace, 1000) // TODO(dsymonds): Use sync.Pool? - -// newTrace returns a trace ready to use. -func newTrace() *trace { - select { - case tr := <-traceFreeList: - return tr - default: - return new(trace) - } -} - -// freeTrace adds tr to traceFreeList if there's room. -// This is non-blocking. -func freeTrace(tr *trace) { - if DebugUseAfterFinish { - return // never reuse - } - tr.reset() - select { - case traceFreeList <- tr: - default: - } -} - -func elapsed(d time.Duration) string { - b := []byte(fmt.Sprintf("%.6f", d.Seconds())) - - // For subsecond durations, blank all zeros before decimal point, - // and all zeros between the decimal point and the first non-zero digit. - if d < time.Second { - dot := bytes.IndexByte(b, '.') - for i := 0; i < dot; i++ { - b[i] = ' ' - } - for i := dot + 1; i < len(b); i++ { - if b[i] == '0' { - b[i] = ' ' - } else { - break - } - } - } - - return string(b) -} - -var pageTmplCache *template.Template -var pageTmplOnce sync.Once - -func pageTmpl() *template.Template { - pageTmplOnce.Do(func() { - pageTmplCache = template.Must(template.New("Page").Funcs(template.FuncMap{ - "elapsed": elapsed, - "add": func(a, b int) int { return a + b }, - }).Parse(pageHTML)) - }) - return pageTmplCache -} - -const pageHTML = ` -{{template "Prolog" .}} -{{template "StatusTable" .}} -{{template "Epilog" .}} - -{{define "Prolog"}} - - - /debug/requests - - - - -

-<h1>/debug/requests</h1>
-{{end}} {{/* end of Prolog */}}
-
-{{define "StatusTable"}}
-<table id="tr-status">
-	{{range $fam := .Families}}
-	<tr>
-		<td class="family">{{$fam}}</td>
-
-		{{$n := index $.ActiveTraceCount $fam}}
-		<td class="active {{if not $n}}empty{{end}}">
-			{{if $n}}<a href="?fam={{$fam}}&b=-1{{if $.Expanded}}&exp=1{{end}}">{{end}}
-			[{{$n}} active]
-			{{if $n}}</a>{{end}}
-		</td>
-
-		{{$f := index $.CompletedTraces $fam}}
-		{{range $i, $b := $f.Buckets}}
-		{{$empty := $b.Empty}}
-		<td {{if $empty}}class="empty"{{end}}>
-			{{if not $empty}}<a href="?fam={{$fam}}&b={{$i}}{{if $.Expanded}}&exp=1{{end}}">{{end}}
-			[{{.Cond}}]
-			{{if not $empty}}</a>{{end}}
-		</td>
-		{{end}}
-
-		{{$nb := len $f.Buckets}}
-		<td class="latency-first">
-			<a href="?fam={{$fam}}&b={{$nb}}">[minute]</a>
-		</td>
-		<td>
-			<a href="?fam={{$fam}}&b={{add $nb 1}}">[hour]</a>
-		</td>
-		<td>
-			<a href="?fam={{$fam}}&b={{add $nb 2}}">[total]</a>
-		</td>
-	</tr>
-	{{end}}
-</table>
-{{end}} {{/* end of StatusTable */}}
-
-{{define "Epilog"}}
-{{if $.Traces}}
-<hr />
-<h3>Family: {{$.Family}}</h3>
-
-{{if or $.Expanded $.Traced}}
-	<a href="?fam={{$.Family}}&b={{$.Bucket}}">[Normal/Summary]</a>
-{{else}}
-	[Normal/Summary]
-{{end}}
-
-{{if or (not $.Expanded) $.Traced}}
-	<a href="?fam={{$.Family}}&b={{$.Bucket}}&exp=1">[Normal/Expanded]</a>
-{{else}}
-	[Normal/Expanded]
-{{end}}
-
-{{if not $.Active}}
-	{{if or $.Expanded (not $.Traced)}}
-	<a href="?fam={{$.Family}}&b={{$.Bucket}}&traced=1">[Traced/Summary]</a>
-	{{else}}
-	[Traced/Summary]
-	{{end}}
-	{{if or (not $.Expanded) (not $.Traced)}}
-	<a href="?fam={{$.Family}}&b={{$.Bucket}}&exp=1&traced=1">[Traced/Expanded]</a>
-	{{else}}
-	[Traced/Expanded]
-	{{end}}
-{{end}}
-
-{{if $.Total}}
-<p><em>Showing {{len $.Traces}} of {{$.Total}} traces.</em></p>
-{{end}}
-
-<table id="reqs">
-	<caption>
-		{{if $.Active}}Active{{else}}Completed{{end}} Requests
-	</caption>
-	<tr><th>When</th><th>Elapsed (s)</th></tr>
-	{{range $tr := $.Traces}}
-	<tr class="first">
-		<td class="when">{{$tr.When}}</td>
-		<td class="elapsed">{{$tr.ElapsedTime}}</td>
-		<td>{{$tr.Title}}</td>
-		{{/* TODO: include traceID/spanID */}}
-	</tr>
-	{{if $.Expanded}}
-	{{range $tr.Events}}
-	<tr>
-		<td class="when">{{.WhenString}}</td>
-		<td class="elapsed">{{elapsed .Elapsed}}</td>
-		<td>{{if or $.ShowSensitive (not .Sensitive)}}... {{.What}}{{else}}<em>[redacted]</em>{{end}}</td>
-	</tr>
-	{{end}}
-	{{end}}
-	{{end}}
-</table>
-{{end}} {{/* if $.Traces */}}
-
-{{if $.Histogram}}
-<h4>Latency (µs) of {{$.Family}} over {{$.HistogramWindow}}</h4>
-{{$.Histogram}} -{{end}} {{/* if $.Histogram */}} - - - -{{end}} {{/* end of Epilog */}} -` diff --git a/vendor/golang.org/x/oauth2/BUILD b/vendor/golang.org/x/oauth2/BUILD index e9a0d5c21213..e647ada9db13 100644 --- a/vendor/golang.org/x/oauth2/BUILD +++ b/vendor/golang.org/x/oauth2/BUILD @@ -1,9 +1,4 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "go_default_library", @@ -13,12 +8,32 @@ go_library( "transport.go", ], importpath = "golang.org/x/oauth2", + visibility = ["//visibility:public"], deps = [ "//vendor/golang.org/x/net/context:go_default_library", "//vendor/golang.org/x/oauth2/internal:go_default_library", ], ) +go_test( + name = "go_default_test", + srcs = [ + "oauth2_test.go", + "token_test.go", + "transport_test.go", + ], + importpath = "golang.org/x/oauth2", + library = ":go_default_library", + deps = ["//vendor/golang.org/x/net/context:go_default_library"], +) + +go_test( + name = "go_default_xtest", + srcs = ["example_test.go"], + importpath = "golang.org/x/oauth2_test", + deps = [":go_default_library"], +) + filegroup( name = "package-srcs", srcs = glob(["**"]), @@ -30,10 +45,8 @@ filegroup( name = "all-srcs", srcs = [ ":package-srcs", - "//vendor/golang.org/x/oauth2/google:all-srcs", "//vendor/golang.org/x/oauth2/internal:all-srcs", - "//vendor/golang.org/x/oauth2/jws:all-srcs", - "//vendor/golang.org/x/oauth2/jwt:all-srcs", ], tags = ["automanaged"], + visibility = ["//visibility:public"], ) diff --git a/vendor/golang.org/x/oauth2/example_test.go b/vendor/golang.org/x/oauth2/example_test.go new file mode 100644 index 000000000000..d861fe7e3536 --- /dev/null +++ b/vendor/golang.org/x/oauth2/example_test.go @@ -0,0 +1,47 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package oauth2_test + +import ( + "context" + "fmt" + "log" + + "golang.org/x/oauth2" +) + +func ExampleConfig() { + ctx := context.Background() + conf := &oauth2.Config{ + ClientID: "YOUR_CLIENT_ID", + ClientSecret: "YOUR_CLIENT_SECRET", + Scopes: []string{"SCOPE1", "SCOPE2"}, + Endpoint: oauth2.Endpoint{ + AuthURL: "https://provider.com/o/oauth2/auth", + TokenURL: "https://provider.com/o/oauth2/token", + }, + } + + // Redirect user to consent page to ask for permission + // for the scopes specified above. + url := conf.AuthCodeURL("state", oauth2.AccessTypeOffline) + fmt.Printf("Visit the URL for the auth dialog: %v", url) + + // Use the authorization code that is pushed to the redirect + // URL. Exchange will do the handshake to retrieve the + // initial access token. The HTTP Client returned by + // conf.Client will refresh the token as necessary. 
+ var code string + if _, err := fmt.Scan(&code); err != nil { + log.Fatal(err) + } + tok, err := conf.Exchange(ctx, code) + if err != nil { + log.Fatal(err) + } + + client := conf.Client(ctx, tok) + client.Get("...") +} diff --git a/vendor/golang.org/x/oauth2/google/BUILD b/vendor/golang.org/x/oauth2/google/BUILD deleted file mode 100644 index 979c693d0006..000000000000 --- a/vendor/golang.org/x/oauth2/google/BUILD +++ /dev/null @@ -1,39 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "appengine.go", - "default.go", - "google.go", - "jwt.go", - "sdk.go", - ], - importpath = "golang.org/x/oauth2/google", - deps = [ - "//vendor/cloud.google.com/go/compute/metadata:go_default_library", - "//vendor/golang.org/x/net/context:go_default_library", - "//vendor/golang.org/x/oauth2:go_default_library", - "//vendor/golang.org/x/oauth2/internal:go_default_library", - "//vendor/golang.org/x/oauth2/jws:go_default_library", - "//vendor/golang.org/x/oauth2/jwt:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], -) diff --git a/vendor/golang.org/x/oauth2/google/appengine.go b/vendor/golang.org/x/oauth2/google/appengine.go deleted file mode 100644 index dc993efb5e10..000000000000 --- a/vendor/golang.org/x/oauth2/google/appengine.go +++ /dev/null @@ -1,86 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package google - -import ( - "sort" - "strings" - "sync" - "time" - - "golang.org/x/net/context" - "golang.org/x/oauth2" -) - -// Set at init time by appenginevm_hook.go. If true, we are on App Engine Managed VMs. -var appengineVM bool - -// Set at init time by appengine_hook.go. If nil, we're not on App Engine. -var appengineTokenFunc func(c context.Context, scopes ...string) (token string, expiry time.Time, err error) - -// AppEngineTokenSource returns a token source that fetches tokens -// issued to the current App Engine application's service account. -// If you are implementing a 3-legged OAuth 2.0 flow on App Engine -// that involves user accounts, see oauth2.Config instead. -// -// The provided context must have come from appengine.NewContext. -func AppEngineTokenSource(ctx context.Context, scope ...string) oauth2.TokenSource { - if appengineTokenFunc == nil { - panic("google: AppEngineTokenSource can only be used on App Engine.") - } - scopes := append([]string{}, scope...) - sort.Strings(scopes) - return &appEngineTokenSource{ - ctx: ctx, - scopes: scopes, - key: strings.Join(scopes, " "), - } -} - -// aeTokens helps the fetched tokens to be reused until their expiration. 
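(Editorial aside, not part of the diff: the AppEngineTokenSource deleted above was typically consumed from inside a request handler. A minimal sketch, assuming the classic App Engine runtime; the scope string, handler name, and request URL are placeholders, not values from this repository.)

    package main

    import (
    	"net/http"

    	"golang.org/x/oauth2"
    	"golang.org/x/oauth2/google"
    	"google.golang.org/appengine"
    )

    func handler(w http.ResponseWriter, r *http.Request) {
    	// appengine.NewContext satisfies the "must come from appengine.NewContext"
    	// requirement in the doc comment above; the scope is a placeholder.
    	ctx := appengine.NewContext(r)
    	ts := google.AppEngineTokenSource(ctx, "https://www.googleapis.com/auth/cloud-platform")
    	client := oauth2.NewClient(ctx, ts)
    	resp, err := client.Get("https://www.googleapis.com/placeholder") // placeholder URL
    	if err == nil {
    		resp.Body.Close()
    	}
    }

    func main() {
    	http.HandleFunc("/", handler)
    	appengine.Main()
    }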
-var ( - aeTokensMu sync.Mutex - aeTokens = make(map[string]*tokenLock) // key is space-separated scopes -) - -type tokenLock struct { - mu sync.Mutex // guards t; held while fetching or updating t - t *oauth2.Token -} - -type appEngineTokenSource struct { - ctx context.Context - scopes []string - key string // to aeTokens map; space-separated scopes -} - -func (ts *appEngineTokenSource) Token() (*oauth2.Token, error) { - if appengineTokenFunc == nil { - panic("google: AppEngineTokenSource can only be used on App Engine.") - } - - aeTokensMu.Lock() - tok, ok := aeTokens[ts.key] - if !ok { - tok = &tokenLock{} - aeTokens[ts.key] = tok - } - aeTokensMu.Unlock() - - tok.mu.Lock() - defer tok.mu.Unlock() - if tok.t.Valid() { - return tok.t, nil - } - access, exp, err := appengineTokenFunc(ts.ctx, ts.scopes...) - if err != nil { - return nil, err - } - tok.t = &oauth2.Token{ - AccessToken: access, - Expiry: exp, - } - return tok.t, nil -} diff --git a/vendor/golang.org/x/oauth2/google/appengine_hook.go b/vendor/golang.org/x/oauth2/google/appengine_hook.go deleted file mode 100644 index 4f42c8b34354..000000000000 --- a/vendor/golang.org/x/oauth2/google/appengine_hook.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build appengine - -package google - -import "google.golang.org/appengine" - -func init() { - appengineTokenFunc = appengine.AccessToken -} diff --git a/vendor/golang.org/x/oauth2/google/appenginevm_hook.go b/vendor/golang.org/x/oauth2/google/appenginevm_hook.go deleted file mode 100644 index 633611cc3a01..000000000000 --- a/vendor/golang.org/x/oauth2/google/appenginevm_hook.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright 2015 The oauth2 Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build appenginevm - -package google - -import "google.golang.org/appengine" - -func init() { - appengineVM = true - appengineTokenFunc = appengine.AccessToken -} diff --git a/vendor/golang.org/x/oauth2/google/default.go b/vendor/golang.org/x/oauth2/google/default.go deleted file mode 100644 index 565d731c45b0..000000000000 --- a/vendor/golang.org/x/oauth2/google/default.go +++ /dev/null @@ -1,155 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package google - -import ( - "encoding/json" - "errors" - "fmt" - "io/ioutil" - "net/http" - "os" - "path/filepath" - "runtime" - - "cloud.google.com/go/compute/metadata" - "golang.org/x/net/context" - "golang.org/x/oauth2" - "golang.org/x/oauth2/jwt" -) - -// DefaultClient returns an HTTP Client that uses the -// DefaultTokenSource to obtain authentication credentials. -// -// This client should be used when developing services -// that run on Google App Engine or Google Compute Engine -// and use "Application Default Credentials." -// -// For more details, see: -// https://developers.google.com/accounts/docs/application-default-credentials -// -func DefaultClient(ctx context.Context, scope ...string) (*http.Client, error) { - ts, err := DefaultTokenSource(ctx, scope...) - if err != nil { - return nil, err - } - return oauth2.NewClient(ctx, ts), nil -} - -// DefaultTokenSource is a token source that uses -// "Application Default Credentials". 
-// -// It looks for credentials in the following places, -// preferring the first location found: -// -// 1. A JSON file whose path is specified by the -// GOOGLE_APPLICATION_CREDENTIALS environment variable. -// 2. A JSON file in a location known to the gcloud command-line tool. -// On Windows, this is %APPDATA%/gcloud/application_default_credentials.json. -// On other systems, $HOME/.config/gcloud/application_default_credentials.json. -// 3. On Google App Engine it uses the appengine.AccessToken function. -// 4. On Google Compute Engine and Google App Engine Managed VMs, it fetches -// credentials from the metadata server. -// (In this final case any provided scopes are ignored.) -// -// For more details, see: -// https://developers.google.com/accounts/docs/application-default-credentials -// -func DefaultTokenSource(ctx context.Context, scope ...string) (oauth2.TokenSource, error) { - // First, try the environment variable. - const envVar = "GOOGLE_APPLICATION_CREDENTIALS" - if filename := os.Getenv(envVar); filename != "" { - ts, err := tokenSourceFromFile(ctx, filename, scope) - if err != nil { - return nil, fmt.Errorf("google: error getting credentials using %v environment variable: %v", envVar, err) - } - return ts, nil - } - - // Second, try a well-known file. - filename := wellKnownFile() - _, err := os.Stat(filename) - if err == nil { - ts, err2 := tokenSourceFromFile(ctx, filename, scope) - if err2 == nil { - return ts, nil - } - err = err2 - } else if os.IsNotExist(err) { - err = nil // ignore this error - } - if err != nil { - return nil, fmt.Errorf("google: error getting credentials using well-known file (%v): %v", filename, err) - } - - // Third, if we're on Google App Engine use those credentials. - if appengineTokenFunc != nil && !appengineVM { - return AppEngineTokenSource(ctx, scope...), nil - } - - // Fourth, if we're on Google Compute Engine use the metadata server. - if metadata.OnGCE() { - return ComputeTokenSource(""), nil - } - - // None are found; return helpful error. - const url = "https://developers.google.com/accounts/docs/application-default-credentials" - return nil, fmt.Errorf("google: could not find default credentials. 
See %v for more information.", url) -} - -func wellKnownFile() string { - const f = "application_default_credentials.json" - if runtime.GOOS == "windows" { - return filepath.Join(os.Getenv("APPDATA"), "gcloud", f) - } - return filepath.Join(guessUnixHomeDir(), ".config", "gcloud", f) -} - -func tokenSourceFromFile(ctx context.Context, filename string, scopes []string) (oauth2.TokenSource, error) { - b, err := ioutil.ReadFile(filename) - if err != nil { - return nil, err - } - var d struct { - // Common fields - Type string - ClientID string `json:"client_id"` - - // User Credential fields - ClientSecret string `json:"client_secret"` - RefreshToken string `json:"refresh_token"` - - // Service Account fields - ClientEmail string `json:"client_email"` - PrivateKeyID string `json:"private_key_id"` - PrivateKey string `json:"private_key"` - } - if err := json.Unmarshal(b, &d); err != nil { - return nil, err - } - switch d.Type { - case "authorized_user": - cfg := &oauth2.Config{ - ClientID: d.ClientID, - ClientSecret: d.ClientSecret, - Scopes: append([]string{}, scopes...), // copy - Endpoint: Endpoint, - } - tok := &oauth2.Token{RefreshToken: d.RefreshToken} - return cfg.TokenSource(ctx, tok), nil - case "service_account": - cfg := &jwt.Config{ - Email: d.ClientEmail, - PrivateKey: []byte(d.PrivateKey), - Scopes: append([]string{}, scopes...), // copy - TokenURL: JWTTokenURL, - } - return cfg.TokenSource(ctx), nil - case "": - return nil, errors.New("missing 'type' field in credentials") - default: - return nil, fmt.Errorf("unknown credential type: %q", d.Type) - } -} diff --git a/vendor/golang.org/x/oauth2/google/google.go b/vendor/golang.org/x/oauth2/google/google.go deleted file mode 100644 index 4f7352762b34..000000000000 --- a/vendor/golang.org/x/oauth2/google/google.go +++ /dev/null @@ -1,153 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package google provides support for making OAuth2 authorized and -// authenticated HTTP requests to Google APIs. -// It supports the Web server flow, client-side credentials, service accounts, -// Google Compute Engine service accounts, and Google App Engine service -// accounts. -// -// For more information, please read -// https://developers.google.com/accounts/docs/OAuth2 -// and -// https://developers.google.com/accounts/docs/application-default-credentials. -package google - -import ( - "encoding/json" - "errors" - "fmt" - "strings" - "time" - - "cloud.google.com/go/compute/metadata" - "golang.org/x/oauth2" - "golang.org/x/oauth2/jwt" -) - -// Endpoint is Google's OAuth 2.0 endpoint. -var Endpoint = oauth2.Endpoint{ - AuthURL: "https://accounts.google.com/o/oauth2/auth", - TokenURL: "https://accounts.google.com/o/oauth2/token", -} - -// JWTTokenURL is Google's OAuth 2.0 token URL to use with the JWT flow. -const JWTTokenURL = "https://accounts.google.com/o/oauth2/token" - -// ConfigFromJSON uses a Google Developers Console client_credentials.json -// file to construct a config. -// client_credentials.json can be downloaded from -// https://console.developers.google.com, under "Credentials". Download the Web -// application credentials in the JSON format and provide the contents of the -// file as jsonKey. 
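(Editorial aside, not part of the diff: a hedged sketch of how ConfigFromJSON was typically driven; the credentials file name and scope below are placeholders, not values from this repository.)

    package main

    import (
    	"fmt"
    	"io/ioutil"
    	"log"

    	"golang.org/x/oauth2/google"
    )

    func main() {
    	// Placeholder path: a client_credentials.json downloaded from the console.
    	b, err := ioutil.ReadFile("client_credentials.json")
    	if err != nil {
    		log.Fatal(err)
    	}
    	conf, err := google.ConfigFromJSON(b, "https://www.googleapis.com/auth/devstorage.read_only")
    	if err != nil {
    		log.Fatal(err)
    	}
    	// conf is a *oauth2.Config; from here the usual three-legged flow applies.
    	fmt.Println(conf.AuthCodeURL("state"))
    }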
-func ConfigFromJSON(jsonKey []byte, scope ...string) (*oauth2.Config, error) { - type cred struct { - ClientID string `json:"client_id"` - ClientSecret string `json:"client_secret"` - RedirectURIs []string `json:"redirect_uris"` - AuthURI string `json:"auth_uri"` - TokenURI string `json:"token_uri"` - } - var j struct { - Web *cred `json:"web"` - Installed *cred `json:"installed"` - } - if err := json.Unmarshal(jsonKey, &j); err != nil { - return nil, err - } - var c *cred - switch { - case j.Web != nil: - c = j.Web - case j.Installed != nil: - c = j.Installed - default: - return nil, fmt.Errorf("oauth2/google: no credentials found") - } - if len(c.RedirectURIs) < 1 { - return nil, errors.New("oauth2/google: missing redirect URL in the client_credentials.json") - } - return &oauth2.Config{ - ClientID: c.ClientID, - ClientSecret: c.ClientSecret, - RedirectURL: c.RedirectURIs[0], - Scopes: scope, - Endpoint: oauth2.Endpoint{ - AuthURL: c.AuthURI, - TokenURL: c.TokenURI, - }, - }, nil -} - -// JWTConfigFromJSON uses a Google Developers service account JSON key file to read -// the credentials that authorize and authenticate the requests. -// Create a service account on "Credentials" for your project at -// https://console.developers.google.com to download a JSON key file. -func JWTConfigFromJSON(jsonKey []byte, scope ...string) (*jwt.Config, error) { - var key struct { - Email string `json:"client_email"` - PrivateKey string `json:"private_key"` - PrivateKeyID string `json:"private_key_id"` - TokenURL string `json:"token_uri"` - } - if err := json.Unmarshal(jsonKey, &key); err != nil { - return nil, err - } - config := &jwt.Config{ - Email: key.Email, - PrivateKey: []byte(key.PrivateKey), - PrivateKeyID: key.PrivateKeyID, - Scopes: scope, - TokenURL: key.TokenURL, - } - if config.TokenURL == "" { - config.TokenURL = JWTTokenURL - } - return config, nil -} - -// ComputeTokenSource returns a token source that fetches access tokens -// from Google Compute Engine (GCE)'s metadata server. It's only valid to use -// this token source if your program is running on a GCE instance. -// If no account is specified, "default" is used. -// Further information about retrieving access tokens from the GCE metadata -// server can be found at https://cloud.google.com/compute/docs/authentication. 
-func ComputeTokenSource(account string) oauth2.TokenSource { - return oauth2.ReuseTokenSource(nil, computeSource{account: account}) -} - -type computeSource struct { - account string -} - -func (cs computeSource) Token() (*oauth2.Token, error) { - if !metadata.OnGCE() { - return nil, errors.New("oauth2/google: can't get a token from the metadata service; not running on GCE") - } - acct := cs.account - if acct == "" { - acct = "default" - } - tokenJSON, err := metadata.Get("instance/service-accounts/" + acct + "/token") - if err != nil { - return nil, err - } - var res struct { - AccessToken string `json:"access_token"` - ExpiresInSec int `json:"expires_in"` - TokenType string `json:"token_type"` - } - err = json.NewDecoder(strings.NewReader(tokenJSON)).Decode(&res) - if err != nil { - return nil, fmt.Errorf("oauth2/google: invalid token JSON from metadata: %v", err) - } - if res.ExpiresInSec == 0 || res.AccessToken == "" { - return nil, fmt.Errorf("oauth2/google: incomplete token received from metadata") - } - return &oauth2.Token{ - AccessToken: res.AccessToken, - TokenType: res.TokenType, - Expiry: time.Now().Add(time.Duration(res.ExpiresInSec) * time.Second), - }, nil -} diff --git a/vendor/golang.org/x/oauth2/google/jwt.go b/vendor/golang.org/x/oauth2/google/jwt.go deleted file mode 100644 index b0fdb3a888ac..000000000000 --- a/vendor/golang.org/x/oauth2/google/jwt.go +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package google - -import ( - "crypto/rsa" - "fmt" - "time" - - "golang.org/x/oauth2" - "golang.org/x/oauth2/internal" - "golang.org/x/oauth2/jws" -) - -// JWTAccessTokenSourceFromJSON uses a Google Developers service account JSON -// key file to read the credentials that authorize and authenticate the -// requests, and returns a TokenSource that does not use any OAuth2 flow but -// instead creates a JWT and sends that as the access token. -// The audience is typically a URL that specifies the scope of the credentials. -// -// Note that this is not a standard OAuth flow, but rather an -// optimization supported by a few Google services. -// Unless you know otherwise, you should use JWTConfigFromJSON instead. 
-func JWTAccessTokenSourceFromJSON(jsonKey []byte, audience string) (oauth2.TokenSource, error) { - cfg, err := JWTConfigFromJSON(jsonKey) - if err != nil { - return nil, fmt.Errorf("google: could not parse JSON key: %v", err) - } - pk, err := internal.ParseKey(cfg.PrivateKey) - if err != nil { - return nil, fmt.Errorf("google: could not parse key: %v", err) - } - ts := &jwtAccessTokenSource{ - email: cfg.Email, - audience: audience, - pk: pk, - pkID: cfg.PrivateKeyID, - } - tok, err := ts.Token() - if err != nil { - return nil, err - } - return oauth2.ReuseTokenSource(tok, ts), nil -} - -type jwtAccessTokenSource struct { - email, audience string - pk *rsa.PrivateKey - pkID string -} - -func (ts *jwtAccessTokenSource) Token() (*oauth2.Token, error) { - iat := time.Now() - exp := iat.Add(time.Hour) - cs := &jws.ClaimSet{ - Iss: ts.email, - Sub: ts.email, - Aud: ts.audience, - Iat: iat.Unix(), - Exp: exp.Unix(), - } - hdr := &jws.Header{ - Algorithm: "RS256", - Typ: "JWT", - KeyID: string(ts.pkID), - } - msg, err := jws.Encode(hdr, cs, ts.pk) - if err != nil { - return nil, fmt.Errorf("google: could not encode JWT: %v", err) - } - return &oauth2.Token{AccessToken: msg, TokenType: "Bearer", Expiry: exp}, nil -} diff --git a/vendor/golang.org/x/oauth2/google/sdk.go b/vendor/golang.org/x/oauth2/google/sdk.go deleted file mode 100644 index d29a3bb9bbc5..000000000000 --- a/vendor/golang.org/x/oauth2/google/sdk.go +++ /dev/null @@ -1,168 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package google - -import ( - "encoding/json" - "errors" - "fmt" - "net/http" - "os" - "os/user" - "path/filepath" - "runtime" - "strings" - "time" - - "golang.org/x/net/context" - "golang.org/x/oauth2" - "golang.org/x/oauth2/internal" -) - -type sdkCredentials struct { - Data []struct { - Credential struct { - ClientID string `json:"client_id"` - ClientSecret string `json:"client_secret"` - AccessToken string `json:"access_token"` - RefreshToken string `json:"refresh_token"` - TokenExpiry *time.Time `json:"token_expiry"` - } `json:"credential"` - Key struct { - Account string `json:"account"` - Scope string `json:"scope"` - } `json:"key"` - } -} - -// An SDKConfig provides access to tokens from an account already -// authorized via the Google Cloud SDK. -type SDKConfig struct { - conf oauth2.Config - initialToken *oauth2.Token -} - -// NewSDKConfig creates an SDKConfig for the given Google Cloud SDK -// account. If account is empty, the account currently active in -// Google Cloud SDK properties is used. -// Google Cloud SDK credentials must be created by running `gcloud auth` -// before using this function. -// The Google Cloud SDK is available at https://cloud.google.com/sdk/. 
-func NewSDKConfig(account string) (*SDKConfig, error) { - configPath, err := sdkConfigPath() - if err != nil { - return nil, fmt.Errorf("oauth2/google: error getting SDK config path: %v", err) - } - credentialsPath := filepath.Join(configPath, "credentials") - f, err := os.Open(credentialsPath) - if err != nil { - return nil, fmt.Errorf("oauth2/google: failed to load SDK credentials: %v", err) - } - defer f.Close() - - var c sdkCredentials - if err := json.NewDecoder(f).Decode(&c); err != nil { - return nil, fmt.Errorf("oauth2/google: failed to decode SDK credentials from %q: %v", credentialsPath, err) - } - if len(c.Data) == 0 { - return nil, fmt.Errorf("oauth2/google: no credentials found in %q, run `gcloud auth login` to create one", credentialsPath) - } - if account == "" { - propertiesPath := filepath.Join(configPath, "properties") - f, err := os.Open(propertiesPath) - if err != nil { - return nil, fmt.Errorf("oauth2/google: failed to load SDK properties: %v", err) - } - defer f.Close() - ini, err := internal.ParseINI(f) - if err != nil { - return nil, fmt.Errorf("oauth2/google: failed to parse SDK properties %q: %v", propertiesPath, err) - } - core, ok := ini["core"] - if !ok { - return nil, fmt.Errorf("oauth2/google: failed to find [core] section in %v", ini) - } - active, ok := core["account"] - if !ok { - return nil, fmt.Errorf("oauth2/google: failed to find %q attribute in %v", "account", core) - } - account = active - } - - for _, d := range c.Data { - if account == "" || d.Key.Account == account { - if d.Credential.AccessToken == "" && d.Credential.RefreshToken == "" { - return nil, fmt.Errorf("oauth2/google: no token available for account %q", account) - } - var expiry time.Time - if d.Credential.TokenExpiry != nil { - expiry = *d.Credential.TokenExpiry - } - return &SDKConfig{ - conf: oauth2.Config{ - ClientID: d.Credential.ClientID, - ClientSecret: d.Credential.ClientSecret, - Scopes: strings.Split(d.Key.Scope, " "), - Endpoint: Endpoint, - RedirectURL: "oob", - }, - initialToken: &oauth2.Token{ - AccessToken: d.Credential.AccessToken, - RefreshToken: d.Credential.RefreshToken, - Expiry: expiry, - }, - }, nil - } - } - return nil, fmt.Errorf("oauth2/google: no such credentials for account %q", account) -} - -// Client returns an HTTP client using Google Cloud SDK credentials to -// authorize requests. The token will auto-refresh as necessary. The -// underlying http.RoundTripper will be obtained using the provided -// context. The returned client and its Transport should not be -// modified. -func (c *SDKConfig) Client(ctx context.Context) *http.Client { - return &http.Client{ - Transport: &oauth2.Transport{ - Source: c.TokenSource(ctx), - }, - } -} - -// TokenSource returns an oauth2.TokenSource that retrieves tokens from -// Google Cloud SDK credentials using the provided context. -// It will return the current access token stored in the credentials, -// and refresh it when it expires, but it won't update the credentials -// with the new access token. -func (c *SDKConfig) TokenSource(ctx context.Context) oauth2.TokenSource { - return c.conf.TokenSource(ctx, c.initialToken) -} - -// Scopes are the OAuth 2.0 scopes the current account is authorized for. -func (c *SDKConfig) Scopes() []string { - return c.conf.Scopes -} - -// sdkConfigPath tries to guess where the gcloud config is located. -// It can be overridden during tests.
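(Editorial aside, not part of the diff: because sdkConfigPath is a package-level func variable, an in-package test could stub it as sketched below; the test name and testdata path are hypothetical, not the package's real tests. Assumes `package google` and an `import "testing"`.)

    // Hypothetical in-package test sketch; "testdata/gcloud" is a made-up fixture dir.
    func TestNewSDKConfigStubbedPath(t *testing.T) {
    	orig := sdkConfigPath
    	defer func() { sdkConfigPath = orig }()
    	sdkConfigPath = func() (string, error) { return "testdata/gcloud", nil }
    	if _, err := NewSDKConfig(""); err != nil {
    		t.Fatal(err)
    	}
    }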
-var sdkConfigPath = func() (string, error) { - if runtime.GOOS == "windows" { - return filepath.Join(os.Getenv("APPDATA"), "gcloud"), nil - } - homeDir := guessUnixHomeDir() - if homeDir == "" { - return "", errors.New("unable to get current user home directory: os/user lookup failed; $HOME is empty") - } - return filepath.Join(homeDir, ".config", "gcloud"), nil -} - -func guessUnixHomeDir() string { - usr, err := user.Current() - if err == nil { - return usr.HomeDir - } - return os.Getenv("HOME") -} diff --git a/vendor/golang.org/x/oauth2/internal/BUILD b/vendor/golang.org/x/oauth2/internal/BUILD index 09a9e57fe988..8af2f598a814 100644 --- a/vendor/golang.org/x/oauth2/internal/BUILD +++ b/vendor/golang.org/x/oauth2/internal/BUILD @@ -1,9 +1,4 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "go_default_library", @@ -13,6 +8,19 @@ go_library( "transport.go", ], importpath = "golang.org/x/oauth2/internal", + visibility = ["//visibility:public"], + deps = ["//vendor/golang.org/x/net/context:go_default_library"], +) + +go_test( + name = "go_default_test", + srcs = [ + "oauth2_test.go", + "token_test.go", + "transport_test.go", + ], + importpath = "golang.org/x/oauth2/internal", + library = ":go_default_library", deps = ["//vendor/golang.org/x/net/context:go_default_library"], ) @@ -27,4 +35,5 @@ filegroup( name = "all-srcs", srcs = [":package-srcs"], tags = ["automanaged"], + visibility = ["//visibility:public"], ) diff --git a/vendor/golang.org/x/oauth2/internal/oauth2_test.go b/vendor/golang.org/x/oauth2/internal/oauth2_test.go new file mode 100644 index 000000000000..c61585542d1b --- /dev/null +++ b/vendor/golang.org/x/oauth2/internal/oauth2_test.go @@ -0,0 +1,62 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package internal contains support packages for oauth2 package. +package internal + +import ( + "reflect" + "strings" + "testing" +) + +func TestParseINI(t *testing.T) { + tests := []struct { + ini string + want map[string]map[string]string + }{ + { + `root = toor +[foo] +bar = hop +ini = nin +`, + map[string]map[string]string{ + "": map[string]string{"root": "toor"}, + "foo": map[string]string{"bar": "hop", "ini": "nin"}, + }, + }, + { + `[empty] +[section] +empty= +`, + map[string]map[string]string{ + "": map[string]string{}, + "empty": map[string]string{}, + "section": map[string]string{"empty": ""}, + }, + }, + { + `ignore +[invalid +=stuff +;comment=true +`, + map[string]map[string]string{ + "": map[string]string{}, + }, + }, + } + for _, tt := range tests { + result, err := ParseINI(strings.NewReader(tt.ini)) + if err != nil { + t.Errorf("ParseINI(%q) error %v, want: no error", tt.ini, err) + continue + } + if !reflect.DeepEqual(result, tt.want) { + t.Errorf("ParseINI(%q) = %#v, want: %#v", tt.ini, result, tt.want) + } + } +} diff --git a/vendor/golang.org/x/oauth2/internal/token_test.go b/vendor/golang.org/x/oauth2/internal/token_test.go new file mode 100644 index 000000000000..5ed0b15d5f44 --- /dev/null +++ b/vendor/golang.org/x/oauth2/internal/token_test.go @@ -0,0 +1,35 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package internal contains support packages for oauth2 package. 
+package internal + +import ( + "fmt" + "testing" +) + +func TestRegisterBrokenAuthHeaderProvider(t *testing.T) { + RegisterBrokenAuthHeaderProvider("https://aaa.com/") + tokenURL := "https://aaa.com/token" + if providerAuthHeaderWorks(tokenURL) { + t.Errorf("got %q as unbroken; want broken", tokenURL) + } +} + +func Test_providerAuthHeaderWorks(t *testing.T) { + for _, p := range brokenAuthHeaderProviders { + if providerAuthHeaderWorks(p) { + t.Errorf("got %q as unbroken; want broken", p) + } + p := fmt.Sprintf("%ssomesuffix", p) + if providerAuthHeaderWorks(p) { + t.Errorf("got %q as unbroken; want broken", p) + } + } + p := "https://api.not-in-the-list-example.com/" + if !providerAuthHeaderWorks(p) { + t.Errorf("got %q as broken; want unbroken", p) + } +} diff --git a/vendor/golang.org/x/oauth2/internal/transport_test.go b/vendor/golang.org/x/oauth2/internal/transport_test.go new file mode 100644 index 000000000000..8772ec5c4585 --- /dev/null +++ b/vendor/golang.org/x/oauth2/internal/transport_test.go @@ -0,0 +1,38 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package internal + +import ( + "net/http" + "testing" + + "golang.org/x/net/context" +) + +func TestContextClient(t *testing.T) { + rc := &http.Client{} + RegisterContextClientFunc(func(context.Context) (*http.Client, error) { + return rc, nil + }) + + c := &http.Client{} + ctx := context.WithValue(context.Background(), HTTPClient, c) + + hc, err := ContextClient(ctx) + if err != nil { + t.Fatalf("want valid client; got err = %v", err) + } + if hc != c { + t.Fatalf("want context client = %p; got = %p", c, hc) + } + + hc, err = ContextClient(context.TODO()) + if err != nil { + t.Fatalf("want valid client; got err = %v", err) + } + if hc != rc { + t.Fatalf("want registered client = %p; got = %p", rc, hc) + } +} diff --git a/vendor/golang.org/x/oauth2/jws/BUILD b/vendor/golang.org/x/oauth2/jws/BUILD deleted file mode 100644 index 98793ffc6a21..000000000000 --- a/vendor/golang.org/x/oauth2/jws/BUILD +++ /dev/null @@ -1,25 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) - -go_library( - name = "go_default_library", - srcs = ["jws.go"], - importpath = "golang.org/x/oauth2/jws", -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], -) diff --git a/vendor/golang.org/x/oauth2/jws/jws.go b/vendor/golang.org/x/oauth2/jws/jws.go deleted file mode 100644 index 8bcecb46ba5f..000000000000 --- a/vendor/golang.org/x/oauth2/jws/jws.go +++ /dev/null @@ -1,182 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package jws provides a partial implementation -// of JSON Web Signature encoding and decoding. -// It exists to support the golang.org/x/oauth2 package. -// -// See RFC 7515. -// -// Deprecated: this package is not intended for public use and might be -// removed in the future. It exists for internal use only. -// Please switch to another JWS package or copy this package into your own -// source tree.
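(Editorial aside, not part of the diff: the Encode/Verify pair deleted below round-trips as sketched here with a throwaway RSA key; the claim values are placeholders, and the signatures used are exactly the ones visible in the deleted jws.go.)

    package main

    import (
    	"crypto/rand"
    	"crypto/rsa"
    	"log"

    	"golang.org/x/oauth2/jws"
    )

    func main() {
    	key, err := rsa.GenerateKey(rand.Reader, 2048)
    	if err != nil {
    		log.Fatal(err)
    	}
    	header := &jws.Header{Algorithm: "RS256", Typ: "JWT"}
    	claims := &jws.ClaimSet{Iss: "issuer@example.com", Aud: "https://example.com/"} // placeholder claims
    	token, err := jws.Encode(header, claims, key)
    	if err != nil {
    		log.Fatal(err)
    	}
    	// Verify checks the RS256 signature against the matching public key.
    	if err := jws.Verify(token, &key.PublicKey); err != nil {
    		log.Fatal(err)
    	}
    	log.Println("token verified")
    }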
-package jws - -import ( - "bytes" - "crypto" - "crypto/rand" - "crypto/rsa" - "crypto/sha256" - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "strings" - "time" -) - -// ClaimSet contains information about the JWT signature including the -// permissions being requested (scopes), the target of the token, the issuer, -// the time the token was issued, and the lifetime of the token. -type ClaimSet struct { - Iss string `json:"iss"` // email address of the client_id of the application making the access token request - Scope string `json:"scope,omitempty"` // space-delimited list of the permissions the application requests - Aud string `json:"aud"` // descriptor of the intended target of the assertion (Optional). - Exp int64 `json:"exp"` // the expiration time of the assertion (seconds since Unix epoch) - Iat int64 `json:"iat"` // the time the assertion was issued (seconds since Unix epoch) - Typ string `json:"typ,omitempty"` // token type (Optional). - - // Email for which the application is requesting delegated access (Optional). - Sub string `json:"sub,omitempty"` - - // The old name of Sub. Client keeps setting Prn to be - // compliant with legacy OAuth 2.0 providers. (Optional) - Prn string `json:"prn,omitempty"` - - // See http://tools.ietf.org/html/draft-jones-json-web-token-10#section-4.3 - // This array is marshalled using custom code (see (c *ClaimSet) encode()). - PrivateClaims map[string]interface{} `json:"-"` -} - -func (c *ClaimSet) encode() (string, error) { - // Reverting time back for machines whose time is not perfectly in sync. - // If client machine's time is in the future according - // to Google servers, an access token will not be issued. - now := time.Now().Add(-10 * time.Second) - if c.Iat == 0 { - c.Iat = now.Unix() - } - if c.Exp == 0 { - c.Exp = now.Add(time.Hour).Unix() - } - if c.Exp < c.Iat { - return "", fmt.Errorf("jws: invalid Exp = %v; must be later than Iat = %v", c.Exp, c.Iat) - } - - b, err := json.Marshal(c) - if err != nil { - return "", err - } - - if len(c.PrivateClaims) == 0 { - return base64.RawURLEncoding.EncodeToString(b), nil - } - - // Marshal private claim set and then append it to b. - prv, err := json.Marshal(c.PrivateClaims) - if err != nil { - return "", fmt.Errorf("jws: invalid map of private claims %v", c.PrivateClaims) - } - - // Concatenate public and private claim JSON objects. - if !bytes.HasSuffix(b, []byte{'}'}) { - return "", fmt.Errorf("jws: invalid JSON %s", b) - } - if !bytes.HasPrefix(prv, []byte{'{'}) { - return "", fmt.Errorf("jws: invalid JSON %s", prv) - } - b[len(b)-1] = ',' // Replace closing curly brace with a comma. - b = append(b, prv[1:]...) // Append private claims. - return base64.RawURLEncoding.EncodeToString(b), nil -} - -// Header represents the header for the signed JWS payloads. -type Header struct { - // The algorithm used for signature. - Algorithm string `json:"alg"` - - // Represents the token type. - Typ string `json:"typ"` - - // The optional hint of which key is being used. - KeyID string `json:"kid,omitempty"` -} - -func (h *Header) encode() (string, error) { - b, err := json.Marshal(h) - if err != nil { - return "", err - } - return base64.RawURLEncoding.EncodeToString(b), nil -} - -// Decode decodes a claim set from a JWS payload. -func Decode(payload string) (*ClaimSet, error) { - // decode returned id token to get expiry - s := strings.Split(payload, ".") - if len(s) < 2 { - // TODO(jbd): Provide more context about the error.
- return nil, errors.New("jws: invalid token received") - } - decoded, err := base64.RawURLEncoding.DecodeString(s[1]) - if err != nil { - return nil, err - } - c := &ClaimSet{} - err = json.NewDecoder(bytes.NewBuffer(decoded)).Decode(c) - return c, err -} - -// Signer returns a signature for the given data. -type Signer func(data []byte) (sig []byte, err error) - -// EncodeWithSigner encodes a header and claim set with the provided signer. -func EncodeWithSigner(header *Header, c *ClaimSet, sg Signer) (string, error) { - head, err := header.encode() - if err != nil { - return "", err - } - cs, err := c.encode() - if err != nil { - return "", err - } - ss := fmt.Sprintf("%s.%s", head, cs) - sig, err := sg([]byte(ss)) - if err != nil { - return "", err - } - return fmt.Sprintf("%s.%s", ss, base64.RawURLEncoding.EncodeToString(sig)), nil -} - -// Encode encodes a signed JWS with provided header and claim set. -// This invokes EncodeWithSigner using crypto/rsa.SignPKCS1v15 with the given RSA private key. -func Encode(header *Header, c *ClaimSet, key *rsa.PrivateKey) (string, error) { - sg := func(data []byte) (sig []byte, err error) { - h := sha256.New() - h.Write(data) - return rsa.SignPKCS1v15(rand.Reader, key, crypto.SHA256, h.Sum(nil)) - } - return EncodeWithSigner(header, c, sg) -} - -// Verify tests whether the provided JWT token's signature was produced by the private key -// associated with the supplied public key. -func Verify(token string, key *rsa.PublicKey) error { - parts := strings.Split(token, ".") - if len(parts) != 3 { - return errors.New("jws: invalid token received, token must have 3 parts") - } - - signedContent := parts[0] + "." + parts[1] - signatureString, err := base64.RawURLEncoding.DecodeString(parts[2]) - if err != nil { - return err - } - - h := sha256.New() - h.Write([]byte(signedContent)) - return rsa.VerifyPKCS1v15(key, crypto.SHA256, h.Sum(nil), []byte(signatureString)) -} diff --git a/vendor/golang.org/x/oauth2/jwt/BUILD b/vendor/golang.org/x/oauth2/jwt/BUILD deleted file mode 100644 index 5ecbf5a5a495..000000000000 --- a/vendor/golang.org/x/oauth2/jwt/BUILD +++ /dev/null @@ -1,31 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) - -go_library( - name = "go_default_library", - srcs = ["jwt.go"], - importpath = "golang.org/x/oauth2/jwt", - deps = [ - "//vendor/golang.org/x/net/context:go_default_library", - "//vendor/golang.org/x/oauth2:go_default_library", - "//vendor/golang.org/x/oauth2/internal:go_default_library", - "//vendor/golang.org/x/oauth2/jws:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], -) diff --git a/vendor/golang.org/x/oauth2/jwt/jwt.go b/vendor/golang.org/x/oauth2/jwt/jwt.go deleted file mode 100644 index f4b9523e6e4c..000000000000 --- a/vendor/golang.org/x/oauth2/jwt/jwt.go +++ /dev/null @@ -1,157 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package jwt implements the OAuth 2.0 JSON Web Token flow, commonly -// known as "two-legged OAuth 2.0". 
-// -// See: https://tools.ietf.org/html/draft-ietf-oauth-jwt-bearer-12 -package jwt - -import ( - "encoding/json" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "strings" - "time" - - "golang.org/x/net/context" - "golang.org/x/oauth2" - "golang.org/x/oauth2/internal" - "golang.org/x/oauth2/jws" -) - -var ( - defaultGrantType = "urn:ietf:params:oauth:grant-type:jwt-bearer" - defaultHeader = &jws.Header{Algorithm: "RS256", Typ: "JWT"} -) - -// Config is the configuration for using JWT to fetch tokens, -// commonly known as "two-legged OAuth 2.0". -type Config struct { - // Email is the OAuth client identifier used when communicating with - // the configured OAuth provider. - Email string - - // PrivateKey contains the contents of an RSA private key or the - // contents of a PEM file that contains a private key. The provided - // private key is used to sign JWT payloads. - // PEM containers with a passphrase are not supported. - // Use the following command to convert a PKCS 12 file into a PEM. - // - // $ openssl pkcs12 -in key.p12 -out key.pem -nodes - // - PrivateKey []byte - - // PrivateKeyID contains an optional hint indicating which key is being - // used. - PrivateKeyID string - - // Subject is the optional user to impersonate. - Subject string - - // Scopes optionally specifies a list of requested permission scopes. - Scopes []string - - // TokenURL is the endpoint required to complete the 2-legged JWT flow. - TokenURL string - - // Expires optionally specifies how long the token is valid for. - Expires time.Duration -} - -// TokenSource returns a JWT TokenSource using the configuration -// in c and the HTTP client from the provided context. -func (c *Config) TokenSource(ctx context.Context) oauth2.TokenSource { - return oauth2.ReuseTokenSource(nil, jwtSource{ctx, c}) -} - -// Client returns an HTTP client wrapping the context's -// HTTP transport and adding Authorization headers with tokens -// obtained from c. -// -// The returned client and its Transport should not be modified. -func (c *Config) Client(ctx context.Context) *http.Client { - return oauth2.NewClient(ctx, c.TokenSource(ctx)) -} - -// jwtSource is a source that always does a signed JWT request for a token. -// It should typically be wrapped with a reuseTokenSource. -type jwtSource struct { - ctx context.Context - conf *Config -} - -func (js jwtSource) Token() (*oauth2.Token, error) { - pk, err := internal.ParseKey(js.conf.PrivateKey) - if err != nil { - return nil, err - } - hc := oauth2.NewClient(js.ctx, nil) - claimSet := &jws.ClaimSet{ - Iss: js.conf.Email, - Scope: strings.Join(js.conf.Scopes, " "), - Aud: js.conf.TokenURL, - } - if subject := js.conf.Subject; subject != "" { - claimSet.Sub = subject - // prn is the old name of sub. Keep setting it - // to be compatible with legacy OAuth 2.0 providers. 
- claimSet.Prn = subject - } - if t := js.conf.Expires; t > 0 { - claimSet.Exp = time.Now().Add(t).Unix() - } - payload, err := jws.Encode(defaultHeader, claimSet, pk) - if err != nil { - return nil, err - } - v := url.Values{} - v.Set("grant_type", defaultGrantType) - v.Set("assertion", payload) - resp, err := hc.PostForm(js.conf.TokenURL, v) - if err != nil { - return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) - } - defer resp.Body.Close() - body, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20)) - if err != nil { - return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) - } - if c := resp.StatusCode; c < 200 || c > 299 { - return nil, fmt.Errorf("oauth2: cannot fetch token: %v\nResponse: %s", resp.Status, body) - } - // tokenRes is the JSON response body. - var tokenRes struct { - AccessToken string `json:"access_token"` - TokenType string `json:"token_type"` - IDToken string `json:"id_token"` - ExpiresIn int64 `json:"expires_in"` // relative seconds from now - } - if err := json.Unmarshal(body, &tokenRes); err != nil { - return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) - } - token := &oauth2.Token{ - AccessToken: tokenRes.AccessToken, - TokenType: tokenRes.TokenType, - } - raw := make(map[string]interface{}) - json.Unmarshal(body, &raw) // no error checks for optional fields - token = token.WithExtra(raw) - - if secs := tokenRes.ExpiresIn; secs > 0 { - token.Expiry = time.Now().Add(time.Duration(secs) * time.Second) - } - if v := tokenRes.IDToken; v != "" { - // decode returned id token to get expiry - claimSet, err := jws.Decode(v) - if err != nil { - return nil, fmt.Errorf("oauth2: error decoding JWT token: %v", err) - } - token.Expiry = time.Unix(claimSet.Exp, 0) - } - return token, nil -} diff --git a/vendor/golang.org/x/oauth2/oauth2.go b/vendor/golang.org/x/oauth2/oauth2.go index 798edc809fb5..7b06bfe1ef14 100644 --- a/vendor/golang.org/x/oauth2/oauth2.go +++ b/vendor/golang.org/x/oauth2/oauth2.go @@ -5,7 +5,7 @@ // Package oauth2 provides support for making // OAuth2 authorized and authenticated HTTP requests. // It can additionally grant authorization with Bearer JWT. -package oauth2 +package oauth2 // import "golang.org/x/oauth2" import ( "bytes" diff --git a/vendor/golang.org/x/oauth2/oauth2_test.go b/vendor/golang.org/x/oauth2/oauth2_test.go new file mode 100644 index 000000000000..982ea99e94c1 --- /dev/null +++ b/vendor/golang.org/x/oauth2/oauth2_test.go @@ -0,0 +1,456 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package oauth2 + +import ( + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "net/url" + "reflect" + "strconv" + "testing" + "time" + + "golang.org/x/net/context" +) + +type mockTransport struct { + rt func(req *http.Request) (resp *http.Response, err error) +} + +func (t *mockTransport) RoundTrip(req *http.Request) (resp *http.Response, err error) { + return t.rt(req) +} + +func newConf(url string) *Config { + return &Config{ + ClientID: "CLIENT_ID", + ClientSecret: "CLIENT_SECRET", + RedirectURL: "REDIRECT_URL", + Scopes: []string{"scope1", "scope2"}, + Endpoint: Endpoint{ + AuthURL: url + "/auth", + TokenURL: url + "/token", + }, + } +} + +func TestAuthCodeURL(t *testing.T) { + conf := newConf("server") + url := conf.AuthCodeURL("foo", AccessTypeOffline, ApprovalForce) + const want = "server/auth?access_type=offline&approval_prompt=force&client_id=CLIENT_ID&redirect_uri=REDIRECT_URL&response_type=code&scope=scope1+scope2&state=foo" + if got := url; got != want { + t.Errorf("got auth code URL = %q; want %q", got, want) + } +} + +func TestAuthCodeURL_CustomParam(t *testing.T) { + conf := newConf("server") + param := SetAuthURLParam("foo", "bar") + url := conf.AuthCodeURL("baz", param) + const want = "server/auth?client_id=CLIENT_ID&foo=bar&redirect_uri=REDIRECT_URL&response_type=code&scope=scope1+scope2&state=baz" + if got := url; got != want { + t.Errorf("got auth code = %q; want %q", got, want) + } +} + +func TestAuthCodeURL_Optional(t *testing.T) { + conf := &Config{ + ClientID: "CLIENT_ID", + Endpoint: Endpoint{ + AuthURL: "/auth-url", + TokenURL: "/token-url", + }, + } + url := conf.AuthCodeURL("") + const want = "/auth-url?client_id=CLIENT_ID&response_type=code" + if got := url; got != want { + t.Fatalf("got auth code = %q; want %q", got, want) + } +} + +func TestExchangeRequest(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.String() != "/token" { + t.Errorf("Unexpected exchange request URL, %v is found.", r.URL) + } + headerAuth := r.Header.Get("Authorization") + if headerAuth != "Basic Q0xJRU5UX0lEOkNMSUVOVF9TRUNSRVQ=" { + t.Errorf("Unexpected authorization header, %v is found.", headerAuth) + } + headerContentType := r.Header.Get("Content-Type") + if headerContentType != "application/x-www-form-urlencoded" { + t.Errorf("Unexpected Content-Type header, %v is found.", headerContentType) + } + body, err := ioutil.ReadAll(r.Body) + if err != nil { + t.Errorf("Failed reading request body: %s.", err) + } + if string(body) != "client_id=CLIENT_ID&code=exchange-code&grant_type=authorization_code&redirect_uri=REDIRECT_URL&scope=scope1+scope2" { + t.Errorf("Unexpected exchange payload, %v is found.", string(body)) + } + w.Header().Set("Content-Type", "application/x-www-form-urlencoded") + w.Write([]byte("access_token=90d64460d14870c08c81352a05dedd3465940a7c&scope=user&token_type=bearer")) + })) + defer ts.Close() + conf := newConf(ts.URL) + tok, err := conf.Exchange(context.Background(), "exchange-code") + if err != nil { + t.Error(err) + } + if !tok.Valid() { + t.Fatalf("Token invalid. 
Got: %#v", tok) + } + if tok.AccessToken != "90d64460d14870c08c81352a05dedd3465940a7c" { + t.Errorf("Unexpected access token, %#v.", tok.AccessToken) + } + if tok.TokenType != "bearer" { + t.Errorf("Unexpected token type, %#v.", tok.TokenType) + } + scope := tok.Extra("scope") + if scope != "user" { + t.Errorf("Unexpected value for scope: %v", scope) + } +} + +func TestExchangeRequest_JSONResponse(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.String() != "/token" { + t.Errorf("Unexpected exchange request URL, %v is found.", r.URL) + } + headerAuth := r.Header.Get("Authorization") + if headerAuth != "Basic Q0xJRU5UX0lEOkNMSUVOVF9TRUNSRVQ=" { + t.Errorf("Unexpected authorization header, %v is found.", headerAuth) + } + headerContentType := r.Header.Get("Content-Type") + if headerContentType != "application/x-www-form-urlencoded" { + t.Errorf("Unexpected Content-Type header, %v is found.", headerContentType) + } + body, err := ioutil.ReadAll(r.Body) + if err != nil { + t.Errorf("Failed reading request body: %s.", err) + } + if string(body) != "client_id=CLIENT_ID&code=exchange-code&grant_type=authorization_code&redirect_uri=REDIRECT_URL&scope=scope1+scope2" { + t.Errorf("Unexpected exchange payload, %v is found.", string(body)) + } + w.Header().Set("Content-Type", "application/json") + w.Write([]byte(`{"access_token": "90d64460d14870c08c81352a05dedd3465940a7c", "scope": "user", "token_type": "bearer", "expires_in": 86400}`)) + })) + defer ts.Close() + conf := newConf(ts.URL) + tok, err := conf.Exchange(context.Background(), "exchange-code") + if err != nil { + t.Error(err) + } + if !tok.Valid() { + t.Fatalf("Token invalid. Got: %#v", tok) + } + if tok.AccessToken != "90d64460d14870c08c81352a05dedd3465940a7c" { + t.Errorf("Unexpected access token, %#v.", tok.AccessToken) + } + if tok.TokenType != "bearer" { + t.Errorf("Unexpected token type, %#v.", tok.TokenType) + } + scope := tok.Extra("scope") + if scope != "user" { + t.Errorf("Unexpected value for scope: %v", scope) + } + expiresIn := tok.Extra("expires_in") + if expiresIn != float64(86400) { + t.Errorf("Unexpected non-numeric value for expires_in: %v", expiresIn) + } +} + +func TestExtraValueRetrieval(t *testing.T) { + values := url.Values{} + kvmap := map[string]string{ + "scope": "user", "token_type": "bearer", "expires_in": "86400.92", + "server_time": "1443571905.5606415", "referer_ip": "10.0.0.1", + "etag": "\"afZYj912P4alikMz_P11982\"", "request_id": "86400", + "untrimmed": " untrimmed ", + } + for key, value := range kvmap { + values.Set(key, value) + } + + tok := Token{raw: values} + scope := tok.Extra("scope") + if got, want := scope, "user"; got != want { + t.Errorf("got scope = %q; want %q", got, want) + } + serverTime := tok.Extra("server_time") + if got, want := serverTime, 1443571905.5606415; got != want { + t.Errorf("got server_time value = %v; want %v", got, want) + } + refererIP := tok.Extra("referer_ip") + if got, want := refererIP, "10.0.0.1"; got != want { + t.Errorf("got referer_ip value = %v, want %v", got, want) + } + expiresIn := tok.Extra("expires_in") + if got, want := expiresIn, 86400.92; got != want { + t.Errorf("got expires_in value = %v, want %v", got, want) + } + requestID := tok.Extra("request_id") + if got, want := requestID, int64(86400); got != want { + t.Errorf("got request_id value = %v, want %v", got, want) + } + untrimmed := tok.Extra("untrimmed") + if got, want := untrimmed, " untrimmed "; got != want { + t.Errorf("got untrimmed 
= %q; want %q", got, want) + } +} + +const day = 24 * time.Hour + +func TestExchangeRequest_JSONResponse_Expiry(t *testing.T) { + seconds := int32(day.Seconds()) + jsonNumberType := reflect.TypeOf(json.Number("0")) + for _, c := range []struct { + expires string + expect error + }{ + {fmt.Sprintf(`"expires_in": %d`, seconds), nil}, + {fmt.Sprintf(`"expires_in": "%d"`, seconds), nil}, // PayPal case + {fmt.Sprintf(`"expires": %d`, seconds), nil}, // Facebook case + {`"expires": false`, &json.UnmarshalTypeError{Value: "bool", Type: jsonNumberType}}, // wrong type + {`"expires": {}`, &json.UnmarshalTypeError{Value: "object", Type: jsonNumberType}}, // wrong type + {`"expires": "zzz"`, &strconv.NumError{Func: "ParseInt", Num: "zzz", Err: strconv.ErrSyntax}}, // wrong value + } { + testExchangeRequest_JSONResponse_expiry(t, c.expires, c.expect) + } +} + +func testExchangeRequest_JSONResponse_expiry(t *testing.T, exp string, expect error) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + w.Write([]byte(fmt.Sprintf(`{"access_token": "90d", "scope": "user", "token_type": "bearer", %s}`, exp))) + })) + defer ts.Close() + conf := newConf(ts.URL) + t1 := time.Now().Add(day) + tok, err := conf.Exchange(context.Background(), "exchange-code") + t2 := time.Now().Add(day) + // Do a fmt.Sprint comparison so either side can be + // nil. fmt.Sprint just stringifies them to "", and no + // non-nil expected error ever stringifies as "", so this + // isn't terribly disgusting. We do this because Go 1.4 and + // Go 1.5 return a different deep value for + // json.UnmarshalTypeError. In Go 1.5, the + // json.UnmarshalTypeError contains a new field with a new + // non-zero value. Rather than ignore it here with reflect or + // add new files and +build tags, just look at the strings. + if fmt.Sprint(err) != fmt.Sprint(expect) { + t.Errorf("Error = %v; want %v", err, expect) + } + if err != nil { + return + } + if !tok.Valid() { + t.Fatalf("Token invalid. 
Got: %#v", tok) + } + expiry := tok.Expiry + if expiry.Before(t1) || expiry.After(t2) { + t.Errorf("Unexpected value for Expiry: %v (shold be between %v and %v)", expiry, t1, t2) + } +} + +func TestExchangeRequest_BadResponse(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + w.Write([]byte(`{"scope": "user", "token_type": "bearer"}`)) + })) + defer ts.Close() + conf := newConf(ts.URL) + tok, err := conf.Exchange(context.Background(), "code") + if err != nil { + t.Fatal(err) + } + if tok.AccessToken != "" { + t.Errorf("Unexpected access token, %#v.", tok.AccessToken) + } +} + +func TestExchangeRequest_BadResponseType(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + w.Write([]byte(`{"access_token":123, "scope": "user", "token_type": "bearer"}`)) + })) + defer ts.Close() + conf := newConf(ts.URL) + _, err := conf.Exchange(context.Background(), "exchange-code") + if err == nil { + t.Error("expected error from invalid access_token type") + } +} + +func TestExchangeRequest_NonBasicAuth(t *testing.T) { + tr := &mockTransport{ + rt: func(r *http.Request) (w *http.Response, err error) { + headerAuth := r.Header.Get("Authorization") + if headerAuth != "" { + t.Errorf("Unexpected authorization header, %v is found.", headerAuth) + } + return nil, errors.New("no response") + }, + } + c := &http.Client{Transport: tr} + conf := &Config{ + ClientID: "CLIENT_ID", + Endpoint: Endpoint{ + AuthURL: "https://accounts.google.com/auth", + TokenURL: "https://accounts.google.com/token", + }, + } + + ctx := context.WithValue(context.Background(), HTTPClient, c) + conf.Exchange(ctx, "code") +} + +func TestPasswordCredentialsTokenRequest(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + defer r.Body.Close() + expected := "/token" + if r.URL.String() != expected { + t.Errorf("URL = %q; want %q", r.URL, expected) + } + headerAuth := r.Header.Get("Authorization") + expected = "Basic Q0xJRU5UX0lEOkNMSUVOVF9TRUNSRVQ=" + if headerAuth != expected { + t.Errorf("Authorization header = %q; want %q", headerAuth, expected) + } + headerContentType := r.Header.Get("Content-Type") + expected = "application/x-www-form-urlencoded" + if headerContentType != expected { + t.Errorf("Content-Type header = %q; want %q", headerContentType, expected) + } + body, err := ioutil.ReadAll(r.Body) + if err != nil { + t.Errorf("Failed reading request body: %s.", err) + } + expected = "client_id=CLIENT_ID&grant_type=password&password=password1&scope=scope1+scope2&username=user1" + if string(body) != expected { + t.Errorf("res.Body = %q; want %q", string(body), expected) + } + w.Header().Set("Content-Type", "application/x-www-form-urlencoded") + w.Write([]byte("access_token=90d64460d14870c08c81352a05dedd3465940a7c&scope=user&token_type=bearer")) + })) + defer ts.Close() + conf := newConf(ts.URL) + tok, err := conf.PasswordCredentialsToken(context.Background(), "user1", "password1") + if err != nil { + t.Error(err) + } + if !tok.Valid() { + t.Fatalf("Token invalid. 
Got: %#v", tok) + } + expected := "90d64460d14870c08c81352a05dedd3465940a7c" + if tok.AccessToken != expected { + t.Errorf("AccessToken = %q; want %q", tok.AccessToken, expected) + } + expected = "bearer" + if tok.TokenType != expected { + t.Errorf("TokenType = %q; want %q", tok.TokenType, expected) + } +} + +func TestTokenRefreshRequest(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.String() == "/somethingelse" { + return + } + if r.URL.String() != "/token" { + t.Errorf("Unexpected token refresh request URL, %v is found.", r.URL) + } + headerContentType := r.Header.Get("Content-Type") + if headerContentType != "application/x-www-form-urlencoded" { + t.Errorf("Unexpected Content-Type header, %v is found.", headerContentType) + } + body, _ := ioutil.ReadAll(r.Body) + if string(body) != "client_id=CLIENT_ID&grant_type=refresh_token&refresh_token=REFRESH_TOKEN" { + t.Errorf("Unexpected refresh token payload, %v is found.", string(body)) + } + })) + defer ts.Close() + conf := newConf(ts.URL) + c := conf.Client(context.Background(), &Token{RefreshToken: "REFRESH_TOKEN"}) + c.Get(ts.URL + "/somethingelse") +} + +func TestFetchWithNoRefreshToken(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.String() == "/somethingelse" { + return + } + if r.URL.String() != "/token" { + t.Errorf("Unexpected token refresh request URL, %v is found.", r.URL) + } + headerContentType := r.Header.Get("Content-Type") + if headerContentType != "application/x-www-form-urlencoded" { + t.Errorf("Unexpected Content-Type header, %v is found.", headerContentType) + } + body, _ := ioutil.ReadAll(r.Body) + if string(body) != "client_id=CLIENT_ID&grant_type=refresh_token&refresh_token=REFRESH_TOKEN" { + t.Errorf("Unexpected refresh token payload, %v is found.", string(body)) + } + })) + defer ts.Close() + conf := newConf(ts.URL) + c := conf.Client(context.Background(), nil) + _, err := c.Get(ts.URL + "/somethingelse") + if err == nil { + t.Errorf("Fetch should return an error if no refresh token is set") + } +} + +func TestRefreshToken_RefreshTokenReplacement(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + w.Write([]byte(`{"access_token":"ACCESS TOKEN", "scope": "user", "token_type": "bearer", "refresh_token": "NEW REFRESH TOKEN"}`)) + return + })) + defer ts.Close() + conf := newConf(ts.URL) + tkr := tokenRefresher{ + conf: conf, + ctx: context.Background(), + refreshToken: "OLD REFRESH TOKEN", + } + tk, err := tkr.Token() + if err != nil { + t.Errorf("got err = %v; want none", err) + return + } + if tk.RefreshToken != tkr.refreshToken { + t.Errorf("tokenRefresher.refresh_token = %q; want %q", tkr.refreshToken, tk.RefreshToken) + } +} + +func TestConfigClientWithToken(t *testing.T) { + tok := &Token{ + AccessToken: "abc123", + } + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if got, want := r.Header.Get("Authorization"), fmt.Sprintf("Bearer %s", tok.AccessToken); got != want { + t.Errorf("Authorization header = %q; want %q", got, want) + } + return + })) + defer ts.Close() + conf := newConf(ts.URL) + + c := conf.Client(context.Background(), tok) + req, err := http.NewRequest("GET", ts.URL, nil) + if err != nil { + t.Error(err) + } + _, err = c.Do(req) + if err != nil { + t.Error(err) + } +} diff --git 
a/vendor/golang.org/x/oauth2/token_test.go b/vendor/golang.org/x/oauth2/token_test.go new file mode 100644 index 000000000000..80db83c29cd8 --- /dev/null +++ b/vendor/golang.org/x/oauth2/token_test.go @@ -0,0 +1,72 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package oauth2 + +import ( + "testing" + "time" +) + +func TestTokenExtra(t *testing.T) { + type testCase struct { + key string + val interface{} + want interface{} + } + const key = "extra-key" + cases := []testCase{ + {key: key, val: "abc", want: "abc"}, + {key: key, val: 123, want: 123}, + {key: key, val: "", want: ""}, + {key: "other-key", val: "def", want: nil}, + } + for _, tc := range cases { + extra := make(map[string]interface{}) + extra[tc.key] = tc.val + tok := &Token{raw: extra} + if got, want := tok.Extra(key), tc.want; got != want { + t.Errorf("Extra(%q) = %q; want %q", key, got, want) + } + } +} + +func TestTokenExpiry(t *testing.T) { + now := time.Now() + cases := []struct { + name string + tok *Token + want bool + }{ + {name: "12 seconds", tok: &Token{Expiry: now.Add(12 * time.Second)}, want: false}, + {name: "10 seconds", tok: &Token{Expiry: now.Add(expiryDelta)}, want: true}, + {name: "-1 hour", tok: &Token{Expiry: now.Add(-1 * time.Hour)}, want: true}, + } + for _, tc := range cases { + if got, want := tc.tok.expired(), tc.want; got != want { + t.Errorf("expired (%q) = %v; want %v", tc.name, got, want) + } + } +} + +func TestTokenTypeMethod(t *testing.T) { + cases := []struct { + name string + tok *Token + want string + }{ + {name: "bearer-mixed_case", tok: &Token{TokenType: "beAREr"}, want: "Bearer"}, + {name: "default-bearer", tok: &Token{}, want: "Bearer"}, + {name: "basic", tok: &Token{TokenType: "basic"}, want: "Basic"}, + {name: "basic-capitalized", tok: &Token{TokenType: "Basic"}, want: "Basic"}, + {name: "mac", tok: &Token{TokenType: "mac"}, want: "MAC"}, + {name: "mac-caps", tok: &Token{TokenType: "MAC"}, want: "MAC"}, + {name: "mac-mixed_case", tok: &Token{TokenType: "mAc"}, want: "MAC"}, + } + for _, tc := range cases { + if got, want := tc.tok.Type(), tc.want; got != want { + t.Errorf("TokenType(%q) = %v; want %v", tc.name, got, want) + } + } +} diff --git a/vendor/golang.org/x/oauth2/transport_test.go b/vendor/golang.org/x/oauth2/transport_test.go new file mode 100644 index 000000000000..d6e8087d6e4f --- /dev/null +++ b/vendor/golang.org/x/oauth2/transport_test.go @@ -0,0 +1,108 @@ +package oauth2 + +import ( + "net/http" + "net/http/httptest" + "testing" + "time" +) + +type tokenSource struct{ token *Token } + +func (t *tokenSource) Token() (*Token, error) { + return t.token, nil +} + +func TestTransportNilTokenSource(t *testing.T) { + tr := &Transport{} + server := newMockServer(func(w http.ResponseWriter, r *http.Request) {}) + defer server.Close() + client := &http.Client{Transport: tr} + resp, err := client.Get(server.URL) + if err == nil { + t.Errorf("got no errors, want an error with nil token source") + } + if resp != nil { + t.Errorf("Response = %v; want nil", resp) + } +} + +func TestTransportTokenSource(t *testing.T) { + ts := &tokenSource{ + token: &Token{ + AccessToken: "abc", + }, + } + tr := &Transport{ + Source: ts, + } + server := newMockServer(func(w http.ResponseWriter, r *http.Request) { + if got, want := r.Header.Get("Authorization"), "Bearer abc"; got != want { + t.Errorf("Authorization header = %q; want %q", got, want) + } + }) + defer server.Close() + client 
:= &http.Client{Transport: tr} + res, err := client.Get(server.URL) + if err != nil { + t.Fatal(err) + } + res.Body.Close() +} + +// Test for case-sensitive token types, per https://github.com/golang/oauth2/issues/113 +func TestTransportTokenSourceTypes(t *testing.T) { + const val = "abc" + tests := []struct { + key string + val string + want string + }{ + {key: "bearer", val: val, want: "Bearer abc"}, + {key: "mac", val: val, want: "MAC abc"}, + {key: "basic", val: val, want: "Basic abc"}, + } + for _, tc := range tests { + ts := &tokenSource{ + token: &Token{ + AccessToken: tc.val, + TokenType: tc.key, + }, + } + tr := &Transport{ + Source: ts, + } + server := newMockServer(func(w http.ResponseWriter, r *http.Request) { + if got, want := r.Header.Get("Authorization"), tc.want; got != want { + t.Errorf("Authorization header (%q) = %q; want %q", val, got, want) + } + }) + defer server.Close() + client := &http.Client{Transport: tr} + res, err := client.Get(server.URL) + if err != nil { + t.Fatal(err) + } + res.Body.Close() + } +} + +func TestTokenValidNoAccessToken(t *testing.T) { + token := &Token{} + if token.Valid() { + t.Errorf("got valid with no access token; want invalid") + } +} + +func TestExpiredWithExpiry(t *testing.T) { + token := &Token{ + Expiry: time.Now().Add(-5 * time.Hour), + } + if token.Valid() { + t.Errorf("got valid with expired token; want invalid") + } +} + +func newMockServer(handler func(w http.ResponseWriter, r *http.Request)) *httptest.Server { + return httptest.NewServer(http.HandlerFunc(handler)) +} diff --git a/vendor/golang.org/x/sys/.gitattributes b/vendor/golang.org/x/sys/.gitattributes new file mode 100644 index 000000000000..d2f212e5da80 --- /dev/null +++ b/vendor/golang.org/x/sys/.gitattributes @@ -0,0 +1,10 @@ +# Treat all files in this repo as binary, with no git magic updating +# line endings. Windows users contributing to Go will need to use a +# modern version of git and editors capable of LF line endings. +# +# We'll prevent accidental CRLF line endings from entering the repo +# via the git-review gofmt checks. +# +# See golang.org/issue/9281 + +* -text diff --git a/vendor/golang.org/x/sys/.gitignore b/vendor/golang.org/x/sys/.gitignore new file mode 100644 index 000000000000..8339fd61d3f4 --- /dev/null +++ b/vendor/golang.org/x/sys/.gitignore @@ -0,0 +1,2 @@ +# Add no patterns to .hgignore except for files generated by the build. +last-change diff --git a/vendor/golang.org/x/sys/AUTHORS b/vendor/golang.org/x/sys/AUTHORS new file mode 100644 index 000000000000..15167cd746c5 --- /dev/null +++ b/vendor/golang.org/x/sys/AUTHORS @@ -0,0 +1,3 @@ +# This source code refers to The Go Authors for copyright purposes. +# The master list of authors is in the main Go distribution, +# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/sys/CONTRIBUTING.md b/vendor/golang.org/x/sys/CONTRIBUTING.md new file mode 100644 index 000000000000..88dff59bc7d2 --- /dev/null +++ b/vendor/golang.org/x/sys/CONTRIBUTING.md @@ -0,0 +1,31 @@ +# Contributing to Go + +Go is an open source project. + +It is the work of hundreds of contributors. We appreciate your help! + + +## Filing issues + +When [filing an issue](https://golang.org/issue/new), make sure to answer these five questions: + +1. What version of Go are you using (`go version`)? +2. What operating system and processor architecture are you using? +3. What did you do? +4. What did you expect to see? +5. What did you see instead? 
+ +General questions should go to the [golang-nuts mailing list](https://groups.google.com/group/golang-nuts) instead of the issue tracker. +The gophers there will answer or ask you to file an issue if you've tripped over a bug. + +## Contributing code + +Please read the [Contribution Guidelines](https://golang.org/doc/contribute.html) +before sending patches. + +**We do not accept GitHub pull requests** +(we use [Gerrit](https://code.google.com/p/gerrit/) instead for code review). + +Unless otherwise noted, the Go source files are distributed under +the BSD-style license found in the LICENSE file. + diff --git a/vendor/golang.org/x/sys/CONTRIBUTORS b/vendor/golang.org/x/sys/CONTRIBUTORS new file mode 100644 index 000000000000..1c4577e96806 --- /dev/null +++ b/vendor/golang.org/x/sys/CONTRIBUTORS @@ -0,0 +1,3 @@ +# This source code was written by the Go contributors. +# The master list of contributors is in the main Go distribution, +# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/sys/README b/vendor/golang.org/x/sys/README new file mode 100644 index 000000000000..bd422b40c231 --- /dev/null +++ b/vendor/golang.org/x/sys/README @@ -0,0 +1,3 @@ +This repository holds supplemental Go packages for low-level interactions with the operating system. + +To submit changes to this repository, see http://golang.org/doc/contribute.html. diff --git a/vendor/golang.org/x/sys/codereview.cfg b/vendor/golang.org/x/sys/codereview.cfg new file mode 100644 index 000000000000..3f8b14b64e83 --- /dev/null +++ b/vendor/golang.org/x/sys/codereview.cfg @@ -0,0 +1 @@ +issuerepo: golang/go diff --git a/vendor/golang.org/x/sys/unix/BUILD b/vendor/golang.org/x/sys/unix/BUILD index d0eb013bae6a..eea4a9450744 100644 --- a/vendor/golang.org/x/sys/unix/BUILD +++ b/vendor/golang.org/x/sys/unix/BUILD @@ -1,9 +1,4 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "go_default_library", @@ -52,6 +47,52 @@ go_library( }), cgo = True, importpath = "golang.org/x/sys/unix", + visibility = ["//visibility:public"], +) + +go_test( + name = "go_default_test", + srcs = select({ + "@io_bazel_rules_go//go/platform:darwin_amd64": [ + "export_test.go", + ], + "@io_bazel_rules_go//go/platform:linux_amd64": [ + "export_test.go", + ], + "//conditions:default": [], + }), + importpath = "golang.org/x/sys/unix", + library = ":go_default_library", +) + +go_test( + name = "go_default_xtest", + srcs = select({ + "@io_bazel_rules_go//go/platform:darwin_amd64": [ + "mmap_unix_test.go", + "syscall_bsd_test.go", + "syscall_test.go", + "syscall_unix_test.go", + ], + "@io_bazel_rules_go//go/platform:linux_amd64": [ + "creds_test.go", + "mmap_unix_test.go", + "syscall_linux_test.go", + "syscall_test.go", + "syscall_unix_test.go", + ], + "//conditions:default": [], + }), + importpath = "golang.org/x/sys/unix_test", + deps = select({ + "@io_bazel_rules_go//go/platform:darwin_amd64": [ + ":go_default_library", + ], + "@io_bazel_rules_go//go/platform:linux_amd64": [ + ":go_default_library", + ], + "//conditions:default": [], + }), ) filegroup( @@ -65,4 +106,5 @@ filegroup( name = "all-srcs", srcs = [":package-srcs"], tags = ["automanaged"], + visibility = ["//visibility:public"], ) diff --git a/vendor/golang.org/x/sys/unix/creds_test.go b/vendor/golang.org/x/sys/unix/creds_test.go new file mode 100644 index 000000000000..eaae7c367faf --- /dev/null +++ 
b/vendor/golang.org/x/sys/unix/creds_test.go @@ -0,0 +1,121 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux + +package unix_test + +import ( + "bytes" + "net" + "os" + "syscall" + "testing" + + "golang.org/x/sys/unix" +) + +// TestSCMCredentials tests the sending and receiving of credentials +// (PID, UID, GID) in an ancillary message between two UNIX +// sockets. The SO_PASSCRED socket option is enabled on the sending +// socket for this to work. +func TestSCMCredentials(t *testing.T) { + fds, err := unix.Socketpair(unix.AF_LOCAL, unix.SOCK_STREAM, 0) + if err != nil { + t.Fatalf("Socketpair: %v", err) + } + defer unix.Close(fds[0]) + defer unix.Close(fds[1]) + + err = unix.SetsockoptInt(fds[0], unix.SOL_SOCKET, unix.SO_PASSCRED, 1) + if err != nil { + t.Fatalf("SetsockoptInt: %v", err) + } + + srvFile := os.NewFile(uintptr(fds[0]), "server") + defer srvFile.Close() + srv, err := net.FileConn(srvFile) + if err != nil { + t.Errorf("FileConn: %v", err) + return + } + defer srv.Close() + + cliFile := os.NewFile(uintptr(fds[1]), "client") + defer cliFile.Close() + cli, err := net.FileConn(cliFile) + if err != nil { + t.Errorf("FileConn: %v", err) + return + } + defer cli.Close() + + var ucred unix.Ucred + if os.Getuid() != 0 { + ucred.Pid = int32(os.Getpid()) + ucred.Uid = 0 + ucred.Gid = 0 + oob := unix.UnixCredentials(&ucred) + _, _, err := cli.(*net.UnixConn).WriteMsgUnix(nil, oob, nil) + if op, ok := err.(*net.OpError); ok { + err = op.Err + } + if sys, ok := err.(*os.SyscallError); ok { + err = sys.Err + } + if err != syscall.EPERM { + t.Fatalf("WriteMsgUnix failed with %v, want EPERM", err) + } + } + + ucred.Pid = int32(os.Getpid()) + ucred.Uid = uint32(os.Getuid()) + ucred.Gid = uint32(os.Getgid()) + oob := unix.UnixCredentials(&ucred) + + // this is going to send a dummy byte + n, oobn, err := cli.(*net.UnixConn).WriteMsgUnix(nil, oob, nil) + if err != nil { + t.Fatalf("WriteMsgUnix: %v", err) + } + if n != 0 { + t.Fatalf("WriteMsgUnix n = %d, want 0", n) + } + if oobn != len(oob) { + t.Fatalf("WriteMsgUnix oobn = %d, want %d", oobn, len(oob)) + } + + oob2 := make([]byte, 10*len(oob)) + n, oobn2, flags, _, err := srv.(*net.UnixConn).ReadMsgUnix(nil, oob2) + if err != nil { + t.Fatalf("ReadMsgUnix: %v", err) + } + if flags != 0 { + t.Fatalf("ReadMsgUnix flags = 0x%x, want 0", flags) + } + if n != 1 { + t.Fatalf("ReadMsgUnix n = %d, want 1 (dummy byte)", n) + } + if oobn2 != oobn { + // without SO_PASSCRED set on the socket, ReadMsgUnix will + // return zero oob bytes + t.Fatalf("ReadMsgUnix oobn = %d, want %d", oobn2, oobn) + } + oob2 = oob2[:oobn2] + if !bytes.Equal(oob, oob2) { + t.Fatal("ReadMsgUnix oob bytes don't match") + } + + scm, err := unix.ParseSocketControlMessage(oob2) + if err != nil { + t.Fatalf("ParseSocketControlMessage: %v", err) + } + newUcred, err := unix.ParseUnixCredentials(&scm[0]) + if err != nil { + t.Fatalf("ParseUnixCredentials: %v", err) + } + if *newUcred != ucred { + t.Fatalf("ParseUnixCredentials = %+v, want %+v", newUcred, ucred) + } +} diff --git a/vendor/golang.org/x/sys/unix/export_test.go b/vendor/golang.org/x/sys/unix/export_test.go new file mode 100644 index 000000000000..b4fdd970b6cf --- /dev/null +++ b/vendor/golang.org/x/sys/unix/export_test.go @@ -0,0 +1,9 @@ +// Copyright 2015 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +package unix + +var Itoa = itoa diff --git a/vendor/golang.org/x/sys/unix/mmap_unix_test.go b/vendor/golang.org/x/sys/unix/mmap_unix_test.go new file mode 100644 index 000000000000..18ccec05f13f --- /dev/null +++ b/vendor/golang.org/x/sys/unix/mmap_unix_test.go @@ -0,0 +1,23 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +package unix_test + +import ( + "testing" + + "golang.org/x/sys/unix" +) + +func TestMmap(t *testing.T) { + b, err := unix.Mmap(-1, 0, unix.Getpagesize(), unix.PROT_NONE, unix.MAP_ANON|unix.MAP_PRIVATE) + if err != nil { + t.Fatalf("Mmap: %v", err) + } + if err := unix.Munmap(b); err != nil { + t.Fatalf("Munmap: %v", err) + } +} diff --git a/vendor/golang.org/x/sys/unix/syscall.go b/vendor/golang.org/x/sys/unix/syscall.go index 571e6993c3c7..a0bcf842ca8f 100644 --- a/vendor/golang.org/x/sys/unix/syscall.go +++ b/vendor/golang.org/x/sys/unix/syscall.go @@ -19,7 +19,7 @@ // These calls return err == nil to indicate success; otherwise // err represents an operating system error describing the failure and // holds a value of type syscall.Errno. -package unix +package unix // import "golang.org/x/sys/unix" import "unsafe" diff --git a/vendor/golang.org/x/sys/unix/syscall_bsd_test.go b/vendor/golang.org/x/sys/unix/syscall_bsd_test.go new file mode 100644 index 000000000000..2ad51290ccac --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_bsd_test.go @@ -0,0 +1,47 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd openbsd + +package unix_test + +import ( + "runtime" + "testing" + + "golang.org/x/sys/unix" +) + +const MNT_WAIT = 1 + +func TestGetfsstat(t *testing.T) { + n, err := unix.Getfsstat(nil, MNT_WAIT) + if err != nil { + t.Fatal(err) + } + + data := make([]unix.Statfs_t, n) + n, err = unix.Getfsstat(data, MNT_WAIT) + if err != nil { + t.Fatal(err) + } + + empty := unix.Statfs_t{} + for _, stat := range data { + if stat == empty { + t.Fatal("an empty Statfs_t struct was returned") + } + } +} + +func TestSysctlRaw(t *testing.T) { + if runtime.GOOS == "openbsd" { + t.Skip("kern.proc.pid does not exist on OpenBSD") + } + + _, err := unix.SysctlRaw("kern.proc.pid", unix.Getpid()) + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_test.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_test.go new file mode 100644 index 000000000000..cd13080ad77f --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_test.go @@ -0,0 +1,24 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build freebsd + +package unix_test + +import ( + "os" + "testing" + + "golang.org/x/sys/unix" +) + +func TestSysctUint64(t *testing.T) { + _, err := unix.SysctlUint64("vm.max_kernel_address") + if err != nil { + if os.Getenv("GO_BUILDER_NAME") == "freebsd-386-gce101" { + t.Skipf("Ignoring known failing test (golang.org/issue/15186). 
Failed with: %v", err) + } + t.Fatal(err) + } +} diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_test.go b/vendor/golang.org/x/sys/unix/syscall_linux_test.go new file mode 100644 index 000000000000..91184cae0696 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_linux_test.go @@ -0,0 +1,186 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux + +package unix_test + +import ( + "io/ioutil" + "os" + "testing" + "time" + + "golang.org/x/sys/unix" +) + +func TestPoll(t *testing.T) { + f, cleanup := mktmpfifo(t) + defer cleanup() + + const timeout = 100 + + ok := make(chan bool, 1) + go func() { + select { + case <-time.After(10 * timeout * time.Millisecond): + t.Errorf("Poll: failed to timeout after %d milliseconds", 10*timeout) + case <-ok: + } + }() + + fds := []unix.PollFd{{Fd: int32(f.Fd()), Events: unix.POLLIN}} + n, err := unix.Poll(fds, timeout) + ok <- true + if err != nil { + t.Errorf("Poll: unexpected error: %v", err) + return + } + if n != 0 { + t.Errorf("Poll: wrong number of events: got %v, expected %v", n, 0) + return + } +} + +func TestPpoll(t *testing.T) { + f, cleanup := mktmpfifo(t) + defer cleanup() + + const timeout = 100 * time.Millisecond + + ok := make(chan bool, 1) + go func() { + select { + case <-time.After(10 * timeout): + t.Errorf("Ppoll: failed to timeout after %d", 10*timeout) + case <-ok: + } + }() + + fds := []unix.PollFd{{Fd: int32(f.Fd()), Events: unix.POLLIN}} + timeoutTs := unix.NsecToTimespec(int64(timeout)) + n, err := unix.Ppoll(fds, &timeoutTs, nil) + ok <- true + if err != nil { + t.Errorf("Ppoll: unexpected error: %v", err) + return + } + if n != 0 { + t.Errorf("Ppoll: wrong number of events: got %v, expected %v", n, 0) + return + } +} + +// mktmpfifo creates a temporary FIFO and provides a cleanup function. 
+func mktmpfifo(t *testing.T) (*os.File, func()) { + err := unix.Mkfifo("fifo", 0666) + if err != nil { + t.Fatalf("mktmpfifo: failed to create FIFO: %v", err) + } + + f, err := os.OpenFile("fifo", os.O_RDWR, 0666) + if err != nil { + os.Remove("fifo") + t.Fatalf("mktmpfifo: failed to open FIFO: %v", err) + } + + return f, func() { + f.Close() + os.Remove("fifo") + } +} + +func TestTime(t *testing.T) { + var ut unix.Time_t + ut2, err := unix.Time(&ut) + if err != nil { + t.Fatalf("Time: %v", err) + } + if ut != ut2 { + t.Errorf("Time: return value %v should be equal to argument %v", ut2, ut) + } + + var now time.Time + + for i := 0; i < 10; i++ { + ut, err = unix.Time(nil) + if err != nil { + t.Fatalf("Time: %v", err) + } + + now = time.Now() + + if int64(ut) == now.Unix() { + return + } + } + + t.Errorf("Time: return value %v should be nearly equal to time.Now().Unix() %v", ut, now.Unix()) +} + +func TestUtime(t *testing.T) { + defer chtmpdir(t)() + + touch(t, "file1") + + buf := &unix.Utimbuf{ + Modtime: 12345, + } + + err := unix.Utime("file1", buf) + if err != nil { + t.Fatalf("Utime: %v", err) + } + + fi, err := os.Stat("file1") + if err != nil { + t.Fatal(err) + } + + if fi.ModTime().Unix() != 12345 { + t.Errorf("Utime: failed to change modtime: expected %v, got %v", 12345, fi.ModTime().Unix()) + } +} + +func TestGetrlimit(t *testing.T) { + var rlim unix.Rlimit + err := unix.Getrlimit(unix.RLIMIT_AS, &rlim) + if err != nil { + t.Fatalf("Getrlimit: %v", err) + } +} + +// utilities taken from os/os_test.go + +func touch(t *testing.T, name string) { + f, err := os.Create(name) + if err != nil { + t.Fatal(err) + } + if err := f.Close(); err != nil { + t.Fatal(err) + } +} + +// chtmpdir changes the working directory to a new temporary directory and +// provides a cleanup function. Used when PWD is read-only. +func chtmpdir(t *testing.T) func() { + oldwd, err := os.Getwd() + if err != nil { + t.Fatalf("chtmpdir: %v", err) + } + d, err := ioutil.TempDir("", "test") + if err != nil { + t.Fatalf("chtmpdir: %v", err) + } + if err := os.Chdir(d); err != nil { + t.Fatalf("chtmpdir: %v", err) + } + return func() { + if err := os.Chdir(oldwd); err != nil { + t.Fatalf("chtmpdir: %v", err) + } + os.RemoveAll(d) + } +} diff --git a/vendor/golang.org/x/sys/unix/syscall_test.go b/vendor/golang.org/x/sys/unix/syscall_test.go new file mode 100644 index 000000000000..95eac92acadb --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_test.go @@ -0,0 +1,50 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +package unix_test + +import ( + "fmt" + "testing" + + "golang.org/x/sys/unix" +) + +func testSetGetenv(t *testing.T, key, value string) { + err := unix.Setenv(key, value) + if err != nil { + t.Fatalf("Setenv failed to set %q: %v", value, err) + } + newvalue, found := unix.Getenv(key) + if !found { + t.Fatalf("Getenv failed to find %v variable (want value %q)", key, value) + } + if newvalue != value { + t.Fatalf("Getenv(%v) = %q; want %q", key, newvalue, value) + } +} + +func TestEnv(t *testing.T) { + testSetGetenv(t, "TESTENV", "AVALUE") + // make sure TESTENV gets set to "", not deleted + testSetGetenv(t, "TESTENV", "") +} + +func TestItoa(t *testing.T) { + // Make most negative integer: 0x8000... 
+ i := 1 + for i<<1 != 0 { + i <<= 1 + } + if i >= 0 { + t.Fatal("bad math") + } + s := unix.Itoa(i) + f := fmt.Sprint(i) + if s != f { + t.Fatalf("itoa(%d) = %s, want %s", i, s, f) + } +} diff --git a/vendor/golang.org/x/sys/unix/syscall_unix_test.go b/vendor/golang.org/x/sys/unix/syscall_unix_test.go new file mode 100644 index 000000000000..49208a000daf --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_unix_test.go @@ -0,0 +1,353 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +package unix_test + +import ( + "flag" + "fmt" + "io/ioutil" + "net" + "os" + "os/exec" + "path/filepath" + "runtime" + "testing" + "time" + + "golang.org/x/sys/unix" +) + +// Tests that the functions, structures and constants below are consistent +// on all Unix-like systems. +func _() { + // program scheduling priority functions and constants + var ( + _ func(int, int, int) error = unix.Setpriority + _ func(int, int) (int, error) = unix.Getpriority + ) + const ( + _ int = unix.PRIO_USER + _ int = unix.PRIO_PROCESS + _ int = unix.PRIO_PGRP + ) + + // termios constants + const ( + _ int = unix.TCIFLUSH + _ int = unix.TCIOFLUSH + _ int = unix.TCOFLUSH + ) + + // fcntl file locking structure and constants + var ( + _ = unix.Flock_t{ + Type: int16(0), + Whence: int16(0), + Start: int64(0), + Len: int64(0), + Pid: int32(0), + } + ) + const ( + _ = unix.F_GETLK + _ = unix.F_SETLK + _ = unix.F_SETLKW + ) +} + +// TestFcntlFlock tests whether the file locking structure matches +// the calling convention of each kernel. +func TestFcntlFlock(t *testing.T) { + name := filepath.Join(os.TempDir(), "TestFcntlFlock") + fd, err := unix.Open(name, unix.O_CREAT|unix.O_RDWR|unix.O_CLOEXEC, 0) + if err != nil { + t.Fatalf("Open failed: %v", err) + } + defer unix.Unlink(name) + defer unix.Close(fd) + flock := unix.Flock_t{ + Type: unix.F_RDLCK, + Start: 0, Len: 0, Whence: 1, + } + if err := unix.FcntlFlock(uintptr(fd), unix.F_GETLK, &flock); err != nil { + t.Fatalf("FcntlFlock failed: %v", err) + } +} + +// TestPassFD tests passing a file descriptor over a Unix socket. +// +// This test involves both a parent and a child process. The parent +// process is invoked as a normal test, with "go test", which then +// runs the child process by running the current test binary with args +// "-test.run=^TestPassFD$" and an environment variable used to signal +// that the test should become the child process instead. +func TestPassFD(t *testing.T) { + switch runtime.GOOS { + case "dragonfly": + // TODO(jsing): Figure out why sendmsg is returning EINVAL. + t.Skip("skipping test on dragonfly") + case "solaris": + // TODO(aram): Figure out why ReadMsgUnix is returning empty message. 
+ t.Skip("skipping test on solaris, see issue 7402") + } + if os.Getenv("GO_WANT_HELPER_PROCESS") == "1" { + passFDChild() + return + } + + tempDir, err := ioutil.TempDir("", "TestPassFD") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tempDir) + + fds, err := unix.Socketpair(unix.AF_LOCAL, unix.SOCK_STREAM, 0) + if err != nil { + t.Fatalf("Socketpair: %v", err) + } + defer unix.Close(fds[0]) + defer unix.Close(fds[1]) + writeFile := os.NewFile(uintptr(fds[0]), "child-writes") + readFile := os.NewFile(uintptr(fds[1]), "parent-reads") + defer writeFile.Close() + defer readFile.Close() + + cmd := exec.Command(os.Args[0], "-test.run=^TestPassFD$", "--", tempDir) + cmd.Env = []string{"GO_WANT_HELPER_PROCESS=1"} + if lp := os.Getenv("LD_LIBRARY_PATH"); lp != "" { + cmd.Env = append(cmd.Env, "LD_LIBRARY_PATH="+lp) + } + cmd.ExtraFiles = []*os.File{writeFile} + + out, err := cmd.CombinedOutput() + if len(out) > 0 || err != nil { + t.Fatalf("child process: %q, %v", out, err) + } + + c, err := net.FileConn(readFile) + if err != nil { + t.Fatalf("FileConn: %v", err) + } + defer c.Close() + + uc, ok := c.(*net.UnixConn) + if !ok { + t.Fatalf("unexpected FileConn type; expected UnixConn, got %T", c) + } + + buf := make([]byte, 32) // expect 1 byte + oob := make([]byte, 32) // expect 24 bytes + closeUnix := time.AfterFunc(5*time.Second, func() { + t.Logf("timeout reading from unix socket") + uc.Close() + }) + _, oobn, _, _, err := uc.ReadMsgUnix(buf, oob) + closeUnix.Stop() + + scms, err := unix.ParseSocketControlMessage(oob[:oobn]) + if err != nil { + t.Fatalf("ParseSocketControlMessage: %v", err) + } + if len(scms) != 1 { + t.Fatalf("expected 1 SocketControlMessage; got scms = %#v", scms) + } + scm := scms[0] + gotFds, err := unix.ParseUnixRights(&scm) + if err != nil { + t.Fatalf("unix.ParseUnixRights: %v", err) + } + if len(gotFds) != 1 { + t.Fatalf("wanted 1 fd; got %#v", gotFds) + } + + f := os.NewFile(uintptr(gotFds[0]), "fd-from-child") + defer f.Close() + + got, err := ioutil.ReadAll(f) + want := "Hello from child process!\n" + if string(got) != want { + t.Errorf("child process ReadAll: %q, %v; want %q", got, err, want) + } +} + +// passFDChild is the child process used by TestPassFD. +func passFDChild() { + defer os.Exit(0) + + // Look for our fd. It should be fd 3, but we work around an fd leak + // bug here (http://golang.org/issue/2603) to let it be elsewhere. + var uc *net.UnixConn + for fd := uintptr(3); fd <= 10; fd++ { + f := os.NewFile(fd, "unix-conn") + var ok bool + netc, _ := net.FileConn(f) + uc, ok = netc.(*net.UnixConn) + if ok { + break + } + } + if uc == nil { + fmt.Println("failed to find unix fd") + return + } + + // Make a file f to send to our parent process on uc. + // We make it in tempDir, which our parent will clean up. + flag.Parse() + tempDir := flag.Arg(0) + f, err := ioutil.TempFile(tempDir, "") + if err != nil { + fmt.Printf("TempFile: %v", err) + return + } + + f.Write([]byte("Hello from child process!\n")) + f.Seek(0, 0) + + rights := unix.UnixRights(int(f.Fd())) + dummyByte := []byte("x") + n, oobn, err := uc.WriteMsgUnix(dummyByte, rights, nil) + if err != nil { + fmt.Printf("WriteMsgUnix: %v", err) + return + } + if n != 1 || oobn != len(rights) { + fmt.Printf("WriteMsgUnix = %d, %d; want 1, %d", n, oobn, len(rights)) + return + } +} + +// TestUnixRightsRoundtrip tests that UnixRights, ParseSocketControlMessage, +// and ParseUnixRights are able to successfully round-trip lists of file descriptors. 
+func TestUnixRightsRoundtrip(t *testing.T) { + testCases := [...][][]int{ + {{42}}, + {{1, 2}}, + {{3, 4, 5}}, + {{}}, + {{1, 2}, {3, 4, 5}, {}, {7}}, + } + for _, testCase := range testCases { + b := []byte{} + var n int + for _, fds := range testCase { + // Last assignment to n wins + n = len(b) + unix.CmsgLen(4*len(fds)) + b = append(b, unix.UnixRights(fds...)...) + } + // Truncate b + b = b[:n] + + scms, err := unix.ParseSocketControlMessage(b) + if err != nil { + t.Fatalf("ParseSocketControlMessage: %v", err) + } + if len(scms) != len(testCase) { + t.Fatalf("expected %v SocketControlMessage; got scms = %#v", len(testCase), scms) + } + for i, scm := range scms { + gotFds, err := unix.ParseUnixRights(&scm) + if err != nil { + t.Fatalf("ParseUnixRights: %v", err) + } + wantFds := testCase[i] + if len(gotFds) != len(wantFds) { + t.Fatalf("expected %v fds, got %#v", len(wantFds), gotFds) + } + for j, fd := range gotFds { + if fd != wantFds[j] { + t.Fatalf("expected fd %v, got %v", wantFds[j], fd) + } + } + } + } +} + +func TestRlimit(t *testing.T) { + var rlimit, zero unix.Rlimit + err := unix.Getrlimit(unix.RLIMIT_NOFILE, &rlimit) + if err != nil { + t.Fatalf("Getrlimit: save failed: %v", err) + } + if zero == rlimit { + t.Fatalf("Getrlimit: save failed: got zero value %#v", rlimit) + } + set := rlimit + set.Cur = set.Max - 1 + err = unix.Setrlimit(unix.RLIMIT_NOFILE, &set) + if err != nil { + t.Fatalf("Setrlimit: set failed: %#v %v", set, err) + } + var get unix.Rlimit + err = unix.Getrlimit(unix.RLIMIT_NOFILE, &get) + if err != nil { + t.Fatalf("Getrlimit: get failed: %v", err) + } + set = rlimit + set.Cur = set.Max - 1 + if set != get { + // Seems like Darwin requires some privilege to + // increase the soft limit of rlimit sandbox, though + // Setrlimit never reports an error. + switch runtime.GOOS { + case "darwin": + default: + t.Fatalf("Rlimit: change failed: wanted %#v got %#v", set, get) + } + } + err = unix.Setrlimit(unix.RLIMIT_NOFILE, &rlimit) + if err != nil { + t.Fatalf("Setrlimit: restore failed: %#v %v", rlimit, err) + } +} + +func TestSeekFailure(t *testing.T) { + _, err := unix.Seek(-1, 0, 0) + if err == nil { + t.Fatalf("Seek(-1, 0, 0) did not fail") + } + str := err.Error() // used to crash on Linux + t.Logf("Seek: %v", str) + if str == "" { + t.Fatalf("Seek(-1, 0, 0) returned an error with an empty message") + } +} + +func TestDup(t *testing.T) { + file, err := ioutil.TempFile("", "TestDup") + if err != nil { + t.Fatalf("Tempfile failed: %v", err) + } + defer os.Remove(file.Name()) + defer file.Close() + f := int(file.Fd()) + + newFd, err := unix.Dup(f) + if err != nil { + t.Fatalf("Dup: %v", err) + } + + err = unix.Dup2(newFd, newFd+1) + if err != nil { + t.Fatalf("Dup2: %v", err) + } + + b1 := []byte("Test123") + b2 := make([]byte, 7) + _, err = unix.Write(newFd+1, b1) + if err != nil { + t.Fatalf("Write to dup2 fd failed: %v", err) + } + _, err = unix.Seek(f, 0, 0) + _, err = unix.Read(f, b2) + if err != nil { + t.Fatalf("Read back failed: %v", err) + } + if string(b1) != string(b2) { + t.Errorf("Dup: stdout write not in file, expected %v, got %v", string(b1), string(b2)) + } +} diff --git a/vendor/golang.org/x/tools/.gitattributes b/vendor/golang.org/x/tools/.gitattributes new file mode 100644 index 000000000000..d2f212e5da80 --- /dev/null +++ b/vendor/golang.org/x/tools/.gitattributes @@ -0,0 +1,10 @@ +# Treat all files in this repo as binary, with no git magic updating +# line endings. 
Windows users contributing to Go will need to use a +# modern version of git and editors capable of LF line endings. +# +# We'll prevent accidental CRLF line endings from entering the repo +# via the git-review gofmt checks. +# +# See golang.org/issue/9281 + +* -text diff --git a/vendor/golang.org/x/tools/.gitignore b/vendor/golang.org/x/tools/.gitignore new file mode 100644 index 000000000000..5a9d62efd402 --- /dev/null +++ b/vendor/golang.org/x/tools/.gitignore @@ -0,0 +1,2 @@ +# Add no patterns to .gitignore except for files generated by the build. +last-change diff --git a/vendor/golang.org/x/tools/AUTHORS b/vendor/golang.org/x/tools/AUTHORS new file mode 100644 index 000000000000..15167cd746c5 --- /dev/null +++ b/vendor/golang.org/x/tools/AUTHORS @@ -0,0 +1,3 @@ +# This source code refers to The Go Authors for copyright purposes. +# The master list of authors is in the main Go distribution, +# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/tools/CONTRIBUTING.md b/vendor/golang.org/x/tools/CONTRIBUTING.md new file mode 100644 index 000000000000..88dff59bc7d2 --- /dev/null +++ b/vendor/golang.org/x/tools/CONTRIBUTING.md @@ -0,0 +1,31 @@ +# Contributing to Go + +Go is an open source project. + +It is the work of hundreds of contributors. We appreciate your help! + + +## Filing issues + +When [filing an issue](https://golang.org/issue/new), make sure to answer these five questions: + +1. What version of Go are you using (`go version`)? +2. What operating system and processor architecture are you using? +3. What did you do? +4. What did you expect to see? +5. What did you see instead? + +General questions should go to the [golang-nuts mailing list](https://groups.google.com/group/golang-nuts) instead of the issue tracker. +The gophers there will answer or ask you to file an issue if you've tripped over a bug. + +## Contributing code + +Please read the [Contribution Guidelines](https://golang.org/doc/contribute.html) +before sending patches. + +**We do not accept GitHub pull requests** +(we use [Gerrit](https://code.google.com/p/gerrit/) instead for code review). + +Unless otherwise noted, the Go source files are distributed under +the BSD-style license found in the LICENSE file. + diff --git a/vendor/golang.org/x/tools/CONTRIBUTORS b/vendor/golang.org/x/tools/CONTRIBUTORS new file mode 100644 index 000000000000..1c4577e96806 --- /dev/null +++ b/vendor/golang.org/x/tools/CONTRIBUTORS @@ -0,0 +1,3 @@ +# This source code was written by the Go contributors. +# The master list of contributors is in the main Go distribution, +# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/tools/README b/vendor/golang.org/x/tools/README new file mode 100644 index 000000000000..d5944c6db045 --- /dev/null +++ b/vendor/golang.org/x/tools/README @@ -0,0 +1,10 @@ +This subrepository holds the source for various packages and tools that support +the Go programming language. + +Some of the tools, godoc and vet for example, are included in binary Go distributions. +Others, including the Go guru and the test coverage tool, can be fetched with "go get". + +Packages include a type-checker for Go and an implementation of the +Static Single Assignment form (SSA) representation for Go programs. + +To submit changes to this repository, see http://golang.org/doc/contribute.html. 
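The TestPassFD test in syscall_unix_test.go above relies on Go's standard re-exec helper-process trick: the parent invocation re-runs the current test binary restricted to a single test, and an environment variable signals that the new process should act as the child. A minimal, self-contained sketch of that pattern follows; the test name TestHelperPattern and package name example_test are hypothetical, while GO_WANT_HELPER_PROCESS mirrors the variable the vendored test uses.

package example_test // hypothetical package, for illustration only

import (
	"os"
	"os/exec"
	"testing"
)

func TestHelperPattern(t *testing.T) {
	if os.Getenv("GO_WANT_HELPER_PROCESS") == "1" {
		// Child branch: do the child's share of the work, then exit so
		// the parent-side assertions below never run in this process.
		os.Exit(0)
	}
	// Parent branch: re-exec this test binary with only this test
	// enabled, marking the new process as the child via the environment.
	cmd := exec.Command(os.Args[0], "-test.run=^TestHelperPattern$")
	cmd.Env = append(os.Environ(), "GO_WANT_HELPER_PROCESS=1")
	if out, err := cmd.CombinedOutput(); err != nil {
		t.Fatalf("helper process failed: %v\n%s", err, out)
	}
}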
diff --git a/vendor/golang.org/x/tools/codereview.cfg b/vendor/golang.org/x/tools/codereview.cfg new file mode 100644 index 000000000000..3f8b14b64e83 --- /dev/null +++ b/vendor/golang.org/x/tools/codereview.cfg @@ -0,0 +1 @@ +issuerepo: golang/go diff --git a/vendor/golang.org/x/tools/go/gcexportdata/BUILD b/vendor/golang.org/x/tools/go/gcexportdata/BUILD index 3ca1c1f90b3d..704fdf8b6007 100644 --- a/vendor/golang.org/x/tools/go/gcexportdata/BUILD +++ b/vendor/golang.org/x/tools/go/gcexportdata/BUILD @@ -1,9 +1,4 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "go_default_library", @@ -12,9 +7,20 @@ go_library( "importer.go", ], importpath = "golang.org/x/tools/go/gcexportdata", + visibility = ["//visibility:public"], deps = ["//vendor/golang.org/x/tools/go/gcimporter15:go_default_library"], ) +go_test( + name = "go_default_xtest", + srcs = [ + "example_test.go", + "gcexportdata_test.go", + ], + importpath = "golang.org/x/tools/go/gcexportdata_test", + deps = [":go_default_library"], +) + filegroup( name = "package-srcs", srcs = glob(["**"]), @@ -26,4 +32,5 @@ filegroup( name = "all-srcs", srcs = [":package-srcs"], tags = ["automanaged"], + visibility = ["//visibility:public"], ) diff --git a/vendor/golang.org/x/tools/go/gcexportdata/example_test.go b/vendor/golang.org/x/tools/go/gcexportdata/example_test.go new file mode 100644 index 000000000000..1d7f47279872 --- /dev/null +++ b/vendor/golang.org/x/tools/go/gcexportdata/example_test.go @@ -0,0 +1,118 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.7 + +package gcexportdata_test + +import ( + "fmt" + "go/ast" + "go/parser" + "go/token" + "go/types" + "log" + "os" + "path/filepath" + + "golang.org/x/tools/go/gcexportdata" +) + +// ExampleRead uses gcexportdata.Read to load type information for the +// "fmt" package from the fmt.a file produced by the gc compiler. +func ExampleRead() { + // Find the export data file. + filename, path := gcexportdata.Find("fmt", "") + if filename == "" { + log.Fatalf("can't find export data for fmt") + } + fmt.Printf("Package path: %s\n", path) + fmt.Printf("Export data: %s\n", filepath.Base(filename)) + + // Open and read the file. + f, err := os.Open(filename) + if err != nil { + log.Fatal(err) + } + defer f.Close() + r, err := gcexportdata.NewReader(f) + if err != nil { + log.Fatalf("reading export data %s: %v", filename, err) + } + + // Decode the export data. + fset := token.NewFileSet() + imports := make(map[string]*types.Package) + pkg, err := gcexportdata.Read(r, fset, imports, path) + if err != nil { + log.Fatal(err) + } + + // Print package information. + fmt.Printf("Package members: %s...\n", pkg.Scope().Names()[:5]) + println := pkg.Scope().Lookup("Println") + posn := fset.Position(println.Pos()) + posn.Line = 123 // make example deterministic + fmt.Printf("Println type: %s\n", println.Type()) + fmt.Printf("Println location: %s\n", slashify(posn)) + + // Output: + // + // Package path: fmt + // Export data: fmt.a + // Package members: [Errorf Formatter Fprint Fprintf Fprintln]... 
+ // Println type: func(a ...interface{}) (n int, err error) + // Println location: $GOROOT/src/fmt/print.go:123:1 +} + +// ExampleNewImporter demonstrates usage of NewImporter to provide type +// information for dependencies when type-checking Go source code. +func ExampleNewImporter() { + const src = `package twopi + +import "math" + +const TwoPi = 2 * math.Pi +` + fset := token.NewFileSet() + f, err := parser.ParseFile(fset, "twopi.go", src, 0) + if err != nil { + log.Fatal(err) + } + + packages := make(map[string]*types.Package) + imp := gcexportdata.NewImporter(fset, packages) + conf := types.Config{Importer: imp} + pkg, err := conf.Check("twopi", fset, []*ast.File{f}, nil) + if err != nil { + log.Fatal(err) + } + + // object from imported package + pi := packages["math"].Scope().Lookup("Pi") + fmt.Printf("const %s.%s %s = %s // %s\n", + pi.Pkg().Path(), + pi.Name(), + pi.Type(), + pi.(*types.Const).Val(), + slashify(fset.Position(pi.Pos()))) + + // object in source package + twopi := pkg.Scope().Lookup("TwoPi") + fmt.Printf("const %s %s = %s // %s\n", + twopi.Name(), + twopi.Type(), + twopi.(*types.Const).Val(), + slashify(fset.Position(twopi.Pos()))) + + // Output: + // + // const math.Pi untyped float = 3.14159 // $GOROOT/src/math/const.go:11:1 + // const TwoPi untyped float = 6.28319 // twopi.go:5:7 +} + +func slashify(posn token.Position) token.Position { + posn.Filename = filepath.ToSlash(posn.Filename) // for MS Windows portability + return posn +} diff --git a/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go index 23dc019289e5..572da5c929f3 100644 --- a/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go +++ b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go @@ -21,7 +21,7 @@ // Go 1.8 export data files, so they will work before and after the // Go update. (See discussion at https://github.com/golang/go/issues/15651.) // -package gcexportdata +package gcexportdata // import "golang.org/x/tools/go/gcexportdata" import ( "bufio" diff --git a/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata_test.go b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata_test.go new file mode 100644 index 000000000000..69133db9752b --- /dev/null +++ b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata_test.go @@ -0,0 +1,41 @@ +package gcexportdata_test + +import ( + "go/token" + "go/types" + "log" + "os" + "testing" + + "golang.org/x/tools/go/gcexportdata" +) + +// Test to ensure that gcexportdata can read files produced by App +// Engine Go runtime v1.6. +func TestAppEngine16(t *testing.T) { + // Open and read the file. + f, err := os.Open("testdata/errors-ae16.a") + if err != nil { + t.Fatal(err) + } + defer f.Close() + r, err := gcexportdata.NewReader(f) + if err != nil { + log.Fatalf("reading export data: %v", err) + } + + // Decode the export data. + fset := token.NewFileSet() + imports := make(map[string]*types.Package) + pkg, err := gcexportdata.Read(r, fset, imports, "errors") + if err != nil { + log.Fatal(err) + } + + // Print package information. 
+ got := pkg.Scope().Lookup("New").Type().String() + want := "func(text string) error" + if got != want { + t.Errorf("New.Type = %s, want %s", got, want) + } +} diff --git a/vendor/golang.org/x/tools/go/gcimporter15/BUILD b/vendor/golang.org/x/tools/go/gcimporter15/BUILD index 0134227f8452..9dde591760f2 100644 --- a/vendor/golang.org/x/tools/go/gcimporter15/BUILD +++ b/vendor/golang.org/x/tools/go/gcimporter15/BUILD @@ -1,9 +1,4 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "go_default_library", @@ -16,6 +11,28 @@ go_library( "isAlias19.go", ], importpath = "golang.org/x/tools/go/gcimporter15", + visibility = ["//visibility:public"], +) + +go_test( + name = "go_default_test", + srcs = ["gcimporter_test.go"], + importpath = "golang.org/x/tools/go/gcimporter15", + library = ":go_default_library", +) + +go_test( + name = "go_default_xtest", + srcs = [ + "bexport19_test.go", + "bexport_test.go", + ], + importpath = "golang.org/x/tools/go/gcimporter15_test", + deps = [ + ":go_default_library", + "//vendor/golang.org/x/tools/go/buildutil:go_default_library", + "//vendor/golang.org/x/tools/go/loader:go_default_library", + ], ) filegroup( @@ -29,4 +46,5 @@ filegroup( name = "all-srcs", srcs = [":package-srcs"], tags = ["automanaged"], + visibility = ["//visibility:public"], ) diff --git a/vendor/golang.org/x/tools/go/gcimporter15/bexport19_test.go b/vendor/golang.org/x/tools/go/gcimporter15/bexport19_test.go new file mode 100644 index 000000000000..f6e9733b13fd --- /dev/null +++ b/vendor/golang.org/x/tools/go/gcimporter15/bexport19_test.go @@ -0,0 +1,93 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build go1.9 + +package gcimporter_test + +import ( + "go/ast" + "go/parser" + "go/token" + "go/types" + "testing" + + gcimporter "golang.org/x/tools/go/gcimporter15" +) + +const src = ` +package p + +type ( + T0 = int32 + T1 = struct{} + T2 = struct{ T1 } + Invalid = foo // foo is undeclared +) +` + +func checkPkg(t *testing.T, pkg *types.Package, label string) { + T1 := types.NewStruct(nil, nil) + T2 := types.NewStruct([]*types.Var{types.NewField(0, pkg, "T1", T1, true)}, nil) + + for _, test := range []struct { + name string + typ types.Type + }{ + {"T0", types.Typ[types.Int32]}, + {"T1", T1}, + {"T2", T2}, + {"Invalid", types.Typ[types.Invalid]}, + } { + obj := pkg.Scope().Lookup(test.name) + if obj == nil { + t.Errorf("%s: %s not found", label, test.name) + continue + } + tname, _ := obj.(*types.TypeName) + if tname == nil { + t.Errorf("%s: %v not a type name", label, obj) + continue + } + if !tname.IsAlias() { + t.Errorf("%s: %v: not marked as alias", label, tname) + continue + } + if got := tname.Type(); !types.Identical(got, test.typ) { + t.Errorf("%s: %v: got %v; want %v", label, tname, got, test.typ) + } + } +} + +func TestTypeAliases(t *testing.T) { + // parse and typecheck + fset1 := token.NewFileSet() + f, err := parser.ParseFile(fset1, "p.go", src, 0) + if err != nil { + t.Fatal(err) + } + var conf types.Config + pkg1, err := conf.Check("p", fset1, []*ast.File{f}, nil) + if err == nil { + // foo is undeclared in src; we should see an error + t.Fatal("invalid source type-checked without error") + } + if pkg1 == nil { + // despite incorrect src we should see a (partially) type-checked package + t.Fatal("nil package returned") + } + checkPkg(t, pkg1, "export") + + // export + exportdata := gcimporter.BExportData(fset1, pkg1) + + // import + imports := make(map[string]*types.Package) + fset2 := token.NewFileSet() + _, pkg2, err := gcimporter.BImportData(fset2, imports, exportdata, pkg1.Path()) + if err != nil { + t.Fatalf("BImportData(%s): %v", pkg1.Path(), err) + } + checkPkg(t, pkg2, "import") +} diff --git a/vendor/golang.org/x/tools/go/gcimporter15/bexport_test.go b/vendor/golang.org/x/tools/go/gcimporter15/bexport_test.go new file mode 100644 index 000000000000..6ab3bcb1020b --- /dev/null +++ b/vendor/golang.org/x/tools/go/gcimporter15/bexport_test.go @@ -0,0 +1,326 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gcimporter_test + +import ( + "fmt" + "go/ast" + "go/build" + "go/constant" + "go/parser" + "go/token" + "go/types" + "reflect" + "runtime" + "strings" + "testing" + + "golang.org/x/tools/go/buildutil" + gcimporter "golang.org/x/tools/go/gcimporter15" + "golang.org/x/tools/go/loader" +) + +func TestBExportData_stdlib(t *testing.T) { + if runtime.GOOS == "android" { + t.Skipf("incomplete std lib on %s", runtime.GOOS) + } + + // Load, parse and type-check the program. + ctxt := build.Default // copy + ctxt.GOPATH = "" // disable GOPATH + conf := loader.Config{ + Build: &ctxt, + AllowErrors: true, + } + for _, path := range buildutil.AllPackages(conf.Build) { + conf.Import(path) + } + + // Create a package containing type and value errors to ensure + // they are properly encoded/decoded. 
+ f, err := conf.ParseFile("haserrors/haserrors.go", `package haserrors +const UnknownValue = "" + 0 +type UnknownType undefined +`) + if err != nil { + t.Fatal(err) + } + conf.CreateFromFiles("haserrors", f) + + prog, err := conf.Load() + if err != nil { + t.Fatalf("Load failed: %v", err) + } + + numPkgs := len(prog.AllPackages) + if want := 248; numPkgs < want { + t.Errorf("Loaded only %d packages, want at least %d", numPkgs, want) + } + + for pkg, info := range prog.AllPackages { + if info.Files == nil { + continue // empty directory + } + exportdata := gcimporter.BExportData(conf.Fset, pkg) + + imports := make(map[string]*types.Package) + fset2 := token.NewFileSet() + n, pkg2, err := gcimporter.BImportData(fset2, imports, exportdata, pkg.Path()) + if err != nil { + t.Errorf("BImportData(%s): %v", pkg.Path(), err) + continue + } + if n != len(exportdata) { + t.Errorf("BImportData(%s) decoded %d bytes, want %d", + pkg.Path(), n, len(exportdata)) + } + + // Compare the packages' corresponding members. + for _, name := range pkg.Scope().Names() { + if !ast.IsExported(name) { + continue + } + obj1 := pkg.Scope().Lookup(name) + obj2 := pkg2.Scope().Lookup(name) + if obj2 == nil { + t.Errorf("%s.%s not found, want %s", pkg.Path(), name, obj1) + continue + } + + fl1 := fileLine(conf.Fset, obj1) + fl2 := fileLine(fset2, obj2) + if fl1 != fl2 { + t.Errorf("%s.%s: got posn %s, want %s", + pkg.Path(), name, fl2, fl1) + } + + if err := equalObj(obj1, obj2); err != nil { + t.Errorf("%s.%s: %s\ngot: %s\nwant: %s", + pkg.Path(), name, err, obj2, obj1) + } + } + } +} + +func fileLine(fset *token.FileSet, obj types.Object) string { + posn := fset.Position(obj.Pos()) + return fmt.Sprintf("%s:%d", posn.Filename, posn.Line) +} + +// equalObj reports how x and y differ. They are assumed to belong to +// different universes so cannot be compared directly. +func equalObj(x, y types.Object) error { + if reflect.TypeOf(x) != reflect.TypeOf(y) { + return fmt.Errorf("%T vs %T", x, y) + } + xt := x.Type() + yt := y.Type() + switch x.(type) { + case *types.Var, *types.Func: + // ok + case *types.Const: + xval := x.(*types.Const).Val() + yval := y.(*types.Const).Val() + // Use string comparison for floating-point values since rounding is permitted. + if constant.Compare(xval, token.NEQ, yval) && + !(xval.Kind() == constant.Float && xval.String() == yval.String()) { + return fmt.Errorf("unequal constants %s vs %s", xval, yval) + } + case *types.TypeName: + xt = xt.Underlying() + yt = yt.Underlying() + default: + return fmt.Errorf("unexpected %T", x) + } + return equalType(xt, yt) +} + +func equalType(x, y types.Type) error { + if reflect.TypeOf(x) != reflect.TypeOf(y) { + return fmt.Errorf("unequal kinds: %T vs %T", x, y) + } + switch x := x.(type) { + case *types.Interface: + y := y.(*types.Interface) + // TODO(gri): enable separate emission of Embedded interfaces + // and ExplicitMethods then use this logic. 
+ // if x.NumEmbeddeds() != y.NumEmbeddeds() { + // return fmt.Errorf("unequal number of embedded interfaces: %d vs %d", + // x.NumEmbeddeds(), y.NumEmbeddeds()) + // } + // for i := 0; i < x.NumEmbeddeds(); i++ { + // xi := x.Embedded(i) + // yi := y.Embedded(i) + // if xi.String() != yi.String() { + // return fmt.Errorf("mismatched %dth embedded interface: %s vs %s", + // i, xi, yi) + // } + // } + // if x.NumExplicitMethods() != y.NumExplicitMethods() { + // return fmt.Errorf("unequal methods: %d vs %d", + // x.NumExplicitMethods(), y.NumExplicitMethods()) + // } + // for i := 0; i < x.NumExplicitMethods(); i++ { + // xm := x.ExplicitMethod(i) + // ym := y.ExplicitMethod(i) + // if xm.Name() != ym.Name() { + // return fmt.Errorf("mismatched %dth method: %s vs %s", i, xm, ym) + // } + // if err := equalType(xm.Type(), ym.Type()); err != nil { + // return fmt.Errorf("mismatched %s method: %s", xm.Name(), err) + // } + // } + if x.NumMethods() != y.NumMethods() { + return fmt.Errorf("unequal methods: %d vs %d", + x.NumMethods(), y.NumMethods()) + } + for i := 0; i < x.NumMethods(); i++ { + xm := x.Method(i) + ym := y.Method(i) + if xm.Name() != ym.Name() { + return fmt.Errorf("mismatched %dth method: %s vs %s", i, xm, ym) + } + if err := equalType(xm.Type(), ym.Type()); err != nil { + return fmt.Errorf("mismatched %s method: %s", xm.Name(), err) + } + } + case *types.Array: + y := y.(*types.Array) + if x.Len() != y.Len() { + return fmt.Errorf("unequal array lengths: %d vs %d", x.Len(), y.Len()) + } + if err := equalType(x.Elem(), y.Elem()); err != nil { + return fmt.Errorf("array elements: %s", err) + } + case *types.Basic: + y := y.(*types.Basic) + if x.Kind() != y.Kind() { + return fmt.Errorf("unequal basic types: %s vs %s", x, y) + } + case *types.Chan: + y := y.(*types.Chan) + if x.Dir() != y.Dir() { + return fmt.Errorf("unequal channel directions: %d vs %d", x.Dir(), y.Dir()) + } + if err := equalType(x.Elem(), y.Elem()); err != nil { + return fmt.Errorf("channel elements: %s", err) + } + case *types.Map: + y := y.(*types.Map) + if err := equalType(x.Key(), y.Key()); err != nil { + return fmt.Errorf("map keys: %s", err) + } + if err := equalType(x.Elem(), y.Elem()); err != nil { + return fmt.Errorf("map values: %s", err) + } + case *types.Named: + y := y.(*types.Named) + if x.String() != y.String() { + return fmt.Errorf("unequal named types: %s vs %s", x, y) + } + case *types.Pointer: + y := y.(*types.Pointer) + if err := equalType(x.Elem(), y.Elem()); err != nil { + return fmt.Errorf("pointer elements: %s", err) + } + case *types.Signature: + y := y.(*types.Signature) + if err := equalType(x.Params(), y.Params()); err != nil { + return fmt.Errorf("parameters: %s", err) + } + if err := equalType(x.Results(), y.Results()); err != nil { + return fmt.Errorf("results: %s", err) + } + if x.Variadic() != y.Variadic() { + return fmt.Errorf("unequal variadicity: %t vs %t", + x.Variadic(), y.Variadic()) + } + if (x.Recv() != nil) != (y.Recv() != nil) { + return fmt.Errorf("unequal receivers: %s vs %s", x.Recv(), y.Recv()) + } + if x.Recv() != nil { + // TODO(adonovan): fix: this assertion fires for interface methods. + // The type of the receiver of an interface method is a named type + // if the Package was loaded from export data, or an unnamed (interface) + // type if the Package was produced by type-checking ASTs.
+ // if err := equalType(x.Recv().Type(), y.Recv().Type()); err != nil { + // return fmt.Errorf("receiver: %s", err) + // } + } + case *types.Slice: + y := y.(*types.Slice) + if err := equalType(x.Elem(), y.Elem()); err != nil { + return fmt.Errorf("slice elements: %s", err) + } + case *types.Struct: + y := y.(*types.Struct) + if x.NumFields() != y.NumFields() { + return fmt.Errorf("unequal struct fields: %d vs %d", + x.NumFields(), y.NumFields()) + } + for i := 0; i < x.NumFields(); i++ { + xf := x.Field(i) + yf := y.Field(i) + if xf.Name() != yf.Name() { + return fmt.Errorf("mismatched fields: %s vs %s", xf, yf) + } + if err := equalType(xf.Type(), yf.Type()); err != nil { + return fmt.Errorf("struct field %s: %s", xf.Name(), err) + } + if x.Tag(i) != y.Tag(i) { + return fmt.Errorf("struct field %s has unequal tags: %q vs %q", + xf.Name(), x.Tag(i), y.Tag(i)) + } + } + case *types.Tuple: + y := y.(*types.Tuple) + if x.Len() != y.Len() { + return fmt.Errorf("unequal tuple lengths: %d vs %d", x.Len(), y.Len()) + } + for i := 0; i < x.Len(); i++ { + if err := equalType(x.At(i).Type(), y.At(i).Type()); err != nil { + return fmt.Errorf("tuple element %d: %s", i, err) + } + } + } + return nil +} + +// TestVeryLongFile tests the position of an import object declared in +// a very long input file. Line numbers greater than maxlines are +// reported as line 1, not garbage or token.NoPos. +func TestVeryLongFile(t *testing.T) { + // parse and typecheck + longFile := "package foo" + strings.Repeat("\n", 123456) + "var X int" + fset1 := token.NewFileSet() + f, err := parser.ParseFile(fset1, "foo.go", longFile, 0) + if err != nil { + t.Fatal(err) + } + var conf types.Config + pkg, err := conf.Check("foo", fset1, []*ast.File{f}, nil) + if err != nil { + t.Fatal(err) + } + + // export + exportdata := gcimporter.BExportData(fset1, pkg) + + // import + imports := make(map[string]*types.Package) + fset2 := token.NewFileSet() + _, pkg2, err := gcimporter.BImportData(fset2, imports, exportdata, pkg.Path()) + if err != nil { + t.Fatalf("BImportData(%s): %v", pkg.Path(), err) + } + + // compare + posn1 := fset1.Position(pkg.Scope().Lookup("X").Pos()) + posn2 := fset2.Position(pkg2.Scope().Lookup("X").Pos()) + if want := "foo.go:1:1"; posn2.String() != want { + t.Errorf("X position = %s, want %s (orig was %s)", + posn2, want, posn1) + } +} diff --git a/vendor/golang.org/x/tools/go/gcimporter15/gcimporter.go b/vendor/golang.org/x/tools/go/gcimporter15/gcimporter.go index 370203b9d928..6fbc9d7617bd 100644 --- a/vendor/golang.org/x/tools/go/gcimporter15/gcimporter.go +++ b/vendor/golang.org/x/tools/go/gcimporter15/gcimporter.go @@ -13,7 +13,7 @@ // Deprecated: this package will be deleted in October 2017. // New code should use golang.org/x/tools/go/gcexportdata. // -package gcimporter +package gcimporter // import "golang.org/x/tools/go/gcimporter15" import ( "bufio" diff --git a/vendor/golang.org/x/tools/go/gcimporter15/gcimporter_test.go b/vendor/golang.org/x/tools/go/gcimporter15/gcimporter_test.go new file mode 100644 index 000000000000..d63f11220c12 --- /dev/null +++ b/vendor/golang.org/x/tools/go/gcimporter15/gcimporter_test.go @@ -0,0 +1,501 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file is a copy of $GOROOT/src/go/internal/gcimporter/gcimporter_test.go, +// adjusted to make it build with code from (std lib) internal/testenv copied. 
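The tests above exercise a full round trip through BExportData and BImportData. The package comment in gcimporter.go above points new code at golang.org/x/tools/go/gcexportdata instead; below is a minimal sketch of the same round trip against that replacement API, assuming only its documented Write and Read entry points (the roundTrip helper name is ours, not part of this patch):

package sketch

import (
	"bytes"
	"go/token"
	"go/types"

	"golang.org/x/tools/go/gcexportdata"
)

// roundTrip encodes pkg to gc export data and decodes it back into a
// fresh FileSet, mirroring what the BExportData/BImportData tests above
// verify for every standard-library package.
func roundTrip(fset *token.FileSet, pkg *types.Package) (*types.Package, error) {
	var buf bytes.Buffer
	if err := gcexportdata.Write(&buf, fset, pkg); err != nil {
		return nil, err
	}
	imports := make(map[string]*types.Package)
	return gcexportdata.Read(&buf, token.NewFileSet(), imports, pkg.Path())
}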
+ +package gcimporter + +import ( + "bytes" + "fmt" + "go/types" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "runtime" + "strings" + "testing" + "time" +) + +// ---------------------------------------------------------------------------- +// The following three functions (Builder, HasGoBuild, MustHaveGoBuild) were +// copied from $GOROOT/src/internal/testenv since that package is not available +// in x/tools. + +// Builder reports the name of the builder running this test +// (for example, "linux-amd64" or "windows-386-gce"). +// If the test is not running on the build infrastructure, +// Builder returns the empty string. +func Builder() string { + return os.Getenv("GO_BUILDER_NAME") +} + +// HasGoBuild reports whether the current system can build programs with ``go build'' +// and then run them with os.StartProcess or exec.Command. +func HasGoBuild() bool { + switch runtime.GOOS { + case "android", "nacl": + return false + case "darwin": + if strings.HasPrefix(runtime.GOARCH, "arm") { + return false + } + } + return true +} + +// MustHaveGoBuild checks that the current system can build programs with ``go build'' +// and then run them with os.StartProcess or exec.Command. +// If not, MustHaveGoBuild calls t.Skip with an explanation. +func MustHaveGoBuild(t *testing.T) { + if !HasGoBuild() { + t.Skipf("skipping test: 'go build' not available on %s/%s", runtime.GOOS, runtime.GOARCH) + } +} + +// ---------------------------------------------------------------------------- + +// skipSpecialPlatforms causes the test to be skipped for platforms where +// builders (build.golang.org) don't have access to compiled packages for +// import. +func skipSpecialPlatforms(t *testing.T) { + switch platform := runtime.GOOS + "-" + runtime.GOARCH; platform { + case "nacl-amd64p32", + "nacl-386", + "nacl-arm", + "darwin-arm", + "darwin-arm64": + t.Skipf("no compiled packages available for import on %s", platform) + } +} + +func compile(t *testing.T, dirname, filename string) string { + /* testenv. 
*/ MustHaveGoBuild(t) + cmd := exec.Command("go", "tool", "compile", filename) + cmd.Dir = dirname + out, err := cmd.CombinedOutput() + if err != nil { + t.Logf("%s", out) + t.Fatalf("go tool compile %s failed: %s", filename, err) + } + // filename should end with ".go" + return filepath.Join(dirname, filename[:len(filename)-2]+"o") +} + +func testPath(t *testing.T, path, srcDir string) *types.Package { + t0 := time.Now() + pkg, err := Import(make(map[string]*types.Package), path, srcDir) + if err != nil { + t.Errorf("testPath(%s): %s", path, err) + return nil + } + t.Logf("testPath(%s): %v", path, time.Since(t0)) + return pkg +} + +const maxTime = 30 * time.Second + +func testDir(t *testing.T, dir string, endTime time.Time) (nimports int) { + dirname := filepath.Join(runtime.GOROOT(), "pkg", runtime.GOOS+"_"+runtime.GOARCH, dir) + list, err := ioutil.ReadDir(dirname) + if err != nil { + t.Fatalf("testDir(%s): %s", dirname, err) + } + for _, f := range list { + if time.Now().After(endTime) { + t.Log("testing time used up") + return + } + switch { + case !f.IsDir(): + // try extensions + for _, ext := range pkgExts { + if strings.HasSuffix(f.Name(), ext) { + name := f.Name()[0 : len(f.Name())-len(ext)] // remove extension + if testPath(t, filepath.Join(dir, name), dir) != nil { + nimports++ + } + } + } + case f.IsDir(): + nimports += testDir(t, filepath.Join(dir, f.Name()), endTime) + } + } + return +} + +const testfile = "exports.go" + +func TestImportTestdata(t *testing.T) { + // This package only handles gc export data. + if runtime.Compiler != "gc" { + t.Skipf("gc-built packages not available (compiler = %s)", runtime.Compiler) + return + } + + if outFn := compile(t, "testdata", testfile); outFn != "" { + defer os.Remove(outFn) + } + + // filename should end with ".go" + filename := testfile[:len(testfile)-3] + if pkg := testPath(t, "./testdata/"+filename, "."); pkg != nil { + // The package's Imports list must include all packages + // explicitly imported by testfile, plus all packages + // referenced indirectly via exported objects in testfile. + // With the textual export format (when run against Go1.6), + // the list may also include additional packages that are + // not strictly required for import processing alone (they + // are exported to err "on the safe side"). + // For now, we just test the presence of a few packages + // that we know are there for sure. + got := fmt.Sprint(pkg.Imports()) + for _, want := range []string{"go/ast", "go/token"} { + if !strings.Contains(got, want) { + t.Errorf(`Package("exports").Imports() = %s, does not contain %s`, got, want) + } + } + } +} + +func TestVersionHandling(t *testing.T) { + skipSpecialPlatforms(t) // we really only need to exclude nacl platforms, but this is fine + + // This package only handles gc export data. 
+ if runtime.Compiler != "gc" { + t.Skipf("gc-built packages not available (compiler = %s)", runtime.Compiler) + return + } + + const dir = "./testdata/versions" + list, err := ioutil.ReadDir(dir) + if err != nil { + t.Fatal(err) + } + + for _, f := range list { + name := f.Name() + if !strings.HasSuffix(name, ".a") { + continue // not a package file + } + if strings.Contains(name, "corrupted") { + continue // don't process a leftover corrupted file + } + pkgpath := "./" + name[:len(name)-2] + + // test that export data can be imported + _, err := Import(make(map[string]*types.Package), pkgpath, dir) + if err != nil { + t.Errorf("import %q failed: %v", pkgpath, err) + continue + } + + // create file with corrupted export data + // 1) read file + data, err := ioutil.ReadFile(filepath.Join(dir, name)) + if err != nil { + t.Fatal(err) + } + // 2) find export data + i := bytes.Index(data, []byte("\n$$B\n")) + 5 + j := bytes.Index(data[i:], []byte("\n$$\n")) + i + if i < 0 || j < 0 || i > j { + t.Fatalf("export data section not found (i = %d, j = %d)", i, j) + } + // 3) corrupt the data (increment every 7th byte) + for k := j - 13; k >= i; k -= 7 { + data[k]++ + } + // 4) write the file + pkgpath += "_corrupted" + filename := filepath.Join(dir, pkgpath) + ".a" + ioutil.WriteFile(filename, data, 0666) + defer os.Remove(filename) + + // test that importing the corrupted file results in an error + _, err = Import(make(map[string]*types.Package), pkgpath, dir) + if err == nil { + t.Errorf("import corrupted %q succeeded", pkgpath) + } else if msg := err.Error(); !strings.Contains(msg, "version skew") { + t.Errorf("import %q error incorrect (%s)", pkgpath, msg) + } + } +} + +func TestImportStdLib(t *testing.T) { + skipSpecialPlatforms(t) + + // This package only handles gc export data. + if runtime.Compiler != "gc" { + t.Skipf("gc-built packages not available (compiler = %s)", runtime.Compiler) + return + } + + dt := maxTime + if testing.Short() && /* testenv. */ Builder() == "" { + dt = 10 * time.Millisecond + } + nimports := testDir(t, "", time.Now().Add(dt)) // installed packages + t.Logf("tested %d imports", nimports) +} + +var importedObjectTests = []struct { + name string + want string +}{ + {"math.Pi", "const Pi untyped float"}, + {"io.Reader", "type Reader interface{Read(p []byte) (n int, err error)}"}, + // Go 1.7 and 1.8 don't know about embedded interfaces. Leave this + // test out for now - the code is tested in the std library anyway. + // TODO(gri) enable again once we're off 1.7 and 1.8. + // {"io.ReadWriter", "type ReadWriter interface{Reader; Writer}"}, + {"math.Sin", "func Sin(x float64) float64"}, + // TODO(gri) add more tests +} + +func TestImportedTypes(t *testing.T) { + skipSpecialPlatforms(t) + + // This package only handles gc export data. 
+ if runtime.Compiler != "gc" { + t.Skipf("gc-built packages not available (compiler = %s)", runtime.Compiler) + return + } + + for _, test := range importedObjectTests { + s := strings.Split(test.name, ".") + if len(s) != 2 { + t.Fatal("inconsistent test data") + } + importPath := s[0] + objName := s[1] + + pkg, err := Import(make(map[string]*types.Package), importPath, ".") + if err != nil { + t.Error(err) + continue + } + + obj := pkg.Scope().Lookup(objName) + if obj == nil { + t.Errorf("%s: object not found", test.name) + continue + } + + got := types.ObjectString(obj, types.RelativeTo(pkg)) + if got != test.want { + t.Errorf("%s: got %q; want %q", test.name, got, test.want) + } + } +} + +func TestIssue5815(t *testing.T) { + skipSpecialPlatforms(t) + + // This package only handles gc export data. + if runtime.Compiler != "gc" { + t.Skipf("gc-built packages not available (compiler = %s)", runtime.Compiler) + return + } + + pkg, err := Import(make(map[string]*types.Package), "strings", ".") + if err != nil { + t.Fatal(err) + } + + scope := pkg.Scope() + for _, name := range scope.Names() { + obj := scope.Lookup(name) + if obj.Pkg() == nil { + t.Errorf("no pkg for %s", obj) + } + if tname, _ := obj.(*types.TypeName); tname != nil { + named := tname.Type().(*types.Named) + for i := 0; i < named.NumMethods(); i++ { + m := named.Method(i) + if m.Pkg() == nil { + t.Errorf("no pkg for %s", m) + } + } + } + } +} + +// Smoke test to ensure that imported methods get the correct package. +func TestCorrectMethodPackage(t *testing.T) { + skipSpecialPlatforms(t) + + // This package only handles gc export data. + if runtime.Compiler != "gc" { + t.Skipf("gc-built packages not available (compiler = %s)", runtime.Compiler) + return + } + + imports := make(map[string]*types.Package) + _, err := Import(imports, "net/http", ".") + if err != nil { + t.Fatal(err) + } + + mutex := imports["sync"].Scope().Lookup("Mutex").(*types.TypeName).Type() + mset := types.NewMethodSet(types.NewPointer(mutex)) // methods of *sync.Mutex + sel := mset.Lookup(nil, "Lock") + lock := sel.Obj().(*types.Func) + if got, want := lock.Pkg().Path(), "sync"; got != want { + t.Errorf("got package path %q; want %q", got, want) + } +} + +func TestIssue13566(t *testing.T) { + skipSpecialPlatforms(t) + + // This package only handles gc export data. + if runtime.Compiler != "gc" { + t.Skipf("gc-built packages not available (compiler = %s)", runtime.Compiler) + return + } + + // On windows, we have to set the -D option for the compiler to avoid having a drive + // letter and an illegal ':' in the import path - just skip it (see also issue #3483). + if runtime.GOOS == "windows" { + t.Skip("avoid dealing with relative paths/drive letters on windows") + } + + if f := compile(t, "testdata", "a.go"); f != "" { + defer os.Remove(f) + } + if f := compile(t, "testdata", "b.go"); f != "" { + defer os.Remove(f) + } + + // import must succeed (test for issue at hand) + pkg, err := Import(make(map[string]*types.Package), "./testdata/b", ".") + if err != nil { + t.Fatal(err) + } + + // make sure all indirectly imported packages have names + for _, imp := range pkg.Imports() { + if imp.Name() == "" { + t.Errorf("no name for %s package", imp.Path()) + } + } +} + +func TestIssue13898(t *testing.T) { + skipSpecialPlatforms(t) + + // This package only handles gc export data. 
+ if runtime.Compiler != "gc" { + t.Skipf("gc-built packages not available (compiler = %s)", runtime.Compiler) + return + } + + // import go/internal/gcimporter which imports go/types partially + imports := make(map[string]*types.Package) + _, err := Import(imports, "go/internal/gcimporter", ".") + if err != nil { + t.Fatal(err) + } + + // look for go/types package + var goTypesPkg *types.Package + for path, pkg := range imports { + if path == "go/types" { + goTypesPkg = pkg + break + } + } + if goTypesPkg == nil { + t.Fatal("go/types not found") + } + + // look for go/types.Object type + obj := goTypesPkg.Scope().Lookup("Object") + if obj == nil { + t.Fatal("go/types.Object not found") + } + typ, ok := obj.Type().(*types.Named) + if !ok { + t.Fatalf("go/types.Object type is %v; wanted named type", typ) + } + + // lookup go/types.Object.Pkg method + m, index, indirect := types.LookupFieldOrMethod(typ, false, nil, "Pkg") + if m == nil { + t.Fatalf("go/types.Object.Pkg not found (index = %v, indirect = %v)", index, indirect) + } + + // the method must belong to go/types + if m.Pkg().Path() != "go/types" { + t.Fatalf("found %v; want go/types", m.Pkg()) + } +} + +func TestIssue15517(t *testing.T) { + skipSpecialPlatforms(t) + + // This package only handles gc export data. + if runtime.Compiler != "gc" { + t.Skipf("gc-built packages not available (compiler = %s)", runtime.Compiler) + return + } + + // On windows, we have to set the -D option for the compiler to avoid having a drive + // letter and an illegal ':' in the import path - just skip it (see also issue #3483). + if runtime.GOOS == "windows" { + t.Skip("avoid dealing with relative paths/drive letters on windows") + } + + if f := compile(t, "testdata", "p.go"); f != "" { + defer os.Remove(f) + } + + // Multiple imports of p must succeed without redeclaration errors. + // We use an import path that's not cleaned up so that the eventual + // file path for the package is different from the package path; this + // will expose the error if it is present. + // + // (Issue: Both the textual and the binary importer used the file path + // of the package to be imported as key into the shared packages map. + // However, the binary importer then used the package path to identify + // the imported package to mark it as complete; effectively marking the + // wrong package as complete. By using an "unclean" package path, the + // file and package path are different, exposing the problem if present. + // The same issue occurs with vendoring.) + imports := make(map[string]*types.Package) + for i := 0; i < 3; i++ { + if _, err := Import(imports, "./././testdata/p", "."); err != nil { + t.Fatal(err) + } + } +} + +func TestIssue15920(t *testing.T) { + skipSpecialPlatforms(t) + + // This package only handles gc export data. + if runtime.Compiler != "gc" { + t.Skipf("gc-built packages not available (compiler = %s)", runtime.Compiler) + return + } + + // On windows, we have to set the -D option for the compiler to avoid having a drive + // letter and an illegal ':' in the import path - just skip it (see also issue #3483). 
+ if runtime.GOOS == "windows" { + t.Skip("avoid dealing with relative paths/drive letters on windows") + } + + if f := compile(t, "testdata", "issue15920.go"); f != "" { + defer os.Remove(f) + } + + imports := make(map[string]*types.Package) + if _, err := Import(imports, "./testdata/issue15920", "."); err != nil { + t.Fatal(err) + } +} diff --git a/vendor/google.golang.org/api/LICENSE b/vendor/google.golang.org/api/LICENSE deleted file mode 100644 index 263aa7a0c123..000000000000 --- a/vendor/google.golang.org/api/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2011 Google Inc. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/google.golang.org/api/gensupport/BUILD b/vendor/google.golang.org/api/gensupport/BUILD deleted file mode 100644 index 415fd39ce7c7..000000000000 --- a/vendor/google.golang.org/api/gensupport/BUILD +++ /dev/null @@ -1,40 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "backoff.go", - "buffer.go", - "doc.go", - "json.go", - "media.go", - "params.go", - "resumable.go", - "retry.go", - "send.go", - ], - importpath = "google.golang.org/api/gensupport", - deps = [ - "//vendor/golang.org/x/net/context:go_default_library", - "//vendor/golang.org/x/net/context/ctxhttp:go_default_library", - "//vendor/google.golang.org/api/googleapi:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], -) diff --git a/vendor/google.golang.org/api/gensupport/backoff.go b/vendor/google.golang.org/api/gensupport/backoff.go deleted file mode 100644 index 1356140472a6..000000000000 --- a/vendor/google.golang.org/api/gensupport/backoff.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
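The deleted backoff.go below defines the BackoffStrategy contract (Pause reports the next delay and whether to keep retrying; Reset restores the initial state) alongside a capped exponential implementation. A minimal caller-side sketch of driving that contract, ours rather than anything in the vendored code:

package sketch

import (
	"fmt"
	"time"
)

// backoffStrategy mirrors the Pause/Reset contract of
// gensupport.BackoffStrategy as defined in the file below.
type backoffStrategy interface {
	Pause() (time.Duration, bool)
	Reset()
}

// retryWithBackoff retries attempt until it succeeds or the strategy
// says to stop, sleeping for the suggested pause between attempts.
func retryWithBackoff(bo backoffStrategy, attempt func() error) error {
	bo.Reset()
	for {
		err := attempt()
		if err == nil {
			return nil
		}
		pause, retry := bo.Pause()
		if !retry {
			return fmt.Errorf("retries exhausted: %v", err)
		}
		time.Sleep(pause)
	}
}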
- -package gensupport - -import ( - "math/rand" - "time" -) - -type BackoffStrategy interface { - // Pause returns the duration of the next pause and true if the operation should be - // retried, or false if no further retries should be attempted. - Pause() (time.Duration, bool) - - // Reset restores the strategy to its initial state. - Reset() -} - -// ExponentialBackoff performs exponential backoff as per https://en.wikipedia.org/wiki/Exponential_backoff. -// The initial pause time is given by Base. -// Once the total pause time exceeds Max, Pause will indicate no further retries. -type ExponentialBackoff struct { - Base time.Duration - Max time.Duration - total time.Duration - n uint -} - -func (eb *ExponentialBackoff) Pause() (time.Duration, bool) { - if eb.total > eb.Max { - return 0, false - } - - // The next pause is selected randomly from [0, 2^n * Base). - d := time.Duration(rand.Int63n((1 << eb.n) * int64(eb.Base))) - eb.total += d - eb.n++ - return d, true -} - -func (eb *ExponentialBackoff) Reset() { - eb.n = 0 - eb.total = 0 -} diff --git a/vendor/google.golang.org/api/gensupport/buffer.go b/vendor/google.golang.org/api/gensupport/buffer.go deleted file mode 100644 index 992104911538..000000000000 --- a/vendor/google.golang.org/api/gensupport/buffer.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gensupport - -import ( - "bytes" - "io" - - "google.golang.org/api/googleapi" -) - -// MediaBuffer buffers data from an io.Reader to support uploading media in retryable chunks. -type MediaBuffer struct { - media io.Reader - - chunk []byte // The current chunk which is pending upload. The capacity is the chunk size. - err error // Any error generated when populating chunk by reading media. - - // The absolute position of chunk in the underlying media. - off int64 -} - -func NewMediaBuffer(media io.Reader, chunkSize int) *MediaBuffer { - return &MediaBuffer{media: media, chunk: make([]byte, 0, chunkSize)} -} - -// Chunk returns the current buffered chunk, the offset in the underlying media -// from which the chunk is drawn, and the size of the chunk. -// Successive calls to Chunk return the same chunk between calls to Next. -func (mb *MediaBuffer) Chunk() (chunk io.Reader, off int64, size int, err error) { - // There may already be data in chunk if Next has not been called since the previous call to Chunk. - if mb.err == nil && len(mb.chunk) == 0 { - mb.err = mb.loadChunk() - } - return bytes.NewReader(mb.chunk), mb.off, len(mb.chunk), mb.err -} - -// loadChunk will read from media into chunk, up to the capacity of chunk. -func (mb *MediaBuffer) loadChunk() error { - bufSize := cap(mb.chunk) - mb.chunk = mb.chunk[:bufSize] - - read := 0 - var err error - for err == nil && read < bufSize { - var n int - n, err = mb.media.Read(mb.chunk[read:]) - read += n - } - mb.chunk = mb.chunk[:read] - return err -} - -// Next advances to the next chunk, which will be returned by the next call to Chunk. -// Calls to Next without a corresponding prior call to Chunk will have no effect. -func (mb *MediaBuffer) Next() { - mb.off += int64(len(mb.chunk)) - mb.chunk = mb.chunk[0:0] -} - -type readerTyper struct { - io.Reader - googleapi.ContentTyper -} - -// ReaderAtToReader adapts a ReaderAt to be used as a Reader.
-// If ra implements googleapi.ContentTyper, then the returned reader -// will also implement googleapi.ContentTyper, delegating to ra. -func ReaderAtToReader(ra io.ReaderAt, size int64) io.Reader { - r := io.NewSectionReader(ra, 0, size) - if typer, ok := ra.(googleapi.ContentTyper); ok { - return readerTyper{r, typer} - } - return r -} diff --git a/vendor/google.golang.org/api/gensupport/doc.go b/vendor/google.golang.org/api/gensupport/doc.go deleted file mode 100644 index 752c4b411b24..000000000000 --- a/vendor/google.golang.org/api/gensupport/doc.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package gensupport is an internal implementation detail used by code -// generated by the google-api-go-generator tool. -// -// This package may be modified at any time without regard for backwards -// compatibility. It should not be used directly by API users. -package gensupport diff --git a/vendor/google.golang.org/api/gensupport/json.go b/vendor/google.golang.org/api/gensupport/json.go deleted file mode 100644 index dd7bcd2eb079..000000000000 --- a/vendor/google.golang.org/api/gensupport/json.go +++ /dev/null @@ -1,172 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gensupport - -import ( - "encoding/json" - "fmt" - "reflect" - "strings" -) - -// MarshalJSON returns a JSON encoding of schema containing only selected fields. -// A field is selected if: -// * it has a non-empty value, or -// * its field name is present in forceSendFields, and -// * it is not a nil pointer or nil interface. -// The JSON key for each selected field is taken from the field's json: struct tag. -func MarshalJSON(schema interface{}, forceSendFields []string) ([]byte, error) { - if len(forceSendFields) == 0 { - return json.Marshal(schema) - } - - mustInclude := make(map[string]struct{}) - for _, f := range forceSendFields { - mustInclude[f] = struct{}{} - } - - dataMap, err := schemaToMap(schema, mustInclude) - if err != nil { - return nil, err - } - return json.Marshal(dataMap) -} - -func schemaToMap(schema interface{}, mustInclude map[string]struct{}) (map[string]interface{}, error) { - m := make(map[string]interface{}) - s := reflect.ValueOf(schema) - st := s.Type() - - for i := 0; i < s.NumField(); i++ { - jsonTag := st.Field(i).Tag.Get("json") - if jsonTag == "" { - continue - } - tag, err := parseJSONTag(jsonTag) - if err != nil { - return nil, err - } - if tag.ignore { - continue - } - - v := s.Field(i) - f := st.Field(i) - if !includeField(v, f, mustInclude) { - continue - } - - // nil maps are treated as empty maps. - if f.Type.Kind() == reflect.Map && v.IsNil() { - m[tag.apiName] = map[string]string{} - continue - } - - // nil slices are treated as empty slices. - if f.Type.Kind() == reflect.Slice && v.IsNil() { - m[tag.apiName] = []bool{} - continue - } - - if tag.stringFormat { - m[tag.apiName] = formatAsString(v, f.Type.Kind()) - } else { - m[tag.apiName] = v.Interface() - } - } - return m, nil -} - -// formatAsString returns a string representation of v, dereferencing it first if possible. 
-func formatAsString(v reflect.Value, kind reflect.Kind) string { - if kind == reflect.Ptr && !v.IsNil() { - v = v.Elem() - } - - return fmt.Sprintf("%v", v.Interface()) -} - -// jsonTag represents a restricted version of the struct tag format used by encoding/json. -// It is used to describe the JSON encoding of fields in a Schema struct. -type jsonTag struct { - apiName string - stringFormat bool - ignore bool -} - -// parseJSONTag parses a restricted version of the struct tag format used by encoding/json. -// The format of the tag must match that generated by the Schema.writeSchemaStruct method -// in the api generator. -func parseJSONTag(val string) (jsonTag, error) { - if val == "-" { - return jsonTag{ignore: true}, nil - } - - var tag jsonTag - - i := strings.Index(val, ",") - if i == -1 || val[:i] == "" { - return tag, fmt.Errorf("malformed json tag: %s", val) - } - - tag = jsonTag{ - apiName: val[:i], - } - - switch val[i+1:] { - case "omitempty": - case "omitempty,string": - tag.stringFormat = true - default: - return tag, fmt.Errorf("malformed json tag: %s", val) - } - - return tag, nil -} - -// Reports whether the struct field "f" with value "v" should be included in JSON output. -func includeField(v reflect.Value, f reflect.StructField, mustInclude map[string]struct{}) bool { - // The regular JSON encoding of a nil pointer is "null", which means "delete this field". - // Therefore, we could enable field deletion by honoring pointer fields' presence in the mustInclude set. - // However, many fields are not pointers, so there would be no way to delete these fields. - // Rather than partially supporting field deletion, we ignore mustInclude for nil pointer fields. - // Deletion will be handled by a separate mechanism. - if f.Type.Kind() == reflect.Ptr && v.IsNil() { - return false - } - - // The "any" type is represented as an interface{}. If this interface - // is nil, there is no reasonable representation to send. We ignore - // these fields, for the same reasons as given above for pointers. - if f.Type.Kind() == reflect.Interface && v.IsNil() { - return false - } - - _, ok := mustInclude[f.Name] - return ok || !isEmptyValue(v) -} - -// isEmptyValue reports whether v is the empty value for its type. This -// implementation is based on that of the encoding/json package, but its -// correctness does not depend on it being identical. What's important is that -// this function return false in situations where v should not be sent as part -// of a PATCH operation. -func isEmptyValue(v reflect.Value) bool { - switch v.Kind() { - case reflect.Array, reflect.Map, reflect.Slice, reflect.String: - return v.Len() == 0 - case reflect.Bool: - return !v.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.Interface, reflect.Ptr: - return v.IsNil() - } - return false -} diff --git a/vendor/google.golang.org/api/gensupport/media.go b/vendor/google.golang.org/api/gensupport/media.go deleted file mode 100644 index c6410e89a97a..000000000000 --- a/vendor/google.golang.org/api/gensupport/media.go +++ /dev/null @@ -1,199 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
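The json.go deleted above exists because encoding/json's omitempty drops zero values, so a generated client could never force an explicit zero onto the wire; MarshalJSON with forceSendFields re-includes such fields. A short sketch of the observable difference, using a hypothetical Bucket struct of ours:

package sketch

import (
	"encoding/json"
	"fmt"
)

// Bucket is a hypothetical generated struct with omitempty tags.
type Bucket struct {
	Name       string `json:"name,omitempty"`
	MaxResults int64  `json:"maxResults,omitempty"`
}

func demo() {
	b := Bucket{Name: "logs"} // MaxResults is zero
	out, _ := json.Marshal(b)
	fmt.Println(string(out)) // {"name":"logs"} (the zero field is dropped)
	// gensupport.MarshalJSON(b, []string{"MaxResults"}) would instead
	// emit {"name":"logs","maxResults":0}, forcing the zero to be sent.
}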
- -package gensupport - -import ( - "fmt" - "io" - "io/ioutil" - "mime/multipart" - "net/http" - "net/textproto" - - "google.golang.org/api/googleapi" -) - -const sniffBuffSize = 512 - -func newContentSniffer(r io.Reader) *contentSniffer { - return &contentSniffer{r: r} -} - -// contentSniffer wraps a Reader, and reports the content type determined by sniffing up to 512 bytes from the Reader. -type contentSniffer struct { - r io.Reader - start []byte // buffer for the sniffed bytes. - err error // set to any error encountered while reading bytes to be sniffed. - - ctype string // set on first sniff. - sniffed bool // set to true on first sniff. -} - -func (cs *contentSniffer) Read(p []byte) (n int, err error) { - // Ensure that the content type is sniffed before any data is consumed from Reader. - _, _ = cs.ContentType() - - if len(cs.start) > 0 { - n := copy(p, cs.start) - cs.start = cs.start[n:] - return n, nil - } - - // We may have read some bytes into start while sniffing, even if the read ended in an error. - // We should first return those bytes, then the error. - if cs.err != nil { - return 0, cs.err - } - - // Now we have handled all bytes that were buffered while sniffing. Now just delegate to the underlying reader. - return cs.r.Read(p) -} - -// ContentType returns the sniffed content type, and whether the content type was successfully sniffed. -func (cs *contentSniffer) ContentType() (string, bool) { - if cs.sniffed { - return cs.ctype, cs.ctype != "" - } - cs.sniffed = true - // If ReadAll hits EOF, it returns err==nil. - cs.start, cs.err = ioutil.ReadAll(io.LimitReader(cs.r, sniffBuffSize)) - - // Don't try to detect the content type based on possibly incomplete data. - if cs.err != nil { - return "", false - } - - cs.ctype = http.DetectContentType(cs.start) - return cs.ctype, true -} - -// DetermineContentType determines the content type of the supplied reader. -// If the content type is already known, it can be specified via ctype. -// Otherwise, the content of media will be sniffed to determine the content type. -// If media implements googleapi.ContentTyper (deprecated), this will be used -// instead of sniffing the content. -// After calling DetermineContentType the caller must not perform further reads on -// media, but rather read from the Reader that is returned. -func DetermineContentType(media io.Reader, ctype string) (io.Reader, string) { - // Note: callers could avoid calling DetermineContentType if ctype != "", - // but doing the check inside this function reduces the amount of - // generated code. - if ctype != "" { - return media, ctype - } - - // For backwards compatibility, allow clients to set content - // type by providing a ContentTyper for media. - if typer, ok := media.(googleapi.ContentTyper); ok { - return media, typer.ContentType() - } - - sniffer := newContentSniffer(media) - if ctype, ok := sniffer.ContentType(); ok { - return sniffer, ctype - } - // If content type could not be sniffed, reads from sniffer will eventually fail with an error. - return sniffer, "" -} - -type typeReader struct { - io.Reader - typ string -} - -// multipartReader combines the contents of multiple readers to create a multipart/related HTTP body. -// Close must be called if reads from the multipartReader are abandoned before reaching EOF.
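The contentSniffer above buffers up to sniffBuffSize bytes, runs http.DetectContentType over them, and then replays the buffered bytes ahead of the remaining stream so no data is lost. A standalone sketch of that technique, ours rather than the library's:

package sketch

import (
	"bytes"
	"io"
	"net/http"
)

// sniffContentType reads at most 512 bytes from r (the limit that
// http.DetectContentType inspects), sniffs them, and returns a reader
// that yields the sniffed bytes followed by the remainder of r.
func sniffContentType(r io.Reader) (io.Reader, string, error) {
	buf := make([]byte, 512)
	n, err := io.ReadFull(r, buf)
	if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
		return nil, "", err
	}
	ctype := http.DetectContentType(buf[:n])
	return io.MultiReader(bytes.NewReader(buf[:n]), r), ctype, nil
}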
-type multipartReader struct { - pr *io.PipeReader - pipeOpen bool - ctype string -} - -func newMultipartReader(parts []typeReader) *multipartReader { - mp := &multipartReader{pipeOpen: true} - var pw *io.PipeWriter - mp.pr, pw = io.Pipe() - mpw := multipart.NewWriter(pw) - mp.ctype = "multipart/related; boundary=" + mpw.Boundary() - go func() { - for _, part := range parts { - w, err := mpw.CreatePart(typeHeader(part.typ)) - if err != nil { - mpw.Close() - pw.CloseWithError(fmt.Errorf("googleapi: CreatePart failed: %v", err)) - return - } - _, err = io.Copy(w, part.Reader) - if err != nil { - mpw.Close() - pw.CloseWithError(fmt.Errorf("googleapi: Copy failed: %v", err)) - return - } - } - - mpw.Close() - pw.Close() - }() - return mp -} - -func (mp *multipartReader) Read(data []byte) (n int, err error) { - return mp.pr.Read(data) -} - -func (mp *multipartReader) Close() error { - if !mp.pipeOpen { - return nil - } - mp.pipeOpen = false - return mp.pr.Close() -} - -// CombineBodyMedia combines a json body with media content to create a multipart/related HTTP body. -// It returns a ReadCloser containing the combined body, and the overall "multipart/related" content type, with random boundary. -// -// The caller must call Close on the returned ReadCloser if reads are abandoned before reaching EOF. -func CombineBodyMedia(body io.Reader, bodyContentType string, media io.Reader, mediaContentType string) (io.ReadCloser, string) { - mp := newMultipartReader([]typeReader{ - {body, bodyContentType}, - {media, mediaContentType}, - }) - return mp, mp.ctype -} - -func typeHeader(contentType string) textproto.MIMEHeader { - h := make(textproto.MIMEHeader) - if contentType != "" { - h.Set("Content-Type", contentType) - } - return h -} - -// PrepareUpload determines whether the data in the supplied reader should be -// uploaded in a single request, or in sequential chunks. -// chunkSize is the size of the chunk that media should be split into. -// If chunkSize is non-zero and the contents of media do not fit in a single -// chunk (or there is an error reading media), then media will be returned as a -// MediaBuffer. Otherwise, media will be returned as a Reader. -// -// After PrepareUpload has been called, media should no longer be used: the -// media content should be accessed via one of the return values. -func PrepareUpload(media io.Reader, chunkSize int) (io.Reader, *MediaBuffer) { - if chunkSize == 0 { // do not chunk - return media, nil - } - - mb := NewMediaBuffer(media, chunkSize) - rdr, _, _, err := mb.Chunk() - - if err == io.EOF { // we can upload this in a single request - return rdr, nil - } - // err might be a non-EOF error. If it is, the next call to mb.Chunk will - // return the same error. Returning a MediaBuffer ensures that this error - // will be handled at some point. - - return nil, mb -} diff --git a/vendor/google.golang.org/api/gensupport/params.go b/vendor/google.golang.org/api/gensupport/params.go deleted file mode 100644 index 3b3c743967ea..000000000000 --- a/vendor/google.golang.org/api/gensupport/params.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gensupport - -import ( - "net/url" - - "google.golang.org/api/googleapi" -) - -// URLParams is a simplified replacement for url.Values -// that safely builds up URL parameters for encoding. 
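URLParams, declared just below, is a thin map type that defers to url.Values for the actual encoding. A short usage sketch under that same shape (the demo code is ours):

package sketch

import (
	"fmt"
	"net/url"
)

// urlParams mirrors the URLParams map type declared below.
type urlParams map[string][]string

func demo() {
	u := urlParams{}
	u["alt"] = []string{"json"}
	u["prettyPrint"] = []string{"false"}
	// Encoding defers to url.Values, which sorts keys.
	fmt.Println(url.Values(u).Encode()) // alt=json&prettyPrint=false
}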
-type URLParams map[string][]string - -// Get returns the first value for the given key, or "". -func (u URLParams) Get(key string) string { - vs := u[key] - if len(vs) == 0 { - return "" - } - return vs[0] -} - -// Set sets the key to value. -// It replaces any existing values. -func (u URLParams) Set(key, value string) { - u[key] = []string{value} -} - -// SetMulti sets the key to an array of values. -// It replaces any existing values. -// Note that values must not be modified after calling SetMulti -// so the caller is responsible for making a copy if necessary. -func (u URLParams) SetMulti(key string, values []string) { - u[key] = values -} - -// Encode encodes the values into ``URL encoded'' form -// ("bar=baz&foo=quux") sorted by key. -func (u URLParams) Encode() string { - return url.Values(u).Encode() -} - -func SetOptions(u URLParams, opts ...googleapi.CallOption) { - for _, o := range opts { - u.Set(o.Get()) - } -} diff --git a/vendor/google.golang.org/api/gensupport/resumable.go b/vendor/google.golang.org/api/gensupport/resumable.go deleted file mode 100644 index 695e365a1d51..000000000000 --- a/vendor/google.golang.org/api/gensupport/resumable.go +++ /dev/null @@ -1,199 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gensupport - -import ( - "fmt" - "io" - "net/http" - "sync" - "time" - - "golang.org/x/net/context" -) - -const ( - // statusResumeIncomplete is the code returned by the Google uploader - // when the transfer is not yet complete. - statusResumeIncomplete = 308 - - // statusTooManyRequests is returned by the storage API if the - // per-project limits have been temporarily exceeded. The request - // should be retried. - // https://cloud.google.com/storage/docs/json_api/v1/status-codes#standardcodes - statusTooManyRequests = 429 -) - -// ResumableUpload is used by the generated APIs to provide resumable uploads. -// It is not used by developers directly. -type ResumableUpload struct { - Client *http.Client - // URI is the resumable resource destination provided by the server after specifying "&uploadType=resumable". - URI string - UserAgent string // User-Agent for header of the request - // Media is the object being uploaded. - Media *MediaBuffer - // MediaType defines the media type, e.g. "image/jpeg". - MediaType string - - mu sync.Mutex // guards progress - progress int64 // number of bytes uploaded so far - - // Callback is an optional function that will be periodically called with the cumulative number of bytes uploaded. - Callback func(int64) - - // If not specified, a default exponential backoff strategy will be used. - Backoff BackoffStrategy -} - -// Progress returns the number of bytes uploaded at this point. -func (rx *ResumableUpload) Progress() int64 { - rx.mu.Lock() - defer rx.mu.Unlock() - return rx.progress -} - -// doUploadRequest performs a single HTTP request to upload data. -// off specifies the offset in rx.Media from which data is drawn. -// size is the number of bytes in data. -// final specifies whether data is the final chunk to be uploaded. 
-func (rx *ResumableUpload) doUploadRequest(ctx context.Context, data io.Reader, off, size int64, final bool) (*http.Response, error) { - req, err := http.NewRequest("POST", rx.URI, data) - if err != nil { - return nil, err - } - - req.ContentLength = size - var contentRange string - if final { - if size == 0 { - contentRange = fmt.Sprintf("bytes */%v", off) - } else { - contentRange = fmt.Sprintf("bytes %v-%v/%v", off, off+size-1, off+size) - } - } else { - contentRange = fmt.Sprintf("bytes %v-%v/*", off, off+size-1) - } - req.Header.Set("Content-Range", contentRange) - req.Header.Set("Content-Type", rx.MediaType) - req.Header.Set("User-Agent", rx.UserAgent) - return SendRequest(ctx, rx.Client, req) - -} - -// reportProgress calls a user-supplied callback to report upload progress. -// If old==updated, the callback is not called. -func (rx *ResumableUpload) reportProgress(old, updated int64) { - if updated-old == 0 { - return - } - rx.mu.Lock() - rx.progress = updated - rx.mu.Unlock() - if rx.Callback != nil { - rx.Callback(updated) - } -} - -// transferChunk performs a single HTTP request to upload a single chunk from rx.Media. -func (rx *ResumableUpload) transferChunk(ctx context.Context) (*http.Response, error) { - chunk, off, size, err := rx.Media.Chunk() - - done := err == io.EOF - if !done && err != nil { - return nil, err - } - - res, err := rx.doUploadRequest(ctx, chunk, off, int64(size), done) - if err != nil { - return res, err - } - - if res.StatusCode == statusResumeIncomplete || res.StatusCode == http.StatusOK { - rx.reportProgress(off, off+int64(size)) - } - - if res.StatusCode == statusResumeIncomplete { - rx.Media.Next() - } - return res, nil -} - -func contextDone(ctx context.Context) bool { - select { - case <-ctx.Done(): - return true - default: - return false - } -} - -// Upload starts the process of a resumable upload with a cancellable context. -// It retries using the provided back off strategy until cancelled or the -// strategy indicates to stop retrying. -// It is called from the auto-generated API code and is not visible to the user. -// Before sending an HTTP request, Upload calls any registered hook functions, -// and calls the returned functions after the request returns (see send.go). -// rx is private to the auto-generated API code. -// Exactly one of resp or err will be nil. If resp is non-nil, the caller must call resp.Body.Close. -func (rx *ResumableUpload) Upload(ctx context.Context) (resp *http.Response, err error) { - var pause time.Duration - backoff := rx.Backoff - if backoff == nil { - backoff = DefaultBackoffStrategy() - } - - for { - // Ensure that we return in the case of cancelled context, even if pause is 0. - if contextDone(ctx) { - return nil, ctx.Err() - } - select { - case <-ctx.Done(): - return nil, ctx.Err() - case <-time.After(pause): - } - - resp, err = rx.transferChunk(ctx) - - var status int - if resp != nil { - status = resp.StatusCode - } - - // Check if we should retry the request. - if shouldRetry(status, err) { - var retry bool - pause, retry = backoff.Pause() - if retry { - if resp != nil && resp.Body != nil { - resp.Body.Close() - } - continue - } - } - - // If the chunk was uploaded successfully, but there's still - // more to go, upload the next chunk without any delay. 
- if status == statusResumeIncomplete { - pause = 0 - backoff.Reset() - resp.Body.Close() - continue - } - - // It's possible for err and resp to both be non-nil here, but we expose a simpler - // contract to our callers: exactly one of resp and err will be non-nil. This means - // that any response body must be closed here before returning a non-nil error. - if err != nil { - if resp != nil && resp.Body != nil { - resp.Body.Close() - } - return nil, err - } - - return resp, nil - } -} diff --git a/vendor/google.golang.org/api/gensupport/retry.go b/vendor/google.golang.org/api/gensupport/retry.go deleted file mode 100644 index 7f83d1da99fa..000000000000 --- a/vendor/google.golang.org/api/gensupport/retry.go +++ /dev/null @@ -1,77 +0,0 @@ -package gensupport - -import ( - "io" - "net" - "net/http" - "time" - - "golang.org/x/net/context" -) - -// Retry invokes the given function, retrying it multiple times if the connection failed or -// the HTTP status response indicates the request should be attempted again. ctx may be nil. -func Retry(ctx context.Context, f func() (*http.Response, error), backoff BackoffStrategy) (*http.Response, error) { - for { - resp, err := f() - - var status int - if resp != nil { - status = resp.StatusCode - } - - // Return if we shouldn't retry. - pause, retry := backoff.Pause() - if !shouldRetry(status, err) || !retry { - return resp, err - } - - // Ensure the response body is closed, if any. - if resp != nil && resp.Body != nil { - resp.Body.Close() - } - - // Pause, but still listen to ctx.Done if context is not nil. - var done <-chan struct{} - if ctx != nil { - done = ctx.Done() - } - select { - case <-done: - return nil, ctx.Err() - case <-time.After(pause): - } - } -} - -// DefaultBackoffStrategy returns a default strategy to use for retrying failed upload requests. -func DefaultBackoffStrategy() BackoffStrategy { - return &ExponentialBackoff{ - Base: 250 * time.Millisecond, - Max: 16 * time.Second, - } -} - -// shouldRetry returns true if the HTTP response / error indicates that the -// request should be attempted again. -func shouldRetry(status int, err error) bool { - // Retry for 5xx response codes. - if 500 <= status && status < 600 { - return true - } - - // Retry on statusTooManyRequests. - if status == statusTooManyRequests { - return true - } - - // Retry on unexpected EOFs and temporary network errors. - if err == io.ErrUnexpectedEOF { - return true - } - if err, ok := err.(net.Error); ok { - return err.Temporary() - } - - return false -} diff --git a/vendor/google.golang.org/api/gensupport/send.go b/vendor/google.golang.org/api/gensupport/send.go deleted file mode 100644 index 3d22f638fc72..000000000000 --- a/vendor/google.golang.org/api/gensupport/send.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gensupport - -import ( - "net/http" - - "golang.org/x/net/context" - "golang.org/x/net/context/ctxhttp" -) - -// Hook is the type of a function that is called once before each HTTP request -// that is sent by a generated API. It returns a function that is called after -// the request returns. -// Hooks are not called if the context is nil. -type Hook func(ctx context.Context, req *http.Request) func(resp *http.Response) - -var hooks []Hook - -// RegisterHook registers a Hook to be called before each HTTP request by a -// generated API. Hooks are called in the order they are registered.
Each -// hook can return a function; if it is non-nil, it is called after the HTTP -// request returns. These functions are called in the reverse order. -// RegisterHook should not be called concurrently with itself or SendRequest. -func RegisterHook(h Hook) { - hooks = append(hooks, h) -} - -// SendRequest sends a single HTTP request using the given client. -// If ctx is non-nil, it calls all hooks, then sends the request with -// ctxhttp.Do, then calls any functions returned by the hooks in reverse order. -func SendRequest(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) { - if ctx == nil { - return client.Do(req) - } - // Call hooks in order of registration, store returned funcs. - post := make([]func(resp *http.Response), len(hooks)) - for i, h := range hooks { - fn := h(ctx, req) - post[i] = fn - } - - // Send request. - resp, err := ctxhttp.Do(ctx, client, req) - - // Call returned funcs in reverse order. - for i := len(post) - 1; i >= 0; i-- { - if fn := post[i]; fn != nil { - fn(resp) - } - } - return resp, err -} diff --git a/vendor/google.golang.org/api/googleapi/BUILD b/vendor/google.golang.org/api/googleapi/BUILD deleted file mode 100644 index f79203d37629..000000000000 --- a/vendor/google.golang.org/api/googleapi/BUILD +++ /dev/null @@ -1,32 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "googleapi.go", - "types.go", - ], - importpath = "google.golang.org/api/googleapi", - deps = ["//vendor/google.golang.org/api/googleapi/internal/uritemplates:go_default_library"], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [ - ":package-srcs", - "//vendor/google.golang.org/api/googleapi/internal/uritemplates:all-srcs", - ], - tags = ["automanaged"], -) diff --git a/vendor/google.golang.org/api/googleapi/googleapi.go b/vendor/google.golang.org/api/googleapi/googleapi.go deleted file mode 100644 index ad059b5d56d3..000000000000 --- a/vendor/google.golang.org/api/googleapi/googleapi.go +++ /dev/null @@ -1,406 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package googleapi contains the common code shared by all Google API -// libraries. -package googleapi - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "strings" - - "google.golang.org/api/googleapi/internal/uritemplates" -) - -// ContentTyper is an interface for Readers which know (or would like -// to override) their Content-Type. If a media body doesn't implement -// ContentTyper, the type is sniffed from the content using -// http.DetectContentType. -type ContentTyper interface { - ContentType() string -} - -// A SizeReaderAt is a ReaderAt with a Size method. -// An io.SectionReader implements SizeReaderAt. -type SizeReaderAt interface { - io.ReaderAt - Size() int64 -} - -// ServerResponse is embedded in each Do response and -// provides the HTTP status code and header sent by the server. -type ServerResponse struct { - // HTTPStatusCode is the server's response status code. - // When using a resource method's Do call, this will always be in the 2xx range. - HTTPStatusCode int - // Header contains the response header fields from the server. 
- Header http.Header -} - -const ( - Version = "0.5" - - // UserAgent is the header string used to identify this package. - UserAgent = "google-api-go-client/" + Version - - // The default chunk size to use for resumable uploads if not specified by the user. - DefaultUploadChunkSize = 8 * 1024 * 1024 - - // The minimum chunk size that can be used for resumable uploads. All - // user-specified chunk sizes must be a multiple of this value. - MinUploadChunkSize = 256 * 1024 -) - -// Error contains an error response from the server. -type Error struct { - // Code is the HTTP response status code and will always be populated. - Code int `json:"code"` - // Message is the server response message and is only populated when - // explicitly referenced by the JSON server response. - Message string `json:"message"` - // Body is the raw response returned by the server. - // It is often but not always JSON, depending on how the request fails. - Body string - // Header contains the response header fields from the server. - Header http.Header - - Errors []ErrorItem -} - -// ErrorItem is a detailed error code & message from the Google API frontend. -type ErrorItem struct { - // Reason is the typed error code. For example: "some_example". - Reason string `json:"reason"` - // Message is the human-readable description of the error. - Message string `json:"message"` -} - -func (e *Error) Error() string { - if len(e.Errors) == 0 && e.Message == "" { - return fmt.Sprintf("googleapi: got HTTP response code %d with body: %v", e.Code, e.Body) - } - var buf bytes.Buffer - fmt.Fprintf(&buf, "googleapi: Error %d: ", e.Code) - if e.Message != "" { - fmt.Fprintf(&buf, "%s", e.Message) - } - if len(e.Errors) == 0 { - return strings.TrimSpace(buf.String()) - } - if len(e.Errors) == 1 && e.Errors[0].Message == e.Message { - fmt.Fprintf(&buf, ", %s", e.Errors[0].Reason) - return buf.String() - } - fmt.Fprintln(&buf, "\nMore details:") - for _, v := range e.Errors { - fmt.Fprintf(&buf, "Reason: %s, Message: %s\n", v.Reason, v.Message) - } - return buf.String() -} - -type errorReply struct { - Error *Error `json:"error"` -} - -// CheckResponse returns an error (of type *Error) if the response -// status code is not 2xx. -func CheckResponse(res *http.Response) error { - if res.StatusCode >= 200 && res.StatusCode <= 299 { - return nil - } - slurp, err := ioutil.ReadAll(res.Body) - if err == nil { - jerr := new(errorReply) - err = json.Unmarshal(slurp, jerr) - if err == nil && jerr.Error != nil { - if jerr.Error.Code == 0 { - jerr.Error.Code = res.StatusCode - } - jerr.Error.Body = string(slurp) - return jerr.Error - } - } - return &Error{ - Code: res.StatusCode, - Body: string(slurp), - Header: res.Header, - } -} - -// IsNotModified reports whether err is the result of the -// server replying with http.StatusNotModified. -// Such error values are sometimes returned by "Do" methods -// on calls when If-None-Match is used. -func IsNotModified(err error) bool { - if err == nil { - return false - } - ae, ok := err.(*Error) - return ok && ae.Code == http.StatusNotModified -} - -// CheckMediaResponse returns an error (of type *Error) if the response -// status code is not 2xx. Unlike CheckResponse it does not assume the -// body is a JSON error document. -// It is the caller's responsibility to close res.Body.
-func CheckMediaResponse(res *http.Response) error { - if res.StatusCode >= 200 && res.StatusCode <= 299 { - return nil - } - slurp, _ := ioutil.ReadAll(io.LimitReader(res.Body, 1<<20)) - return &Error{ - Code: res.StatusCode, - Body: string(slurp), - } -} - -type MarshalStyle bool - -var WithDataWrapper = MarshalStyle(true) -var WithoutDataWrapper = MarshalStyle(false) - -func (wrap MarshalStyle) JSONReader(v interface{}) (io.Reader, error) { - buf := new(bytes.Buffer) - if wrap { - buf.Write([]byte(`{"data": `)) - } - err := json.NewEncoder(buf).Encode(v) - if err != nil { - return nil, err - } - if wrap { - buf.Write([]byte(`}`)) - } - return buf, nil -} - -// endingWithErrorReader reads from r until it returns an error. If the -// final error from r is io.EOF and e is non-nil, e is used instead. -type endingWithErrorReader struct { - r io.Reader - e error -} - -func (er endingWithErrorReader) Read(p []byte) (n int, err error) { - n, err = er.r.Read(p) - if err == io.EOF && er.e != nil { - err = er.e - } - return -} - -// countingWriter counts the number of bytes it receives to write, but -// discards them. -type countingWriter struct { - n *int64 -} - -func (w countingWriter) Write(p []byte) (int, error) { - *w.n += int64(len(p)) - return len(p), nil -} - -// ProgressUpdater is a function that is called upon every progress update of a resumable upload. -// This is the only part of a resumable upload (from googleapi) that is usable by the developer. -// The remaining usable pieces of resumable uploads are exposed in each auto-generated API. -type ProgressUpdater func(current, total int64) - -type MediaOption interface { - setOptions(o *MediaOptions) -} - -type contentTypeOption string - -func (ct contentTypeOption) setOptions(o *MediaOptions) { - o.ContentType = string(ct) - if o.ContentType == "" { - o.ForceEmptyContentType = true - } -} - -// ContentType returns a MediaOption which sets the Content-Type header for media uploads. -// If ctype is empty, the Content-Type header will be omitted. -func ContentType(ctype string) MediaOption { - return contentTypeOption(ctype) -} - -type chunkSizeOption int - -func (cs chunkSizeOption) setOptions(o *MediaOptions) { - size := int(cs) - if size%MinUploadChunkSize != 0 { - size += MinUploadChunkSize - (size % MinUploadChunkSize) - } - o.ChunkSize = size -} - -// ChunkSize returns a MediaOption which sets the chunk size for media uploads. -// size will be rounded up to the nearest multiple of 256K. -// Media which contains fewer than size bytes will be uploaded in a single request. -// Media which contains size bytes or more will be uploaded in separate chunks. -// If size is zero, media will be uploaded in a single request. -func ChunkSize(size int) MediaOption { - return chunkSizeOption(size) -} - -// MediaOptions stores options for customizing media upload. It is not used by developers directly. -type MediaOptions struct { - ContentType string - ForceEmptyContentType bool - - ChunkSize int -} - -// ProcessMediaOptions stores options from opts in a MediaOptions. -// It is not used by developers directly.
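
Before the ProcessMediaOptions implementation that follows, a small sketch of how the media options above compose. Everything here is exported API from this file; the expected values follow directly from the rounding arithmetic in chunkSizeOption.setOptions (256 KiB granularity).

package main

import (
    "fmt"

    "google.golang.org/api/googleapi"
)

func main() {
    // ChunkSize rounds up to a multiple of MinUploadChunkSize (256 KiB):
    // 1000000 bytes -> 4 * 262144 = 1048576 bytes.
    mo := googleapi.ProcessMediaOptions([]googleapi.MediaOption{
        googleapi.ContentType("application/octet-stream"),
        googleapi.ChunkSize(1000000),
    })
    fmt.Println(mo.ContentType) // application/octet-stream
    fmt.Println(mo.ChunkSize)   // 1048576

    // An empty ContentType marks the upload to omit the Content-Type header.
    mo = googleapi.ProcessMediaOptions([]googleapi.MediaOption{
        googleapi.ContentType(""),
    })
    fmt.Println(mo.ForceEmptyContentType) // true
}
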
-func ProcessMediaOptions(opts []MediaOption) *MediaOptions { - mo := &MediaOptions{ChunkSize: DefaultUploadChunkSize} - for _, o := range opts { - o.setOptions(mo) - } - return mo -} - -func ResolveRelative(basestr, relstr string) string { - u, _ := url.Parse(basestr) - rel, _ := url.Parse(relstr) - u = u.ResolveReference(rel) - us := u.String() - us = strings.Replace(us, "%7B", "{", -1) - us = strings.Replace(us, "%7D", "}", -1) - return us -} - -// Expand substitutes any {encoded} strings in the URL passed in using -// the map supplied. -// -// It stores the unescaped result in u.Path and the escaped result in u.RawPath, -// so the expanded parameters in the URL path are not re-encoded. -func Expand(u *url.URL, expansions map[string]string) { - escaped, unescaped, err := uritemplates.Expand(u.Path, expansions) - if err == nil { - u.Path = unescaped - u.RawPath = escaped - } -} - -// CloseBody is used to close res.Body. -// Prior to calling Close, it also tries to Read a small amount to see an EOF. -// Not seeing an EOF can prevent HTTP Transports from reusing connections. -func CloseBody(res *http.Response) { - if res == nil || res.Body == nil { - return - } - // Justification for 3 byte reads: two for up to "\r\n" after - // a JSON/XML document, and then 1 to see EOF if we haven't yet. - // TODO(bradfitz): detect Go 1.3+ and skip these reads. - // See https://codereview.appspot.com/58240043 - // and https://codereview.appspot.com/49570044 - buf := make([]byte, 1) - for i := 0; i < 3; i++ { - _, err := res.Body.Read(buf) - if err != nil { - break - } - } - res.Body.Close() - -} - -// VariantType returns the type name of the given variant. -// If the map doesn't contain the named key or the value is not a []interface{}, "" is returned. -// This is used to support "variant" APIs that can return one of a number of different types. -func VariantType(t map[string]interface{}) string { - s, _ := t["type"].(string) - return s -} - -// ConvertVariant uses the JSON encoder/decoder to fill in the struct 'dst' with the fields found in variant 'v'. -// This is used to support "variant" APIs that can return one of a number of different types. -// It reports whether the conversion was successful. -func ConvertVariant(v map[string]interface{}, dst interface{}) bool { - var buf bytes.Buffer - err := json.NewEncoder(&buf).Encode(v) - if err != nil { - return false - } - return json.Unmarshal(buf.Bytes(), dst) == nil -} - -// A Field names a field to be retrieved with a partial response. -// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// -// Partial responses can dramatically reduce the amount of data that must be sent to your application. -// In order to request partial responses, you can specify the full list of fields -// that your application needs by adding the Fields option to your request. -// -// Field strings use camelCase with leading lower-case characters to identify fields within the response.
-// -// For example, if your response has a "NextPageToken" and a slice of "Items" with "Id" fields, -// you could request just those fields like this: -// -// svc.Events.List().Fields("nextPageToken", "items/id").Do() -// -// or if you were also interested in each Item's "Updated" field, you can combine them like this: -// -// svc.Events.List().Fields("nextPageToken", "items(id,updated)").Do() -// -// More information about field formatting can be found here: -// https://developers.google.com/+/api/#fields-syntax -// -// Another way to find field names is through the Google API explorer: -// https://developers.google.com/apis-explorer/#p/ -type Field string - -// CombineFields combines fields into a single string. -func CombineFields(s []Field) string { - r := make([]string, len(s)) - for i, v := range s { - r[i] = string(v) - } - return strings.Join(r, ",") -} - -// A CallOption is an optional argument to an API call. -// It should be treated as an opaque value by users of Google APIs. -// -// A CallOption is something that configures an API call in a way that is -// not specific to that API; for instance, controlling the quota user for -// an API call is common across many APIs, and is thus a CallOption. -type CallOption interface { - Get() (key, value string) -} - -// QuotaUser returns a CallOption that will set the quota user for a call. -// The quota user can be used by server-side applications to control accounting. -// It can be an arbitrary string up to 40 characters, and will override UserIP -// if both are provided. -func QuotaUser(u string) CallOption { return quotaUser(u) } - -type quotaUser string - -func (q quotaUser) Get() (string, string) { return "quotaUser", string(q) } - -// UserIP returns a CallOption that will set the "userIp" parameter of a call. -// This should be the IP address of the originating request. -func UserIP(ip string) CallOption { return userIP(ip) } - -type userIP string - -func (i userIP) Get() (string, string) { return "userIp", string(i) } - -// Trace returns a CallOption that enables diagnostic tracing for a call. -// traceToken is an ID supplied by Google support. 
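
A short sketch of how the Field and CallOption helpers above (including Trace, whose implementation follows) turn into request parameters; the field names and option values are illustrative.

package main

import (
    "fmt"

    "google.golang.org/api/googleapi"
)

func main() {
    // Fields are joined with commas for the "fields" query parameter.
    fields := []googleapi.Field{"nextPageToken", "items(id,updated)"}
    fmt.Println(googleapi.CombineFields(fields)) // nextPageToken,items(id,updated)

    // CallOptions resolve to plain key/value query parameters via Get.
    opts := []googleapi.CallOption{
        googleapi.QuotaUser("some-user-key"),
        googleapi.UserIP("203.0.113.7"),
        googleapi.Trace("ABC123"), // token supplied by Google support
    }
    for _, o := range opts {
        k, v := o.Get()
        fmt.Printf("%s=%s\n", k, v)
    }
    // Output:
    // quotaUser=some-user-key
    // userIp=203.0.113.7
    // trace=token:ABC123
}
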
-func Trace(traceToken string) CallOption { return traceTok(traceToken) } - -type traceTok string - -func (t traceTok) Get() (string, string) { return "trace", "token:" + string(t) } - -// TODO: Fields too diff --git a/vendor/google.golang.org/api/googleapi/internal/uritemplates/BUILD b/vendor/google.golang.org/api/googleapi/internal/uritemplates/BUILD deleted file mode 100644 index 3cc8b54dc8a8..000000000000 --- a/vendor/google.golang.org/api/googleapi/internal/uritemplates/BUILD +++ /dev/null @@ -1,28 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "uritemplates.go", - "utils.go", - ], - importpath = "google.golang.org/api/googleapi/internal/uritemplates", -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], -) diff --git a/vendor/google.golang.org/api/googleapi/internal/uritemplates/LICENSE b/vendor/google.golang.org/api/googleapi/internal/uritemplates/LICENSE deleted file mode 100644 index de9c88cb65cb..000000000000 --- a/vendor/google.golang.org/api/googleapi/internal/uritemplates/LICENSE +++ /dev/null @@ -1,18 +0,0 @@ -Copyright (c) 2013 Joshua Tacoma - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/google.golang.org/api/googleapi/internal/uritemplates/uritemplates.go b/vendor/google.golang.org/api/googleapi/internal/uritemplates/uritemplates.go deleted file mode 100644 index 63bf0538301a..000000000000 --- a/vendor/google.golang.org/api/googleapi/internal/uritemplates/uritemplates.go +++ /dev/null @@ -1,248 +0,0 @@ -// Copyright 2013 Joshua Tacoma. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package uritemplates is a level 3 implementation of RFC 6570 (URI -// Template, http://tools.ietf.org/html/rfc6570). -// uritemplates does not support composite values (in Go: slices or maps) -// and so does not qualify as a level 4 implementation. 
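
uritemplates is an internal package, so its expansion logic is reached through the exported googleapi.Expand and googleapi.ResolveRelative seen earlier. A sketch of how a generated client builds a request URL with them; the base URL, template, and expected outputs are illustrative, assuming the level-3 reserved expansion implemented below.

package main

import (
    "fmt"
    "net/url"

    "google.golang.org/api/googleapi"
)

func main() {
    // ResolveRelative joins a base URL with a still-templated path,
    // restoring the {...} expressions that url escaping would mangle.
    raw := googleapi.ResolveRelative("https://pubsub.googleapis.com/", "v1/{+topic}:publish")
    u, err := url.Parse(raw)
    if err != nil {
        panic(err)
    }

    // Expand substitutes {name} and {+name} expressions in u.Path, storing
    // the escaped form in u.RawPath. {+topic} is a reserved expansion, so
    // the slashes in the value survive while the space is percent-encoded.
    googleapi.Expand(u, map[string]string{
        "topic": "projects/my-proj/topics/my topic",
    })
    fmt.Println(u.Path)    // /v1/projects/my-proj/topics/my topic:publish
    fmt.Println(u.RawPath) // /v1/projects/my-proj/topics/my%20topic:publish
}
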
-package uritemplates - -import ( - "bytes" - "errors" - "regexp" - "strconv" - "strings" -) - -var ( - unreserved = regexp.MustCompile("[^A-Za-z0-9\\-._~]") - reserved = regexp.MustCompile("[^A-Za-z0-9\\-._~:/?#[\\]@!$&'()*+,;=]") - validname = regexp.MustCompile("^([A-Za-z0-9_\\.]|%[0-9A-Fa-f][0-9A-Fa-f])+$") - hex = []byte("0123456789ABCDEF") -) - -func pctEncode(src []byte) []byte { - dst := make([]byte, len(src)*3) - for i, b := range src { - buf := dst[i*3 : i*3+3] - buf[0] = 0x25 - buf[1] = hex[b/16] - buf[2] = hex[b%16] - } - return dst -} - -// pairWriter is a convenience struct which allows escaped and unescaped -// versions of the template to be written in parallel. -type pairWriter struct { - escaped, unescaped bytes.Buffer -} - -// Write writes the provided string directly without any escaping. -func (w *pairWriter) Write(s string) { - w.escaped.WriteString(s) - w.unescaped.WriteString(s) -} - -// Escape writes the provided string, escaping the string for the -// escaped output. -func (w *pairWriter) Escape(s string, allowReserved bool) { - w.unescaped.WriteString(s) - if allowReserved { - w.escaped.Write(reserved.ReplaceAllFunc([]byte(s), pctEncode)) - } else { - w.escaped.Write(unreserved.ReplaceAllFunc([]byte(s), pctEncode)) - } -} - -// Escaped returns the escaped string. -func (w *pairWriter) Escaped() string { - return w.escaped.String() -} - -// Unescaped returns the unescaped string. -func (w *pairWriter) Unescaped() string { - return w.unescaped.String() -} - -// A uriTemplate is a parsed representation of a URI template. -type uriTemplate struct { - raw string - parts []templatePart -} - -// parse parses a URI template string into a uriTemplate object. -func parse(rawTemplate string) (*uriTemplate, error) { - split := strings.Split(rawTemplate, "{") - parts := make([]templatePart, len(split)*2-1) - for i, s := range split { - if i == 0 { - if strings.Contains(s, "}") { - return nil, errors.New("unexpected }") - } - parts[i].raw = s - continue - } - subsplit := strings.Split(s, "}") - if len(subsplit) != 2 { - return nil, errors.New("malformed template") - } - expression := subsplit[0] - var err error - parts[i*2-1], err = parseExpression(expression) - if err != nil { - return nil, err - } - parts[i*2].raw = subsplit[1] - } - return &uriTemplate{ - raw: rawTemplate, - parts: parts, - }, nil -} - -type templatePart struct { - raw string - terms []templateTerm - first string - sep string - named bool - ifemp string - allowReserved bool -} - -type templateTerm struct { - name string - explode bool - truncate int -} - -func parseExpression(expression string) (result templatePart, err error) { - switch expression[0] { - case '+': - result.sep = "," - result.allowReserved = true - expression = expression[1:] - case '.': - result.first = "." - result.sep = "." - expression = expression[1:] - case '/': - result.first = "/" - result.sep = "/" - expression = expression[1:] - case ';': - result.first = ";" - result.sep = ";" - result.named = true - expression = expression[1:] - case '?': - result.first = "?" 
- result.sep = "&" - result.named = true - result.ifemp = "=" - expression = expression[1:] - case '&': - result.first = "&" - result.sep = "&" - result.named = true - result.ifemp = "=" - expression = expression[1:] - case '#': - result.first = "#" - result.sep = "," - result.allowReserved = true - expression = expression[1:] - default: - result.sep = "," - } - rawterms := strings.Split(expression, ",") - result.terms = make([]templateTerm, len(rawterms)) - for i, raw := range rawterms { - result.terms[i], err = parseTerm(raw) - if err != nil { - break - } - } - return result, err -} - -func parseTerm(term string) (result templateTerm, err error) { - // TODO(djd): Remove "*" suffix parsing once we check that no APIs have - // mistakenly used that attribute. - if strings.HasSuffix(term, "*") { - result.explode = true - term = term[:len(term)-1] - } - split := strings.Split(term, ":") - if len(split) == 1 { - result.name = term - } else if len(split) == 2 { - result.name = split[0] - var parsed int64 - parsed, err = strconv.ParseInt(split[1], 10, 0) - result.truncate = int(parsed) - } else { - err = errors.New("multiple colons in same term") - } - if !validname.MatchString(result.name) { - err = errors.New("not a valid name: " + result.name) - } - if result.explode && result.truncate > 0 { - err = errors.New("both explode and prefix modifiers on same term") - } - return result, err -} - -// Expand expands a URI template with a set of values to produce the -// resultant URI. Two forms of the result are returned: one with all the -// elements escaped, and one with the elements unescaped. -func (t *uriTemplate) Expand(values map[string]string) (escaped, unescaped string) { - var w pairWriter - for _, p := range t.parts { - p.expand(&w, values) - } - return w.Escaped(), w.Unescaped() -} - -func (tp *templatePart) expand(w *pairWriter, values map[string]string) { - if len(tp.raw) > 0 { - w.Write(tp.raw) - return - } - var first = true - for _, term := range tp.terms { - value, exists := values[term.name] - if !exists { - continue - } - if first { - w.Write(tp.first) - first = false - } else { - w.Write(tp.sep) - } - tp.expandString(w, term, value) - } -} - -func (tp *templatePart) expandName(w *pairWriter, name string, empty bool) { - if tp.named { - w.Write(name) - if empty { - w.Write(tp.ifemp) - } else { - w.Write("=") - } - } -} - -func (tp *templatePart) expandString(w *pairWriter, t templateTerm, s string) { - if len(s) > t.truncate && t.truncate > 0 { - s = s[:t.truncate] - } - tp.expandName(w, t.name, len(s) == 0) - w.Escape(s, tp.allowReserved) -} diff --git a/vendor/google.golang.org/api/googleapi/internal/uritemplates/utils.go b/vendor/google.golang.org/api/googleapi/internal/uritemplates/utils.go deleted file mode 100644 index 2e70b81543d0..000000000000 --- a/vendor/google.golang.org/api/googleapi/internal/uritemplates/utils.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uritemplates - -// Expand parses then expands a URI template with a set of values to produce -// the resultant URI. Two forms of the result are returned: one with all the -// elements escaped, and one with the elements unescaped.
-func Expand(path string, values map[string]string) (escaped, unescaped string, err error) { - template, err := parse(path) - if err != nil { - return "", "", err - } - escaped, unescaped = template.Expand(values) - return escaped, unescaped, nil -} diff --git a/vendor/google.golang.org/api/googleapi/types.go b/vendor/google.golang.org/api/googleapi/types.go deleted file mode 100644 index a02b4b0716bc..000000000000 --- a/vendor/google.golang.org/api/googleapi/types.go +++ /dev/null @@ -1,182 +0,0 @@ -// Copyright 2013 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package googleapi - -import ( - "encoding/json" - "strconv" -) - -// Int64s is a slice of int64s that marshal as quoted strings in JSON. -type Int64s []int64 - -func (q *Int64s) UnmarshalJSON(raw []byte) error { - *q = (*q)[:0] - var ss []string - if err := json.Unmarshal(raw, &ss); err != nil { - return err - } - for _, s := range ss { - v, err := strconv.ParseInt(s, 10, 64) - if err != nil { - return err - } - *q = append(*q, int64(v)) - } - return nil -} - -// Int32s is a slice of int32s that marshal as quoted strings in JSON. -type Int32s []int32 - -func (q *Int32s) UnmarshalJSON(raw []byte) error { - *q = (*q)[:0] - var ss []string - if err := json.Unmarshal(raw, &ss); err != nil { - return err - } - for _, s := range ss { - v, err := strconv.ParseInt(s, 10, 32) - if err != nil { - return err - } - *q = append(*q, int32(v)) - } - return nil -} - -// Uint64s is a slice of uint64s that marshal as quoted strings in JSON. -type Uint64s []uint64 - -func (q *Uint64s) UnmarshalJSON(raw []byte) error { - *q = (*q)[:0] - var ss []string - if err := json.Unmarshal(raw, &ss); err != nil { - return err - } - for _, s := range ss { - v, err := strconv.ParseUint(s, 10, 64) - if err != nil { - return err - } - *q = append(*q, uint64(v)) - } - return nil -} - -// Uint32s is a slice of uint32s that marshal as quoted strings in JSON. -type Uint32s []uint32 - -func (q *Uint32s) UnmarshalJSON(raw []byte) error { - *q = (*q)[:0] - var ss []string - if err := json.Unmarshal(raw, &ss); err != nil { - return err - } - for _, s := range ss { - v, err := strconv.ParseUint(s, 10, 32) - if err != nil { - return err - } - *q = append(*q, uint32(v)) - } - return nil -} - -// Float64s is a slice of float64s that marshal as quoted strings in JSON. 
-type Float64s []float64 - -func (q *Float64s) UnmarshalJSON(raw []byte) error { - *q = (*q)[:0] - var ss []string - if err := json.Unmarshal(raw, &ss); err != nil { - return err - } - for _, s := range ss { - v, err := strconv.ParseFloat(s, 64) - if err != nil { - return err - } - *q = append(*q, float64(v)) - } - return nil -} - -func quotedList(n int, fn func(dst []byte, i int) []byte) ([]byte, error) { - dst := make([]byte, 0, 2+n*10) // somewhat arbitrary - dst = append(dst, '[') - for i := 0; i < n; i++ { - if i > 0 { - dst = append(dst, ',') - } - dst = append(dst, '"') - dst = fn(dst, i) - dst = append(dst, '"') - } - dst = append(dst, ']') - return dst, nil -} - -func (s Int64s) MarshalJSON() ([]byte, error) { - return quotedList(len(s), func(dst []byte, i int) []byte { - return strconv.AppendInt(dst, s[i], 10) - }) -} - -func (s Int32s) MarshalJSON() ([]byte, error) { - return quotedList(len(s), func(dst []byte, i int) []byte { - return strconv.AppendInt(dst, int64(s[i]), 10) - }) -} - -func (s Uint64s) MarshalJSON() ([]byte, error) { - return quotedList(len(s), func(dst []byte, i int) []byte { - return strconv.AppendUint(dst, s[i], 10) - }) -} - -func (s Uint32s) MarshalJSON() ([]byte, error) { - return quotedList(len(s), func(dst []byte, i int) []byte { - return strconv.AppendUint(dst, uint64(s[i]), 10) - }) -} - -func (s Float64s) MarshalJSON() ([]byte, error) { - return quotedList(len(s), func(dst []byte, i int) []byte { - return strconv.AppendFloat(dst, s[i], 'g', -1, 64) - }) -} - -/* - * Helper routines for simplifying the creation of optional fields of basic type. - */ - -// Bool is a helper routine that allocates a new bool value -// to store v and returns a pointer to it. -func Bool(v bool) *bool { return &v } - -// Int32 is a helper routine that allocates a new int32 value -// to store v and returns a pointer to it. -func Int32(v int32) *int32 { return &v } - -// Int64 is a helper routine that allocates a new int64 value -// to store v and returns a pointer to it. -func Int64(v int64) *int64 { return &v } - -// Float64 is a helper routine that allocates a new float64 value -// to store v and returns a pointer to it. -func Float64(v float64) *float64 { return &v } - -// Uint32 is a helper routine that allocates a new uint32 value -// to store v and returns a pointer to it. -func Uint32(v uint32) *uint32 { return &v } - -// Uint64 is a helper routine that allocates a new uint64 value -// to store v and returns a pointer to it. -func Uint64(v uint64) *uint64 { return &v } - -// String is a helper routine that allocates a new string value -// to store v and returns a pointer to it. 
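
A compact sketch of the quoted-number round trip these types implement, together with the pointer helpers for optional fields (String's body follows); all identifiers are exported API from this file.

package main

import (
    "encoding/json"
    "fmt"

    "google.golang.org/api/googleapi"
)

func main() {
    // Numeric slices marshal as quoted strings, matching the wire format
    // many Google APIs use for 64-bit values.
    b, err := json.Marshal(googleapi.Int64s{1, 9007199254740993})
    if err != nil {
        panic(err)
    }
    fmt.Println(string(b)) // ["1","9007199254740993"]

    var back googleapi.Int64s
    if err := json.Unmarshal(b, &back); err != nil {
        panic(err)
    }
    fmt.Println(back) // [1 9007199254740993]

    // Pointer helpers distinguish "unset" (nil) from genuine zero values.
    enabled := googleapi.Bool(true)
    count := googleapi.Int64(0)
    fmt.Println(*enabled, *count) // true 0
}
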
-func String(v string) *string { return &v } diff --git a/vendor/google.golang.org/api/internal/BUILD b/vendor/google.golang.org/api/internal/BUILD deleted file mode 100644 index 0b55a72d1222..000000000000 --- a/vendor/google.golang.org/api/internal/BUILD +++ /dev/null @@ -1,33 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "pool.go", - "settings.go", - ], - importpath = "google.golang.org/api/internal", - deps = [ - "//vendor/golang.org/x/oauth2:go_default_library", - "//vendor/google.golang.org/grpc:go_default_library", - "//vendor/google.golang.org/grpc/naming:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], -) diff --git a/vendor/google.golang.org/api/internal/pool.go b/vendor/google.golang.org/api/internal/pool.go deleted file mode 100644 index 4150feb6bb09..000000000000 --- a/vendor/google.golang.org/api/internal/pool.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2016 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package internal - -import ( - "errors" - "google.golang.org/grpc/naming" -) - -// PoolResolver provides a fixed list of addresses to load balance between -// and does not provide further updates. -type PoolResolver struct { - poolSize int - dialOpt *DialSettings - ch chan []*naming.Update -} - -// NewPoolResolver returns a PoolResolver. -// This is an EXPERIMENTAL API and may be changed or removed in the future. -func NewPoolResolver(size int, o *DialSettings) *PoolResolver { - return &PoolResolver{poolSize: size, dialOpt: o} -} - -// Resolve returns a Watcher for the endpoint defined by the DialSettings -// provided to NewPoolResolver. -func (r *PoolResolver) Resolve(target string) (naming.Watcher, error) { - if r.dialOpt.Endpoint == "" { - return nil, errors.New("No endpoint configured") - } - addrs := make([]*naming.Update, 0, r.poolSize) - for i := 0; i < r.poolSize; i++ { - addrs = append(addrs, &naming.Update{Op: naming.Add, Addr: r.dialOpt.Endpoint, Metadata: i}) - } - r.ch = make(chan []*naming.Update, 1) - r.ch <- addrs - return r, nil -} - -// Next returns a static list of updates on the first call, -// and blocks indefinitely until Close is called on subsequent calls. -func (r *PoolResolver) Next() ([]*naming.Update, error) { - return <-r.ch, nil -} - -func (r *PoolResolver) Close() { - close(r.ch) -} diff --git a/vendor/google.golang.org/api/internal/settings.go b/vendor/google.golang.org/api/internal/settings.go deleted file mode 100644 index 976280b21b10..000000000000 --- a/vendor/google.golang.org/api/internal/settings.go +++ /dev/null @@ -1,22 +0,0 @@ -// Package internal supports the options and transport packages.
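
The PoolResolver above is a static gRPC naming.Watcher: Resolve emits one batch of poolSize identical addresses and Next then blocks until Close. Because internal packages cannot be imported from outside this module, the sketch below is written as a hypothetical in-package test.

package internal

import (
    "testing"
)

func TestPoolResolverYieldsStaticAddresses(t *testing.T) {
    r := NewPoolResolver(3, &DialSettings{Endpoint: "pubsub.googleapis.com:443"})
    w, err := r.Resolve("ignored-target")
    if err != nil {
        t.Fatal(err)
    }
    // The first Next call returns the static list; later calls would block
    // until Close is called.
    updates, err := w.Next()
    if err != nil {
        t.Fatal(err)
    }
    if len(updates) != 3 {
        t.Fatalf("got %d updates, want 3", len(updates))
    }
    for _, u := range updates {
        // Same endpoint each time; Metadata carries the pool index 0..2.
        t.Logf("addr=%s metadata=%v", u.Addr, u.Metadata)
    }
    r.Close() // unblocks any pending Next call
}
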
-package internal - -import ( - "net/http" - - "golang.org/x/oauth2" - "google.golang.org/grpc" -) - -// DialSettings holds information needed to establish a connection with a -// Google API service. -type DialSettings struct { - Endpoint string - Scopes []string - ServiceAccountJSONFilename string // if set, TokenSource is ignored. - TokenSource oauth2.TokenSource - UserAgent string - HTTPClient *http.Client - GRPCDialOpts []grpc.DialOption - GRPCConn *grpc.ClientConn -} diff --git a/vendor/google.golang.org/api/iterator/BUILD b/vendor/google.golang.org/api/iterator/BUILD deleted file mode 100644 index 1530a9623937..000000000000 --- a/vendor/google.golang.org/api/iterator/BUILD +++ /dev/null @@ -1,25 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) - -go_library( - name = "go_default_library", - srcs = ["iterator.go"], - importpath = "google.golang.org/api/iterator", -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], -) diff --git a/vendor/google.golang.org/api/iterator/iterator.go b/vendor/google.golang.org/api/iterator/iterator.go deleted file mode 100644 index 0640c82311a7..000000000000 --- a/vendor/google.golang.org/api/iterator/iterator.go +++ /dev/null @@ -1,231 +0,0 @@ -// Copyright 2016 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package iterator provides support for standard Google API iterators. -// See https://github.com/GoogleCloudPlatform/gcloud-golang/wiki/Iterator-Guidelines. -package iterator - -import ( - "errors" - "fmt" - "reflect" -) - -// Done is returned by an iterator's Next method when the iteration is -// complete; when there are no more items to return. -var Done = errors.New("no more items in iterator") - -// We don't support mixed calls to Next and NextPage because they play -// with the paging state in incompatible ways. -var errMixed = errors.New("iterator: Next and NextPage called on same iterator") - -// PageInfo contains information about an iterator's paging state. -type PageInfo struct { - // Token is the token used to retrieve the next page of items from the - // API. You may set Token immediately after creating an iterator to - // begin iteration at a particular point. If Token is the empty string, - // the iterator will begin with the first eligible item. - // - // The result of setting Token after the first call to Next is undefined. - // - // After the underlying API method is called to retrieve a page of items, - // Token is set to the next-page token in the response. - Token string - - // MaxSize is the maximum number of items returned by a call to the API. - // Set MaxSize as a hint to optimize the buffering behavior of the iterator. - // If zero, the page size is determined by the underlying service. 
- // - // Use Pager to retrieve a page of a specific, exact size. - MaxSize int - - // The error state of the iterator. Manipulated by PageInfo.next and Pager. - // This is a latch: it starts as nil, and once set should never change. - err error - - // If true, no more calls to fetch should be made. Set to true when fetch - // returns an empty page token. The iterator is Done when this is true AND - // the buffer is empty. - atEnd bool - - // Function that fetches a page from the underlying service. It should pass - // the pageSize and pageToken arguments to the service, fill the buffer - // with the results from the call, and return the next-page token returned - // by the service. The function must not remove any existing items from the - // buffer. If the underlying RPC takes an int32 page size, pageSize should - // be silently truncated. - fetch func(pageSize int, pageToken string) (nextPageToken string, err error) - - // Function that returns the number of currently buffered items. - bufLen func() int - - // Function that returns the buffer, after setting the buffer variable to nil. - takeBuf func() interface{} - - // Set to true on first call to PageInfo.next or Pager.NextPage. Used to check - // for calls to both Next and NextPage with the same iterator. - nextCalled, nextPageCalled bool -} - -// NewPageInfo exposes internals for iterator implementations. -// It is not a stable interface. -var NewPageInfo = newPageInfo - -// If an iterator can support paging, its iterator-creating method should call -// this (via the NewPageInfo variable above). -// -// The fetch, bufLen and takeBuf arguments provide access to the -// iterator's internal slice of buffered items. They behave as described in -// PageInfo, above. -// -// The return value is the PageInfo.next method bound to the returned PageInfo value. -// (Returning it avoids exporting PageInfo.next.) -func newPageInfo(fetch func(int, string) (string, error), bufLen func() int, takeBuf func() interface{}) (*PageInfo, func() error) { - pi := &PageInfo{ - fetch: fetch, - bufLen: bufLen, - takeBuf: takeBuf, - } - return pi, pi.next -} - -// Remaining returns the number of items available before the iterator makes another API call. -func (pi *PageInfo) Remaining() int { return pi.bufLen() } - -// next provides support for an iterator's Next function. An iterator's Next -// should return the error returned by next if non-nil; else it can assume -// there is at least one item in its buffer, and it should return that item and -// remove it from the buffer. -func (pi *PageInfo) next() error { - pi.nextCalled = true - if pi.err != nil { // Once we get an error, always return it. - // TODO(jba): fix so users can retry on transient errors? Probably not worth it. - return pi.err - } - if pi.nextPageCalled { - pi.err = errMixed - return pi.err - } - // Loop until we get some items or reach the end. - for pi.bufLen() == 0 && !pi.atEnd { - if err := pi.fill(pi.MaxSize); err != nil { - pi.err = err - return pi.err - } - if pi.Token == "" { - pi.atEnd = true - } - } - // Either the buffer is non-empty or pi.atEnd is true (or both). - if pi.bufLen() == 0 { - // The buffer is empty and pi.atEnd is true, i.e. the service has no - // more items. - pi.err = Done - } - return pi.err -} - -// Call the service to fill the buffer, using size and pi.Token. Set pi.Token to the -// next-page token returned by the call. -// If fill returns a non-nil error, the buffer will be empty.
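
Taken together, fetch, bufLen, takeBuf, and PageInfo.next (with fill, whose body follows) are everything a concrete iterator needs. Below is a sketch of a hypothetical in-memory iterator wired up through iterator.NewPageInfo; the type and helper names are invented for illustration.

package main

import (
    "fmt"
    "strconv"

    "google.golang.org/api/iterator"
)

// stringIterator iterates over a fake paginated "service" backed by a slice,
// following the contract described above: fetch fills the buffer, bufLen
// reports it, and takeBuf hands it off.
type stringIterator struct {
    pageInfo *iterator.PageInfo
    nextFunc func() error
    items    []string
    all      []string
}

func newStringIterator(all []string) *stringIterator {
    it := &stringIterator{all: all}
    it.pageInfo, it.nextFunc = iterator.NewPageInfo(
        it.fetch,
        func() int { return len(it.items) },
        func() interface{} { b := it.items; it.items = nil; return b },
    )
    return it
}

// fetch simulates one RPC: the page token is the offset into the backing
// slice, encoded as a string; an empty returned token means "no more pages".
func (it *stringIterator) fetch(pageSize int, pageToken string) (string, error) {
    start := 0
    if pageToken != "" {
        var err error
        if start, err = strconv.Atoi(pageToken); err != nil {
            return "", err
        }
    }
    if pageSize <= 0 || start+pageSize > len(it.all) {
        pageSize = len(it.all) - start
    }
    it.items = append(it.items, it.all[start:start+pageSize]...)
    if start+pageSize == len(it.all) {
        return "", nil
    }
    return strconv.Itoa(start + pageSize), nil
}

func (it *stringIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }

func (it *stringIterator) Next() (string, error) {
    if err := it.nextFunc(); err != nil {
        return "", err
    }
    item := it.items[0]
    it.items = it.items[1:]
    return item, nil
}

func main() {
    it := newStringIterator([]string{"a", "b", "c"})
    for {
        s, err := it.Next()
        if err == iterator.Done {
            break
        }
        if err != nil {
            panic(err)
        }
        fmt.Println(s)
    }
}

The same iterator also satisfies Pageable, so iterator.NewPager(it, 2, "") with successive NextPage(&page) calls would return two items at a time along with a resumable page token.
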
-func (pi *PageInfo) fill(size int) error { - tok, err := pi.fetch(size, pi.Token) - if err != nil { - pi.takeBuf() // clear the buffer - return err - } - pi.Token = tok - return nil -} - -// Pageable is implemented by iterators that support paging. -type Pageable interface { - // PageInfo returns paging information associated with the iterator. - PageInfo() *PageInfo -} - -// Pager supports retrieving iterator items a page at a time. -type Pager struct { - pageInfo *PageInfo - pageSize int -} - -// NewPager returns a pager that uses iter. Calls to its NextPage method will -// obtain exactly pageSize items, unless fewer remain. The pageToken argument -// indicates where to start the iteration. Pass the empty string to start at -// the beginning, or pass a token retrieved from a call to Pager.NextPage. -// -// If you use an iterator with a Pager, you must not call Next on the iterator. -func NewPager(iter Pageable, pageSize int, pageToken string) *Pager { - p := &Pager{ - pageInfo: iter.PageInfo(), - pageSize: pageSize, - } - p.pageInfo.Token = pageToken - if pageSize <= 0 { - p.pageInfo.err = errors.New("iterator: page size must be positive") - } - return p -} - -// NextPage retrieves a sequence of items from the iterator and appends them -// to slicep, which must be a pointer to a slice of the iterator's item type. -// Exactly p.pageSize items will be appended, unless fewer remain. -// -// The first return value is the page token to use for the next page of items. -// If empty, there are no more pages. Aside from checking for the end of the -// iteration, the returned page token is only needed if the iteration is to be -// resumed at a later time, in another context (possibly another process). -// -// The second return value is non-nil if an error occurred. It will never be -// the special iterator sentinel value Done. To recognize the end of the -// iteration, compare nextPageToken to the empty string. -// -// It is possible for NextPage to return a single zero-length page along with -// an empty page token when there are no more items in the iteration. -func (p *Pager) NextPage(slicep interface{}) (nextPageToken string, err error) { - p.pageInfo.nextPageCalled = true - if p.pageInfo.err != nil { - return "", p.pageInfo.err - } - if p.pageInfo.nextCalled { - p.pageInfo.err = errMixed - return "", p.pageInfo.err - } - if p.pageInfo.bufLen() > 0 { - return "", errors.New("must call NextPage with an empty buffer") - } - // The buffer must be empty here, so takeBuf is a no-op. We call it just to get - // the buffer's type.
- wantSliceType := reflect.PtrTo(reflect.ValueOf(p.pageInfo.takeBuf()).Type()) - if slicep == nil { - return "", errors.New("nil passed to Pager.NextPage") - } - vslicep := reflect.ValueOf(slicep) - if vslicep.Type() != wantSliceType { - return "", fmt.Errorf("slicep should be of type %s, got %T", wantSliceType, slicep) - } - for p.pageInfo.bufLen() < p.pageSize { - if err := p.pageInfo.fill(p.pageSize - p.pageInfo.bufLen()); err != nil { - p.pageInfo.err = err - return "", p.pageInfo.err - } - if p.pageInfo.Token == "" { - break - } - } - e := vslicep.Elem() - e.Set(reflect.AppendSlice(e, reflect.ValueOf(p.pageInfo.takeBuf()))) - return p.pageInfo.Token, nil -} diff --git a/vendor/google.golang.org/api/option/BUILD b/vendor/google.golang.org/api/option/BUILD deleted file mode 100644 index 37d82b7dcc9c..000000000000 --- a/vendor/google.golang.org/api/option/BUILD +++ /dev/null @@ -1,30 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) - -go_library( - name = "go_default_library", - srcs = ["option.go"], - importpath = "google.golang.org/api/option", - deps = [ - "//vendor/golang.org/x/oauth2:go_default_library", - "//vendor/google.golang.org/api/internal:go_default_library", - "//vendor/google.golang.org/grpc:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], -) diff --git a/vendor/google.golang.org/api/option/option.go b/vendor/google.golang.org/api/option/option.go deleted file mode 100644 index b935e6d12383..000000000000 --- a/vendor/google.golang.org/api/option/option.go +++ /dev/null @@ -1,132 +0,0 @@ -// Package option contains options for Google API clients. -package option - -import ( - "net/http" - - "golang.org/x/oauth2" - "google.golang.org/api/internal" - "google.golang.org/grpc" -) - -// A ClientOption is an option for a Google API client. -type ClientOption interface { - Apply(*internal.DialSettings) -} - -// WithTokenSource returns a ClientOption that specifies an OAuth2 token -// source to be used as the basis for authentication. -func WithTokenSource(s oauth2.TokenSource) ClientOption { - return withTokenSource{s} -} - -type withTokenSource struct{ ts oauth2.TokenSource } - -func (w withTokenSource) Apply(o *internal.DialSettings) { - o.TokenSource = w.ts -} - -// WithServiceAccountFile returns a ClientOption that uses a Google service -// account credentials file to authenticate. -// Use WithTokenSource with a token source created from -// golang.org/x/oauth2/google.JWTConfigFromJSON -// if reading the file from disk is not an option. -func WithServiceAccountFile(filename string) ClientOption { - return withServiceAccountFile(filename) -} - -type withServiceAccountFile string - -func (w withServiceAccountFile) Apply(o *internal.DialSettings) { - o.ServiceAccountJSONFilename = string(w) -} - -// WithEndpoint returns a ClientOption that overrides the default endpoint -// to be used for a service. -func WithEndpoint(url string) ClientOption { - return withEndpoint(url) -} - -type withEndpoint string - -func (w withEndpoint) Apply(o *internal.DialSettings) { - o.Endpoint = string(w) -} - -// WithScopes returns a ClientOption that overrides the default OAuth2 scopes -// to be used for a service. 
-func WithScopes(scope ...string) ClientOption { - return withScopes(scope) -} - -type withScopes []string - -func (w withScopes) Apply(o *internal.DialSettings) { - s := make([]string, len(w)) - copy(s, w) - o.Scopes = s -} - -// WithUserAgent returns a ClientOption that sets the User-Agent. -func WithUserAgent(ua string) ClientOption { - return withUA(ua) -} - -type withUA string - -func (w withUA) Apply(o *internal.DialSettings) { o.UserAgent = string(w) } - -// WithHTTPClient returns a ClientOption that specifies the HTTP client to use -// as the basis of communications. This option may only be used with services -// that support HTTP as their communication transport. When used, the -// WithHTTPClient option takes precedence over all other supplied options. -func WithHTTPClient(client *http.Client) ClientOption { - return withHTTPClient{client} -} - -type withHTTPClient struct{ client *http.Client } - -func (w withHTTPClient) Apply(o *internal.DialSettings) { - o.HTTPClient = w.client -} - -// WithGRPCConn returns a ClientOption that specifies the gRPC client -// connection to use as the basis of communications. This option may only be -// used with services that support gRPC as their communication transport. When -// used, the WithGRPCConn option takes precedence over all other supplied -// options. -func WithGRPCConn(conn *grpc.ClientConn) ClientOption { - return withGRPCConn{conn} -} - -type withGRPCConn struct{ conn *grpc.ClientConn } - -func (w withGRPCConn) Apply(o *internal.DialSettings) { - o.GRPCConn = w.conn -} - -// WithGRPCDialOption returns a ClientOption that appends a new grpc.DialOption -// to an underlying gRPC dial. It does not work with WithGRPCConn. -func WithGRPCDialOption(opt grpc.DialOption) ClientOption { - return withGRPCDialOption{opt} -} - -type withGRPCDialOption struct{ opt grpc.DialOption } - -func (w withGRPCDialOption) Apply(o *internal.DialSettings) { - o.GRPCDialOpts = append(o.GRPCDialOpts, w.opt) -} - -// WithGRPCConnectionPool returns a ClientOption that creates a pool of gRPC -// connections that requests will be balanced between. -// This is an EXPERIMENTAL API and may be changed or removed in the future.
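
ClientOptions are inert values: whatever transport constructor consumes them simply applies each one to an internal.DialSettings. A sketch of that step (the package and function here are hypothetical, and internal is only importable from within this module):

package transport // hypothetical consumer inside this module

import (
    "google.golang.org/api/internal"
    "google.golang.org/api/option"
)

// newSettings shows the whole ClientOption contract: each option mutates
// exactly one field of internal.DialSettings via Apply.
func newSettings(opts ...option.ClientOption) *internal.DialSettings {
    var ds internal.DialSettings
    for _, o := range opts {
        o.Apply(&ds)
    }
    return &ds
}

// Example wiring, e.g. from a client constructor:
//
//	ds := newSettings(
//		option.WithEndpoint("pubsub.googleapis.com:443"),
//		option.WithScopes("https://www.googleapis.com/auth/pubsub"),
//		option.WithUserAgent("my-app/1.0"),
//	)
//	_ = ds.Endpoint // "pubsub.googleapis.com:443"
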
-func WithGRPCConnectionPool(size int) ClientOption { - return withGRPCConnectionPool(size) -} - -type withGRPCConnectionPool int - -func (w withGRPCConnectionPool) Apply(o *internal.DialSettings) { - balancer := grpc.RoundRobin(internal.NewPoolResolver(int(w), o)) - o.GRPCDialOpts = append(o.GRPCDialOpts, grpc.WithBalancer(balancer)) -} diff --git a/vendor/google.golang.org/api/pubsub/v1/BUILD b/vendor/google.golang.org/api/pubsub/v1/BUILD deleted file mode 100644 index 7eeb7f94e19e..000000000000 --- a/vendor/google.golang.org/api/pubsub/v1/BUILD +++ /dev/null @@ -1,31 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) - -go_library( - name = "go_default_library", - srcs = ["pubsub-gen.go"], - importpath = "google.golang.org/api/pubsub/v1", - deps = [ - "//vendor/golang.org/x/net/context:go_default_library", - "//vendor/golang.org/x/net/context/ctxhttp:go_default_library", - "//vendor/google.golang.org/api/gensupport:go_default_library", - "//vendor/google.golang.org/api/googleapi:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], -) diff --git a/vendor/google.golang.org/api/pubsub/v1/pubsub-api.json b/vendor/google.golang.org/api/pubsub/v1/pubsub-api.json deleted file mode 100644 index c18125b16f78..000000000000 --- a/vendor/google.golang.org/api/pubsub/v1/pubsub-api.json +++ /dev/null @@ -1,1040 +0,0 @@ -{ - "kind": "discovery#restDescription", - "etag": "\"jQLIOHBVnDZie4rQHGH1WJF-INE/liyzgLngirW3xU7Tt2Pd1AnSK1c\"", - "discoveryVersion": "v1", - "id": "pubsub:v1", - "name": "pubsub", - "version": "v1", - "revision": "20160317", - "title": "Google Cloud Pub/Sub API", - "description": "Provides reliable, many-to-many, asynchronous messaging between applications.", - "ownerDomain": "google.com", - "ownerName": "Google", - "icons": { - "x16": "http://www.google.com/images/icons/product/search-16.gif", - "x32": "http://www.google.com/images/icons/product/search-32.gif" - }, - "documentationLink": "https://cloud.google.com/pubsub/docs", - "protocol": "rest", - "baseUrl": "https://pubsub.googleapis.com/", - "basePath": "", - "rootUrl": "https://pubsub.googleapis.com/", - "servicePath": "", - "batchPath": "batch", - "parameters": { - "access_token": { - "type": "string", - "description": "OAuth access token.", - "location": "query" - }, - "alt": { - "type": "string", - "description": "Data format for response.", - "default": "json", - "enumDescriptions": [ - "Responses with Content-Type of application/json", - "Media download with context-dependent Content-Type", - "Responses with Content-Type of application/x-protobuf" - ], - "location": "query" - }, - "bearer_token": { - "type": "string", - "description": "OAuth bearer token.", - "location": "query" - }, - "callback": { - "type": "string", - "description": "JSONP", - "location": "query" - }, - "fields": { - "type": "string", - "description": "Selector specifying which fields to include in a partial response.", - "location": "query" - }, - "key": { - "type": "string", - "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. 
Required unless you provide an OAuth 2.0 token.", - "location": "query" - }, - "oauth_token": { - "type": "string", - "description": "OAuth 2.0 token for the current user.", - "location": "query" - }, - "pp": { - "type": "boolean", - "description": "Pretty-print response.", - "default": "true", - "location": "query" - }, - "prettyPrint": { - "type": "boolean", - "description": "Returns response with indentations and line breaks.", - "default": "true", - "location": "query" - }, - "quotaUser": { - "type": "string", - "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.", - "location": "query" - }, - "upload_protocol": { - "type": "string", - "description": "Upload protocol for media (e.g. \"raw\", \"multipart\").", - "location": "query" - }, - "uploadType": { - "type": "string", - "description": "Legacy upload protocol for media (e.g. \"media\", \"multipart\").", - "location": "query" - }, - "$.xgafv": { - "type": "string", - "description": "V1 error format.", - "enumDescriptions": [ - "v1 error format", - "v2 error format" - ], - "location": "query" - } - }, - "auth": { - "oauth2": { - "scopes": { - "https://www.googleapis.com/auth/cloud-platform": { - "description": "View and manage your data across Google Cloud Platform services" - }, - "https://www.googleapis.com/auth/pubsub": { - "description": "View and manage Pub/Sub topics and subscriptions" - } - } - } - }, - "schemas": { - "SetIamPolicyRequest": { - "id": "SetIamPolicyRequest", - "type": "object", - "description": "Request message for `SetIamPolicy` method.", - "properties": { - "policy": { - "$ref": "Policy", - "description": "REQUIRED: The complete policy to be applied to the `resource`. The size of the policy is limited to a few 10s of KB. An empty policy is a valid policy but certain Cloud Platform services (such as Projects) might reject them." - } - } - }, - "Policy": { - "id": "Policy", - "type": "object", - "description": "Defines an Identity and Access Management (IAM) policy. It is used to specify access control policies for Cloud Platform resources. A `Policy` consists of a list of `bindings`. A `Binding` binds a list of `members` to a `role`, where the members can be user accounts, Google groups, Google domains, and service accounts. A `role` is a named list of permissions defined by IAM. **Example** { \"bindings\": [ { \"role\": \"roles/owner\", \"members\": [ \"user:mike@example.com\", \"group:admins@example.com\", \"domain:google.com\", \"serviceAccount:my-other-app@appspot.gserviceaccount.com\", ] }, { \"role\": \"roles/viewer\", \"members\": [\"user:sean@example.com\"] } ] } For a description of IAM and its features, see the [IAM developer's guide](https://cloud.google.com/iam).", - "properties": { - "version": { - "type": "integer", - "description": "Version of the `Policy`. The default version is 0.", - "format": "int32" - }, - "bindings": { - "type": "array", - "description": "Associates a list of `members` to a `role`. Multiple `bindings` must not be specified for the same `role`. `bindings` with no members will result in an error.", - "items": { - "$ref": "Binding" - } - }, - "etag": { - "type": "string", - "description": "`etag` is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. 
It is strongly suggested that systems make use of the `etag` in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An `etag` is returned in the response to `getIamPolicy`, and systems are expected to put that etag in the request to `setIamPolicy` to ensure that their change will be applied to the same version of the policy. If no `etag` is provided in the call to `setIamPolicy`, then the existing policy is overwritten blindly.", - "format": "byte" - } - } - }, - "Binding": { - "id": "Binding", - "type": "object", - "description": "Associates `members` with a `role`.", - "properties": { - "role": { - "type": "string", - "description": "Role that is assigned to `members`. For example, `roles/viewer`, `roles/editor`, or `roles/owner`. Required" - }, - "members": { - "type": "array", - "description": "Specifies the identities requesting access for a Cloud Platform resource. `members` can have the following values: * `allUsers`: A special identifier that represents anyone who is on the internet; with or without a Google account. * `allAuthenticatedUsers`: A special identifier that represents anyone who is authenticated with a Google account or a service account. * `user:{emailid}`: An email address that represents a specific Google account. For example, `alice@gmail.com` or `joe@example.com`. * `serviceAccount:{emailid}`: An email address that represents a service account. For example, `my-other-app@appspot.gserviceaccount.com`. * `group:{emailid}`: An email address that represents a Google group. For example, `admins@example.com`. * `domain:{domain}`: A Google Apps domain name that represents all the users of that domain. For example, `google.com` or `example.com`.", - "items": { - "type": "string" - } - } - } - }, - "TestIamPermissionsRequest": { - "id": "TestIamPermissionsRequest", - "type": "object", - "description": "Request message for `TestIamPermissions` method.", - "properties": { - "permissions": { - "type": "array", - "description": "The set of permissions to check for the `resource`. Permissions with wildcards (such as '*' or 'storage.*') are not allowed. For more information see IAM Overview.", - "items": { - "type": "string" - } - } - } - }, - "TestIamPermissionsResponse": { - "id": "TestIamPermissionsResponse", - "type": "object", - "description": "Response message for `TestIamPermissions` method.", - "properties": { - "permissions": { - "type": "array", - "description": "A subset of `TestPermissionsRequest.permissions` that the caller is allowed.", - "items": { - "type": "string" - } - } - } - }, - "Topic": { - "id": "Topic", - "type": "object", - "description": "A topic resource.", - "properties": { - "name": { - "type": "string", - "description": "The name of the topic. It must have the format `\"projects/{project}/topics/{topic}\"`. `{topic}` must start with a letter, and contain only letters (`[A-Za-z]`), numbers (`[0-9]`), dashes (`-`), underscores (`_`), periods (`.`), tildes (`~`), plus (`+`) or percent signs (`%`). It must be between 3 and 255 characters in length, and it must not start with `\"goog\"`." - } - } - }, - "PublishRequest": { - "id": "PublishRequest", - "type": "object", - "description": "Request for the Publish method.", - "properties": { - "messages": { - "type": "array", - "description": "The messages to publish.", - "items": { - "$ref": "PubsubMessage" - } - } - } - }, - "PubsubMessage": { - "id": "PubsubMessage", - "type": "object", - "description": "A message data and its attributes. 
The message payload must not be empty; it must contain either a non-empty data field, or at least one attribute.", - "properties": { - "data": { - "type": "string", - "description": "The message payload. For JSON requests, the value of this field must be base64-encoded.", - "format": "byte" - }, - "attributes": { - "type": "object", - "description": "Optional attributes for this message.", - "additionalProperties": { - "type": "string" - } - }, - "messageId": { - "type": "string", - "description": "ID of this message, assigned by the server when the message is published. Guaranteed to be unique within the topic. This value may be read by a subscriber that receives a `PubsubMessage` via a `Pull` call or a push delivery. It must not be populated by the publisher in a `Publish` call." - }, - "publishTime": { - "type": "string", - "description": "The time at which the message was published, populated by the server when it receives the `Publish` call. It must not be populated by the publisher in a `Publish` call." - } - } - }, - "PublishResponse": { - "id": "PublishResponse", - "type": "object", - "description": "Response for the `Publish` method.", - "properties": { - "messageIds": { - "type": "array", - "description": "The server-assigned ID of each published message, in the same order as the messages in the request. IDs are guaranteed to be unique within the topic.", - "items": { - "type": "string" - } - } - } - }, - "ListTopicsResponse": { - "id": "ListTopicsResponse", - "type": "object", - "description": "Response for the `ListTopics` method.", - "properties": { - "topics": { - "type": "array", - "description": "The resulting topics.", - "items": { - "$ref": "Topic" - } - }, - "nextPageToken": { - "type": "string", - "description": "If not empty, indicates that there may be more topics that match the request; this value should be passed in a new `ListTopicsRequest`." - } - } - }, - "ListTopicSubscriptionsResponse": { - "id": "ListTopicSubscriptionsResponse", - "type": "object", - "description": "Response for the `ListTopicSubscriptions` method.", - "properties": { - "subscriptions": { - "type": "array", - "description": "The names of the subscriptions that match the request.", - "items": { - "type": "string" - } - }, - "nextPageToken": { - "type": "string", - "description": "If not empty, indicates that there may be more subscriptions that match the request; this value should be passed in a new `ListTopicSubscriptionsRequest` to get more subscriptions." - } - } - }, - "Empty": { - "id": "Empty", - "type": "object", - "description": "A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for `Empty` is empty JSON object `{}`." - }, - "Subscription": { - "id": "Subscription", - "type": "object", - "description": "A subscription resource.", - "properties": { - "name": { - "type": "string", - "description": "The name of the subscription. It must have the format `\"projects/{project}/subscriptions/{subscription}\"`. `{subscription}` must start with a letter, and contain only letters (`[A-Za-z]`), numbers (`[0-9]`), dashes (`-`), underscores (`_`), periods (`.`), tildes (`~`), plus (`+`) or percent signs (`%`). It must be between 3 and 255 characters in length, and it must not start with `\"goog\"`." 
- }, - "topic": { - "type": "string", - "description": "The name of the topic from which this subscription is receiving messages. The value of this field will be `_deleted-topic_` if the topic has been deleted." - }, - "pushConfig": { - "$ref": "PushConfig", - "description": "If push delivery is used with this subscription, this field is used to configure it. An empty `pushConfig` signifies that the subscriber will pull and ack messages using API methods." - }, - "ackDeadlineSeconds": { - "type": "integer", - "description": "This value is the maximum time after a subscriber receives a message before the subscriber should acknowledge the message. After message delivery but before the ack deadline expires and before the message is acknowledged, it is an outstanding message and will not be delivered again during that time (on a best-effort basis). For pull subscriptions, this value is used as the initial value for the ack deadline. To override this value for a given message, call `ModifyAckDeadline` with the corresponding `ack_id` if using pull. For push delivery, this value is also used to set the request timeout for the call to the push endpoint. If the subscriber never acknowledges the message, the Pub/Sub system will eventually redeliver the message. If this parameter is not set, the default value of 10 seconds is used.", - "format": "int32" - } - } - }, - "PushConfig": { - "id": "PushConfig", - "type": "object", - "description": "Configuration for a push delivery endpoint.", - "properties": { - "pushEndpoint": { - "type": "string", - "description": "A URL locating the endpoint to which messages should be pushed. For example, a Webhook endpoint might use \"https://example.com/push\"." - }, - "attributes": { - "type": "object", - "description": "Endpoint configuration attributes. Every endpoint has a set of API supported attributes that can be used to control different aspects of the message delivery. The currently supported attribute is `x-goog-version`, which you can use to change the format of the push message. This attribute indicates the version of the data expected by the endpoint. This controls the shape of the envelope (i.e. its fields and metadata). The endpoint version is based on the version of the Pub/Sub API. If not present during the `CreateSubscription` call, it will default to the version of the API used to make such call. If not present during a `ModifyPushConfig` call, its value will not be changed. `GetSubscription` calls will always return a valid version, even if the subscription was created without this attribute. The possible values for this attribute are: * `v1beta1`: uses the push format defined in the v1beta1 Pub/Sub API. * `v1` or `v1beta2`: uses the push format defined in the v1 Pub/Sub API.", - "additionalProperties": { - "type": "string" - } - } - } - }, - "ListSubscriptionsResponse": { - "id": "ListSubscriptionsResponse", - "type": "object", - "description": "Response for the `ListSubscriptions` method.", - "properties": { - "subscriptions": { - "type": "array", - "description": "The subscriptions that match the request.", - "items": { - "$ref": "Subscription" - } - }, - "nextPageToken": { - "type": "string", - "description": "If not empty, indicates that there may be more subscriptions that match the request; this value should be passed in a new `ListSubscriptionsRequest` to get more subscriptions." 
- } - } - }, - "ModifyAckDeadlineRequest": { - "id": "ModifyAckDeadlineRequest", - "type": "object", - "description": "Request for the ModifyAckDeadline method.", - "properties": { - "ackIds": { - "type": "array", - "description": "List of acknowledgment IDs.", - "items": { - "type": "string" - } - }, - "ackDeadlineSeconds": { - "type": "integer", - "description": "The new ack deadline with respect to the time this request was sent to the Pub/Sub system. Must be \u003e= 0. For example, if the value is 10, the new ack deadline will expire 10 seconds after the `ModifyAckDeadline` call was made. Specifying zero may immediately make the message available for another pull request.", - "format": "int32" - } - } - }, - "AcknowledgeRequest": { - "id": "AcknowledgeRequest", - "type": "object", - "description": "Request for the Acknowledge method.", - "properties": { - "ackIds": { - "type": "array", - "description": "The acknowledgment ID for the messages being acknowledged that was returned by the Pub/Sub system in the `Pull` response. Must not be empty.", - "items": { - "type": "string" - } - } - } - }, - "PullRequest": { - "id": "PullRequest", - "type": "object", - "description": "Request for the `Pull` method.", - "properties": { - "returnImmediately": { - "type": "boolean", - "description": "If this is specified as true the system will respond immediately even if it is not able to return a message in the `Pull` response. Otherwise the system is allowed to wait until at least one message is available rather than returning no messages. The client may cancel the request if it does not wish to wait any longer for the response." - }, - "maxMessages": { - "type": "integer", - "description": "The maximum number of messages returned for this request. The Pub/Sub system may return fewer than the number specified.", - "format": "int32" - } - } - }, - "PullResponse": { - "id": "PullResponse", - "type": "object", - "description": "Response for the `Pull` method.", - "properties": { - "receivedMessages": { - "type": "array", - "description": "Received Pub/Sub messages. The Pub/Sub system will return zero messages if there are no more available in the backlog. The Pub/Sub system may return fewer than the `maxMessages` requested even if there are more messages available in the backlog.", - "items": { - "$ref": "ReceivedMessage" - } - } - } - }, - "ReceivedMessage": { - "id": "ReceivedMessage", - "type": "object", - "description": "A message and its corresponding acknowledgment ID.", - "properties": { - "ackId": { - "type": "string", - "description": "This ID can be used to acknowledge the received message." - }, - "message": { - "$ref": "PubsubMessage", - "description": "The message." - } - } - }, - "ModifyPushConfigRequest": { - "id": "ModifyPushConfigRequest", - "type": "object", - "description": "Request for the ModifyPushConfig method.", - "properties": { - "pushConfig": { - "$ref": "PushConfig", - "description": "The push configuration for future deliveries. An empty `pushConfig` indicates that the Pub/Sub system should stop pushing messages from the given subscription and allow messages to be pulled and acknowledged - effectively pausing the subscription if `Pull` is not called." - } - } - } - }, - "resources": { - "projects": { - "resources": { - "topics": { - "methods": { - "setIamPolicy": { - "id": "pubsub.projects.topics.setIamPolicy", - "path": "v1/{+resource}:setIamPolicy", - "httpMethod": "POST", - "description": "Sets the access control policy on the specified resource. 
Replaces any existing policy.", - "parameters": { - "resource": { - "type": "string", - "description": "REQUIRED: The resource for which the policy is being specified. `resource` is usually specified as a path, such as `projects/*project*/zones/*zone*/disks/*disk*`. The format for the path specified in this value is resource specific and is specified in the `setIamPolicy` documentation.", - "required": true, - "pattern": "^projects/[^/]*/topics/[^/]*$", - "location": "path" - } - }, - "parameterOrder": [ - "resource" - ], - "request": { - "$ref": "SetIamPolicyRequest" - }, - "response": { - "$ref": "Policy" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/pubsub" - ] - }, - "getIamPolicy": { - "id": "pubsub.projects.topics.getIamPolicy", - "path": "v1/{+resource}:getIamPolicy", - "httpMethod": "GET", - "description": "Gets the access control policy for a `resource`. Returns an empty policy if the resource exists and does not have a policy set.", - "parameters": { - "resource": { - "type": "string", - "description": "REQUIRED: The resource for which the policy is being requested. `resource` is usually specified as a path, such as `projects/*project*/zones/*zone*/disks/*disk*`. The format for the path specified in this value is resource specific and is specified in the `getIamPolicy` documentation.", - "required": true, - "pattern": "^projects/[^/]*/topics/[^/]*$", - "location": "path" - } - }, - "parameterOrder": [ - "resource" - ], - "response": { - "$ref": "Policy" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/pubsub" - ] - }, - "testIamPermissions": { - "id": "pubsub.projects.topics.testIamPermissions", - "path": "v1/{+resource}:testIamPermissions", - "httpMethod": "POST", - "description": "Returns permissions that a caller has on the specified resource.", - "parameters": { - "resource": { - "type": "string", - "description": "REQUIRED: The resource for which the policy detail is being requested. `resource` is usually specified as a path, such as `projects/*project*/zones/*zone*/disks/*disk*`. The format for the path specified in this value is resource specific and is specified in the `testIamPermissions` documentation.", - "required": true, - "pattern": "^projects/[^/]*/topics/[^/]*$", - "location": "path" - } - }, - "parameterOrder": [ - "resource" - ], - "request": { - "$ref": "TestIamPermissionsRequest" - }, - "response": { - "$ref": "TestIamPermissionsResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/pubsub" - ] - }, - "create": { - "id": "pubsub.projects.topics.create", - "path": "v1/{+name}", - "httpMethod": "PUT", - "description": "Creates the given topic with the given name.", - "parameters": { - "name": { - "type": "string", - "description": "The name of the topic. It must have the format `\"projects/{project}/topics/{topic}\"`. `{topic}` must start with a letter, and contain only letters (`[A-Za-z]`), numbers (`[0-9]`), dashes (`-`), underscores (`_`), periods (`.`), tildes (`~`), plus (`+`) or percent signs (`%`). 
It must be between 3 and 255 characters in length, and it must not start with `\"goog\"`.", - "required": true, - "pattern": "^projects/[^/]*/topics/[^/]*$", - "location": "path" - } - }, - "parameterOrder": [ - "name" - ], - "request": { - "$ref": "Topic" - }, - "response": { - "$ref": "Topic" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/pubsub" - ] - }, - "publish": { - "id": "pubsub.projects.topics.publish", - "path": "v1/{+topic}:publish", - "httpMethod": "POST", - "description": "Adds one or more messages to the topic. Returns `NOT_FOUND` if the topic does not exist. The message payload must not be empty; it must contain either a non-empty data field, or at least one attribute.", - "parameters": { - "topic": { - "type": "string", - "description": "The messages in the request will be published on this topic.", - "required": true, - "pattern": "^projects/[^/]*/topics/[^/]*$", - "location": "path" - } - }, - "parameterOrder": [ - "topic" - ], - "request": { - "$ref": "PublishRequest" - }, - "response": { - "$ref": "PublishResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/pubsub" - ] - }, - "get": { - "id": "pubsub.projects.topics.get", - "path": "v1/{+topic}", - "httpMethod": "GET", - "description": "Gets the configuration of a topic.", - "parameters": { - "topic": { - "type": "string", - "description": "The name of the topic to get.", - "required": true, - "pattern": "^projects/[^/]*/topics/[^/]*$", - "location": "path" - } - }, - "parameterOrder": [ - "topic" - ], - "response": { - "$ref": "Topic" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/pubsub" - ] - }, - "list": { - "id": "pubsub.projects.topics.list", - "path": "v1/{+project}/topics", - "httpMethod": "GET", - "description": "Lists matching topics.", - "parameters": { - "project": { - "type": "string", - "description": "The name of the cloud project that topics belong to.", - "required": true, - "pattern": "^projects/[^/]*$", - "location": "path" - }, - "pageSize": { - "type": "integer", - "description": "Maximum number of topics to return.", - "format": "int32", - "location": "query" - }, - "pageToken": { - "type": "string", - "description": "The value returned by the last `ListTopicsResponse`; indicates that this is a continuation of a prior `ListTopics` call, and that the system should return the next page of data.", - "location": "query" - } - }, - "parameterOrder": [ - "project" - ], - "response": { - "$ref": "ListTopicsResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/pubsub" - ] - }, - "delete": { - "id": "pubsub.projects.topics.delete", - "path": "v1/{+topic}", - "httpMethod": "DELETE", - "description": "Deletes the topic with the given name. Returns `NOT_FOUND` if the topic does not exist. After a topic is deleted, a new topic may be created with the same name; this is an entirely new topic with none of the old configuration or subscriptions. 
Existing subscriptions to this topic are not deleted, but their `topic` field is set to `_deleted-topic_`.", - "parameters": { - "topic": { - "type": "string", - "description": "Name of the topic to delete.", - "required": true, - "pattern": "^projects/[^/]*/topics/[^/]*$", - "location": "path" - } - }, - "parameterOrder": [ - "topic" - ], - "response": { - "$ref": "Empty" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/pubsub" - ] - } - }, - "resources": { - "subscriptions": { - "methods": { - "list": { - "id": "pubsub.projects.topics.subscriptions.list", - "path": "v1/{+topic}/subscriptions", - "httpMethod": "GET", - "description": "Lists the names of the subscriptions for this topic.", - "parameters": { - "topic": { - "type": "string", - "description": "The name of the topic that subscriptions are attached to.", - "required": true, - "pattern": "^projects/[^/]*/topics/[^/]*$", - "location": "path" - }, - "pageSize": { - "type": "integer", - "description": "Maximum number of subscription names to return.", - "format": "int32", - "location": "query" - }, - "pageToken": { - "type": "string", - "description": "The value returned by the last `ListTopicSubscriptionsResponse`; indicates that this is a continuation of a prior `ListTopicSubscriptions` call, and that the system should return the next page of data.", - "location": "query" - } - }, - "parameterOrder": [ - "topic" - ], - "response": { - "$ref": "ListTopicSubscriptionsResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/pubsub" - ] - } - } - } - } - }, - "subscriptions": { - "methods": { - "setIamPolicy": { - "id": "pubsub.projects.subscriptions.setIamPolicy", - "path": "v1/{+resource}:setIamPolicy", - "httpMethod": "POST", - "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", - "parameters": { - "resource": { - "type": "string", - "description": "REQUIRED: The resource for which the policy is being specified. `resource` is usually specified as a path, such as `projects/*project*/zones/*zone*/disks/*disk*`. The format for the path specified in this value is resource specific and is specified in the `setIamPolicy` documentation.", - "required": true, - "pattern": "^projects/[^/]*/subscriptions/[^/]*$", - "location": "path" - } - }, - "parameterOrder": [ - "resource" - ], - "request": { - "$ref": "SetIamPolicyRequest" - }, - "response": { - "$ref": "Policy" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/pubsub" - ] - }, - "getIamPolicy": { - "id": "pubsub.projects.subscriptions.getIamPolicy", - "path": "v1/{+resource}:getIamPolicy", - "httpMethod": "GET", - "description": "Gets the access control policy for a `resource`. Returns an empty policy if the resource exists and does not have a policy set.", - "parameters": { - "resource": { - "type": "string", - "description": "REQUIRED: The resource for which the policy is being requested. `resource` is usually specified as a path, such as `projects/*project*/zones/*zone*/disks/*disk*`.
The format for the path specified in this value is resource specific and is specified in the `getIamPolicy` documentation.", - "required": true, - "pattern": "^projects/[^/]*/subscriptions/[^/]*$", - "location": "path" - } - }, - "parameterOrder": [ - "resource" - ], - "response": { - "$ref": "Policy" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/pubsub" - ] - }, - "testIamPermissions": { - "id": "pubsub.projects.subscriptions.testIamPermissions", - "path": "v1/{+resource}:testIamPermissions", - "httpMethod": "POST", - "description": "Returns permissions that a caller has on the specified resource.", - "parameters": { - "resource": { - "type": "string", - "description": "REQUIRED: The resource for which the policy detail is being requested. `resource` is usually specified as a path, such as `projects/*project*/zones/*zone*/disks/*disk*`. The format for the path specified in this value is resource specific and is specified in the `testIamPermissions` documentation.", - "required": true, - "pattern": "^projects/[^/]*/subscriptions/[^/]*$", - "location": "path" - } - }, - "parameterOrder": [ - "resource" - ], - "request": { - "$ref": "TestIamPermissionsRequest" - }, - "response": { - "$ref": "TestIamPermissionsResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/pubsub" - ] - }, - "create": { - "id": "pubsub.projects.subscriptions.create", - "path": "v1/{+name}", - "httpMethod": "PUT", - "description": "Creates a subscription to a given topic. If the subscription already exists, returns `ALREADY_EXISTS`. If the corresponding topic doesn't exist, returns `NOT_FOUND`. If the name is not provided in the request, the server will assign a random name for this subscription on the same project as the topic.", - "parameters": { - "name": { - "type": "string", - "description": "The name of the subscription. It must have the format `\"projects/{project}/subscriptions/{subscription}\"`. `{subscription}` must start with a letter, and contain only letters (`[A-Za-z]`), numbers (`[0-9]`), dashes (`-`), underscores (`_`), periods (`.`), tildes (`~`), plus (`+`) or percent signs (`%`). 
It must be between 3 and 255 characters in length, and it must not start with `\"goog\"`.", - "required": true, - "pattern": "^projects/[^/]*/subscriptions/[^/]*$", - "location": "path" - } - }, - "parameterOrder": [ - "name" - ], - "request": { - "$ref": "Subscription" - }, - "response": { - "$ref": "Subscription" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/pubsub" - ] - }, - "get": { - "id": "pubsub.projects.subscriptions.get", - "path": "v1/{+subscription}", - "httpMethod": "GET", - "description": "Gets the configuration details of a subscription.", - "parameters": { - "subscription": { - "type": "string", - "description": "The name of the subscription to get.", - "required": true, - "pattern": "^projects/[^/]*/subscriptions/[^/]*$", - "location": "path" - } - }, - "parameterOrder": [ - "subscription" - ], - "response": { - "$ref": "Subscription" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/pubsub" - ] - }, - "list": { - "id": "pubsub.projects.subscriptions.list", - "path": "v1/{+project}/subscriptions", - "httpMethod": "GET", - "description": "Lists matching subscriptions.", - "parameters": { - "project": { - "type": "string", - "description": "The name of the cloud project that subscriptions belong to.", - "required": true, - "pattern": "^projects/[^/]*$", - "location": "path" - }, - "pageSize": { - "type": "integer", - "description": "Maximum number of subscriptions to return.", - "format": "int32", - "location": "query" - }, - "pageToken": { - "type": "string", - "description": "The value returned by the last `ListSubscriptionsResponse`; indicates that this is a continuation of a prior `ListSubscriptions` call, and that the system should return the next page of data.", - "location": "query" - } - }, - "parameterOrder": [ - "project" - ], - "response": { - "$ref": "ListSubscriptionsResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/pubsub" - ] - }, - "delete": { - "id": "pubsub.projects.subscriptions.delete", - "path": "v1/{+subscription}", - "httpMethod": "DELETE", - "description": "Deletes an existing subscription. All pending messages in the subscription are immediately dropped. Calls to `Pull` after deletion will return `NOT_FOUND`. After a subscription is deleted, a new one may be created with the same name, but the new one has no association with the old subscription, or its topic unless the same topic is specified.", - "parameters": { - "subscription": { - "type": "string", - "description": "The subscription to delete.", - "required": true, - "pattern": "^projects/[^/]*/subscriptions/[^/]*$", - "location": "path" - } - }, - "parameterOrder": [ - "subscription" - ], - "response": { - "$ref": "Empty" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/pubsub" - ] - }, - "modifyAckDeadline": { - "id": "pubsub.projects.subscriptions.modifyAckDeadline", - "path": "v1/{+subscription}:modifyAckDeadline", - "httpMethod": "POST", - "description": "Modifies the ack deadline for a specific message. 
This method is useful to indicate that more time is needed to process a message by the subscriber, or to make the message available for redelivery if the processing was interrupted.", - "parameters": { - "subscription": { - "type": "string", - "description": "The name of the subscription.", - "required": true, - "pattern": "^projects/[^/]*/subscriptions/[^/]*$", - "location": "path" - } - }, - "parameterOrder": [ - "subscription" - ], - "request": { - "$ref": "ModifyAckDeadlineRequest" - }, - "response": { - "$ref": "Empty" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/pubsub" - ] - }, - "acknowledge": { - "id": "pubsub.projects.subscriptions.acknowledge", - "path": "v1/{+subscription}:acknowledge", - "httpMethod": "POST", - "description": "Acknowledges the messages associated with the `ack_ids` in the `AcknowledgeRequest`. The Pub/Sub system can remove the relevant messages from the subscription. Acknowledging a message whose ack deadline has expired may succeed, but such a message may be redelivered later. Acknowledging a message more than once will not result in an error.", - "parameters": { - "subscription": { - "type": "string", - "description": "The subscription whose message is being acknowledged.", - "required": true, - "pattern": "^projects/[^/]*/subscriptions/[^/]*$", - "location": "path" - } - }, - "parameterOrder": [ - "subscription" - ], - "request": { - "$ref": "AcknowledgeRequest" - }, - "response": { - "$ref": "Empty" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/pubsub" - ] - }, - "pull": { - "id": "pubsub.projects.subscriptions.pull", - "path": "v1/{+subscription}:pull", - "httpMethod": "POST", - "description": "Pulls messages from the server. Returns an empty list if there are no messages available in the backlog. The server may return `UNAVAILABLE` if there are too many concurrent pull requests pending for the given subscription.", - "parameters": { - "subscription": { - "type": "string", - "description": "The subscription from which messages should be pulled.", - "required": true, - "pattern": "^projects/[^/]*/subscriptions/[^/]*$", - "location": "path" - } - }, - "parameterOrder": [ - "subscription" - ], - "request": { - "$ref": "PullRequest" - }, - "response": { - "$ref": "PullResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/pubsub" - ] - }, - "modifyPushConfig": { - "id": "pubsub.projects.subscriptions.modifyPushConfig", - "path": "v1/{+subscription}:modifyPushConfig", - "httpMethod": "POST", - "description": "Modifies the `PushConfig` for a specified subscription. This may be used to change a push subscription to a pull one (signified by an empty `PushConfig`) or vice versa, or change the endpoint URL and other attributes of a push subscription. 
Messages will accumulate for delivery continuously through the call regardless of changes to the `PushConfig`.", - "parameters": { - "subscription": { - "type": "string", - "description": "The name of the subscription.", - "required": true, - "pattern": "^projects/[^/]*/subscriptions/[^/]*$", - "location": "path" - } - }, - "parameterOrder": [ - "subscription" - ], - "request": { - "$ref": "ModifyPushConfigRequest" - }, - "response": { - "$ref": "Empty" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/pubsub" - ] - } - } - } - } - } - } -} diff --git a/vendor/google.golang.org/api/pubsub/v1/pubsub-gen.go b/vendor/google.golang.org/api/pubsub/v1/pubsub-gen.go deleted file mode 100644 index 236257fe8bcb..000000000000 --- a/vendor/google.golang.org/api/pubsub/v1/pubsub-gen.go +++ /dev/null @@ -1,3379 +0,0 @@ -// Package pubsub provides access to the Google Cloud Pub/Sub API. -// -// See https://cloud.google.com/pubsub/docs -// -// Usage example: -// -// import "google.golang.org/api/pubsub/v1" -// ... -// pubsubService, err := pubsub.New(oauthHttpClient) -package pubsub - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - context "golang.org/x/net/context" - ctxhttp "golang.org/x/net/context/ctxhttp" - gensupport "google.golang.org/api/gensupport" - googleapi "google.golang.org/api/googleapi" - "io" - "net/http" - "net/url" - "strconv" - "strings" -) - -// Always reference these packages, just in case the auto-generated code -// below doesn't. -var _ = bytes.NewBuffer -var _ = strconv.Itoa -var _ = fmt.Sprintf -var _ = json.NewDecoder -var _ = io.Copy -var _ = url.Parse -var _ = gensupport.MarshalJSON -var _ = googleapi.Version -var _ = errors.New -var _ = strings.Replace -var _ = context.Canceled -var _ = ctxhttp.Do - -const apiId = "pubsub:v1" -const apiName = "pubsub" -const apiVersion = "v1" -const basePath = "https://pubsub.googleapis.com/" - -// OAuth2 scopes used by this API. 
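The discovery document above and the generated bindings below describe only the wire format, not day-to-day use, so a minimal publish sketch may help tie them together. It assumes Application Default Credentials and the golang.org/x/oauth2/google helper are available in the vendor tree, and the project and topic names are placeholders:

```go
package main

import (
	"encoding/base64"
	"log"

	"golang.org/x/net/context"
	"golang.org/x/oauth2/google"
	pubsub "google.golang.org/api/pubsub/v1"
)

func main() {
	ctx := context.Background()
	// Assumption: Application Default Credentials are configured.
	httpClient, err := google.DefaultClient(ctx, pubsub.PubsubScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := pubsub.New(httpClient)
	if err != nil {
		log.Fatal(err)
	}
	// Per the PubsubMessage schema, the data field of a JSON request must
	// be base64-encoded, and the payload must not be empty.
	msg := &pubsub.PubsubMessage{
		Data: base64.StdEncoding.EncodeToString([]byte("hello, pubsub")),
	}
	// "my-project" and "my-topic" are placeholder names.
	res, err := svc.Projects.Topics.Publish(
		"projects/my-project/topics/my-topic",
		&pubsub.PublishRequest{Messages: []*pubsub.PubsubMessage{msg}},
	).Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("published message IDs: %v", res.MessageIds)
}
```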
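The per-resource IAM methods in the discovery document all share one shape; here is a sketch of checking a single permission on a topic, where "pubsub.topics.publish" is an assumed permission name rather than one taken from this document, and `svc` is the service handle from the publish sketch:

```go
// canPublish reports whether the caller holds the (assumed) publish
// permission on the given topic resource name.
func canPublish(svc *pubsub.Service, topic string) (bool, error) {
	resp, err := svc.Projects.Topics.TestIamPermissions(topic,
		&pubsub.TestIamPermissionsRequest{
			Permissions: []string{"pubsub.topics.publish"},
		}).Do()
	if err != nil {
		return false, err
	}
	// The response echoes back only the permissions the caller is allowed.
	return len(resp.Permissions) == 1, nil
}
```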
-const ( - // View and manage your data across Google Cloud Platform services - CloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform" - - // View and manage Pub/Sub topics and subscriptions - PubsubScope = "https://www.googleapis.com/auth/pubsub" -) - -func New(client *http.Client) (*Service, error) { - if client == nil { - return nil, errors.New("client is nil") - } - s := &Service{client: client, BasePath: basePath} - s.Projects = NewProjectsService(s) - return s, nil -} - -type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment - - Projects *ProjectsService -} - -func (s *Service) userAgent() string { - if s.UserAgent == "" { - return googleapi.UserAgent - } - return googleapi.UserAgent + " " + s.UserAgent -} - -func NewProjectsService(s *Service) *ProjectsService { - rs := &ProjectsService{s: s} - rs.Subscriptions = NewProjectsSubscriptionsService(s) - rs.Topics = NewProjectsTopicsService(s) - return rs -} - -type ProjectsService struct { - s *Service - - Subscriptions *ProjectsSubscriptionsService - - Topics *ProjectsTopicsService -} - -func NewProjectsSubscriptionsService(s *Service) *ProjectsSubscriptionsService { - rs := &ProjectsSubscriptionsService{s: s} - return rs -} - -type ProjectsSubscriptionsService struct { - s *Service -} - -func NewProjectsTopicsService(s *Service) *ProjectsTopicsService { - rs := &ProjectsTopicsService{s: s} - rs.Subscriptions = NewProjectsTopicsSubscriptionsService(s) - return rs -} - -type ProjectsTopicsService struct { - s *Service - - Subscriptions *ProjectsTopicsSubscriptionsService -} - -func NewProjectsTopicsSubscriptionsService(s *Service) *ProjectsTopicsSubscriptionsService { - rs := &ProjectsTopicsSubscriptionsService{s: s} - return rs -} - -type ProjectsTopicsSubscriptionsService struct { - s *Service -} - -// AcknowledgeRequest: Request for the Acknowledge method. -type AcknowledgeRequest struct { - // AckIds: The acknowledgment ID for the messages being acknowledged - // that was returned by the Pub/Sub system in the `Pull` response. Must - // not be empty. - AckIds []string `json:"ackIds,omitempty"` - - // ForceSendFields is a list of field names (e.g. "AckIds") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` -} - -func (s *AcknowledgeRequest) MarshalJSON() ([]byte, error) { - type noMethod AcknowledgeRequest - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) -} - -// Binding: Associates `members` with a `role`. -type Binding struct { - // Members: Specifies the identities requesting access for a Cloud - // Platform resource. `members` can have the following values: * - // `allUsers`: A special identifier that represents anyone who is on the - // internet; with or without a Google account. * - // `allAuthenticatedUsers`: A special identifier that represents anyone - // who is authenticated with a Google account or a service account. * - // `user:{emailid}`: An email address that represents a specific Google - // account. For example, `alice@gmail.com` or `joe@example.com`. * - // `serviceAccount:{emailid}`: An email address that represents a - // service account. 
For example, - // `my-other-app@appspot.gserviceaccount.com`. * `group:{emailid}`: An - // email address that represents a Google group. For example, - // `admins@example.com`. * `domain:{domain}`: A Google Apps domain name - // that represents all the users of that domain. For example, - // `google.com` or `example.com`. - Members []string `json:"members,omitempty"` - - // Role: Role that is assigned to `members`. For example, - // `roles/viewer`, `roles/editor`, or `roles/owner`. Required - Role string `json:"role,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Members") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` -} - -func (s *Binding) MarshalJSON() ([]byte, error) { - type noMethod Binding - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) -} - -// Empty: A generic empty message that you can re-use to avoid defining -// duplicated empty messages in your APIs. A typical example is to use -// it as the request or the response type of an API method. For -// instance: service Foo { rpc Bar(google.protobuf.Empty) returns -// (google.protobuf.Empty); } The JSON representation for `Empty` is -// empty JSON object `{}`. -type Empty struct { - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` -} - -// ListSubscriptionsResponse: Response for the `ListSubscriptions` -// method. -type ListSubscriptionsResponse struct { - // NextPageToken: If not empty, indicates that there may be more - // subscriptions that match the request; this value should be passed in - // a new `ListSubscriptionsRequest` to get more subscriptions. - NextPageToken string `json:"nextPageToken,omitempty"` - - // Subscriptions: The subscriptions that match the request. - Subscriptions []*Subscription `json:"subscriptions,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "NextPageToken") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` -} - -func (s *ListSubscriptionsResponse) MarshalJSON() ([]byte, error) { - type noMethod ListSubscriptionsResponse - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) -} - -// ListTopicSubscriptionsResponse: Response for the -// `ListTopicSubscriptions` method. -type ListTopicSubscriptionsResponse struct { - // NextPageToken: If not empty, indicates that there may be more - // subscriptions that match the request; this value should be passed in - // a new `ListTopicSubscriptionsRequest` to get more subscriptions. - NextPageToken string `json:"nextPageToken,omitempty"` - - // Subscriptions: The names of the subscriptions that match the request. 
- Subscriptions []string `json:"subscriptions,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "NextPageToken") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` -} - -func (s *ListTopicSubscriptionsResponse) MarshalJSON() ([]byte, error) { - type noMethod ListTopicSubscriptionsResponse - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) -} - -// ListTopicsResponse: Response for the `ListTopics` method. -type ListTopicsResponse struct { - // NextPageToken: If not empty, indicates that there may be more topics - // that match the request; this value should be passed in a new - // `ListTopicsRequest`. - NextPageToken string `json:"nextPageToken,omitempty"` - - // Topics: The resulting topics. - Topics []*Topic `json:"topics,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "NextPageToken") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` -} - -func (s *ListTopicsResponse) MarshalJSON() ([]byte, error) { - type noMethod ListTopicsResponse - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) -} - -// ModifyAckDeadlineRequest: Request for the ModifyAckDeadline method. -type ModifyAckDeadlineRequest struct { - // AckDeadlineSeconds: The new ack deadline with respect to the time - // this request was sent to the Pub/Sub system. Must be >= 0. For - // example, if the value is 10, the new ack deadline will expire 10 - // seconds after the `ModifyAckDeadline` call was made. Specifying zero - // may immediately make the message available for another pull request. - AckDeadlineSeconds int64 `json:"ackDeadlineSeconds,omitempty"` - - // AckIds: List of acknowledgment IDs. - AckIds []string `json:"ackIds,omitempty"` - - // ForceSendFields is a list of field names (e.g. "AckDeadlineSeconds") - // to unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` -} - -func (s *ModifyAckDeadlineRequest) MarshalJSON() ([]byte, error) { - type noMethod ModifyAckDeadlineRequest - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) -} - -// ModifyPushConfigRequest: Request for the ModifyPushConfig method. -type ModifyPushConfigRequest struct { - // PushConfig: The push configuration for future deliveries. 
An empty - // `pushConfig` indicates that the Pub/Sub system should stop pushing - // messages from the given subscription and allow messages to be pulled - // and acknowledged - effectively pausing the subscription if `Pull` is - // not called. - PushConfig *PushConfig `json:"pushConfig,omitempty"` - - // ForceSendFields is a list of field names (e.g. "PushConfig") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` -} - -func (s *ModifyPushConfigRequest) MarshalJSON() ([]byte, error) { - type noMethod ModifyPushConfigRequest - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) -} - -// Policy: Defines an Identity and Access Management (IAM) policy. It is -// used to specify access control policies for Cloud Platform resources. -// A `Policy` consists of a list of `bindings`. A `Binding` binds a list -// of `members` to a `role`, where the members can be user accounts, -// Google groups, Google domains, and service accounts. A `role` is a -// named list of permissions defined by IAM. **Example** { "bindings": [ -// { "role": "roles/owner", "members": [ "user:mike@example.com", -// "group:admins@example.com", "domain:google.com", -// "serviceAccount:my-other-app@appspot.gserviceaccount.com", ] }, { -// "role": "roles/viewer", "members": ["user:sean@example.com"] } ] } -// For a description of IAM and its features, see the [IAM developer's -// guide](https://cloud.google.com/iam). -type Policy struct { - // Bindings: Associates a list of `members` to a `role`. Multiple - // `bindings` must not be specified for the same `role`. `bindings` with - // no members will result in an error. - Bindings []*Binding `json:"bindings,omitempty"` - - // Etag: `etag` is used for optimistic concurrency control as a way to - // help prevent simultaneous updates of a policy from overwriting each - // other. It is strongly suggested that systems make use of the `etag` - // in the read-modify-write cycle to perform policy updates in order to - // avoid race conditions: An `etag` is returned in the response to - // `getIamPolicy`, and systems are expected to put that etag in the - // request to `setIamPolicy` to ensure that their change will be applied - // to the same version of the policy. If no `etag` is provided in the - // call to `setIamPolicy`, then the existing policy is overwritten - // blindly. - Etag string `json:"etag,omitempty"` - - // Version: Version of the `Policy`. The default version is 0. - Version int64 `json:"version,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Bindings") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. 
- ForceSendFields []string `json:"-"` -} - -func (s *Policy) MarshalJSON() ([]byte, error) { - type noMethod Policy - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) -} - -// PublishRequest: Request for the Publish method. -type PublishRequest struct { - // Messages: The messages to publish. - Messages []*PubsubMessage `json:"messages,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Messages") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` -} - -func (s *PublishRequest) MarshalJSON() ([]byte, error) { - type noMethod PublishRequest - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) -} - -// PublishResponse: Response for the `Publish` method. -type PublishResponse struct { - // MessageIds: The server-assigned ID of each published message, in the - // same order as the messages in the request. IDs are guaranteed to be - // unique within the topic. - MessageIds []string `json:"messageIds,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "MessageIds") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` -} - -func (s *PublishResponse) MarshalJSON() ([]byte, error) { - type noMethod PublishResponse - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) -} - -// PubsubMessage: A message data and its attributes. The message payload -// must not be empty; it must contain either a non-empty data field, or -// at least one attribute. -type PubsubMessage struct { - // Attributes: Optional attributes for this message. - Attributes map[string]string `json:"attributes,omitempty"` - - // Data: The message payload. For JSON requests, the value of this field - // must be base64-encoded. - Data string `json:"data,omitempty"` - - // MessageId: ID of this message, assigned by the server when the - // message is published. Guaranteed to be unique within the topic. This - // value may be read by a subscriber that receives a `PubsubMessage` via - // a `Pull` call or a push delivery. It must not be populated by the - // publisher in a `Publish` call. - MessageId string `json:"messageId,omitempty"` - - // PublishTime: The time at which the message was published, populated - // by the server when it receives the `Publish` call. It must not be - // populated by the publisher in a `Publish` call. - PublishTime string `json:"publishTime,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Attributes") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. 
This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` -} - -func (s *PubsubMessage) MarshalJSON() ([]byte, error) { - type noMethod PubsubMessage - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) -} - -// PullRequest: Request for the `Pull` method. -type PullRequest struct { - // MaxMessages: The maximum number of messages returned for this - // request. The Pub/Sub system may return fewer than the number - // specified. - MaxMessages int64 `json:"maxMessages,omitempty"` - - // ReturnImmediately: If this is specified as true the system will - // respond immediately even if it is not able to return a message in the - // `Pull` response. Otherwise the system is allowed to wait until at - // least one message is available rather than returning no messages. The - // client may cancel the request if it does not wish to wait any longer - // for the response. - ReturnImmediately bool `json:"returnImmediately,omitempty"` - - // ForceSendFields is a list of field names (e.g. "MaxMessages") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` -} - -func (s *PullRequest) MarshalJSON() ([]byte, error) { - type noMethod PullRequest - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) -} - -// PullResponse: Response for the `Pull` method. -type PullResponse struct { - // ReceivedMessages: Received Pub/Sub messages. The Pub/Sub system will - // return zero messages if there are no more available in the backlog. - // The Pub/Sub system may return fewer than the `maxMessages` requested - // even if there are more messages available in the backlog. - ReceivedMessages []*ReceivedMessage `json:"receivedMessages,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "ReceivedMessages") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` -} - -func (s *PullResponse) MarshalJSON() ([]byte, error) { - type noMethod PullResponse - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) -} - -// PushConfig: Configuration for a push delivery endpoint. -type PushConfig struct { - // Attributes: Endpoint configuration attributes. Every endpoint has a - // set of API supported attributes that can be used to control different - // aspects of the message delivery. The currently supported attribute is - // `x-goog-version`, which you can use to change the format of the push - // message. This attribute indicates the version of the data expected by - // the endpoint. This controls the shape of the envelope (i.e. its - // fields and metadata). The endpoint version is based on the version of - // the Pub/Sub API. 
If not present during the `CreateSubscription` call, - // it will default to the version of the API used to make such call. If - // not present during a `ModifyPushConfig` call, its value will not be - // changed. `GetSubscription` calls will always return a valid version, - // even if the subscription was created without this attribute. The - // possible values for this attribute are: * `v1beta1`: uses the push - // format defined in the v1beta1 Pub/Sub API. * `v1` or `v1beta2`: uses - // the push format defined in the v1 Pub/Sub API. - Attributes map[string]string `json:"attributes,omitempty"` - - // PushEndpoint: A URL locating the endpoint to which messages should be - // pushed. For example, a Webhook endpoint might use - // "https://example.com/push". - PushEndpoint string `json:"pushEndpoint,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Attributes") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` -} - -func (s *PushConfig) MarshalJSON() ([]byte, error) { - type noMethod PushConfig - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) -} - -// ReceivedMessage: A message and its corresponding acknowledgment ID. -type ReceivedMessage struct { - // AckId: This ID can be used to acknowledge the received message. - AckId string `json:"ackId,omitempty"` - - // Message: The message. - Message *PubsubMessage `json:"message,omitempty"` - - // ForceSendFields is a list of field names (e.g. "AckId") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` -} - -func (s *ReceivedMessage) MarshalJSON() ([]byte, error) { - type noMethod ReceivedMessage - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) -} - -// SetIamPolicyRequest: Request message for `SetIamPolicy` method. -type SetIamPolicyRequest struct { - // Policy: REQUIRED: The complete policy to be applied to the - // `resource`. The size of the policy is limited to a few 10s of KB. An - // empty policy is a valid policy but certain Cloud Platform services - // (such as Projects) might reject them. - Policy *Policy `json:"policy,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Policy") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` -} - -func (s *SetIamPolicyRequest) MarshalJSON() ([]byte, error) { - type noMethod SetIamPolicyRequest - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) -} - -// Subscription: A subscription resource. 
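To connect the Subscription fields defined just below with the Create call generated later in this file, a small sketch under the same placeholder names (`svc` is again the service handle from the publish sketch, and the 30-second deadline is illustrative):

```go
// createSubscription is a sketch: create a pull subscription with a
// 30-second ack deadline (the default is 10 seconds when unset).
func createSubscription(svc *pubsub.Service) (*pubsub.Subscription, error) {
	return svc.Projects.Subscriptions.Create(
		"projects/my-project/subscriptions/my-sub",
		&pubsub.Subscription{
			Topic:              "projects/my-project/topics/my-topic",
			AckDeadlineSeconds: 30,
		},
	).Do() // ALREADY_EXISTS and NOT_FOUND surface here as errors
}
```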
-type Subscription struct { - // AckDeadlineSeconds: This value is the maximum time after a subscriber - // receives a message before the subscriber should acknowledge the - // message. After message delivery but before the ack deadline expires - // and before the message is acknowledged, it is an outstanding message - // and will not be delivered again during that time (on a best-effort - // basis). For pull subscriptions, this value is used as the initial - // value for the ack deadline. To override this value for a given - // message, call `ModifyAckDeadline` with the corresponding `ack_id` if - // using pull. For push delivery, this value is also used to set the - // request timeout for the call to the push endpoint. If the subscriber - // never acknowledges the message, the Pub/Sub system will eventually - // redeliver the message. If this parameter is not set, the default - // value of 10 seconds is used. - AckDeadlineSeconds int64 `json:"ackDeadlineSeconds,omitempty"` - - // Name: The name of the subscription. It must have the format - // "projects/{project}/subscriptions/{subscription}". `{subscription}` - // must start with a letter, and contain only letters (`[A-Za-z]`), - // numbers (`[0-9]`), dashes (`-`), underscores (`_`), periods (`.`), - // tildes (`~`), plus (`+`) or percent signs (`%`). It must be between 3 - // and 255 characters in length, and it must not start with "goog". - Name string `json:"name,omitempty"` - - // PushConfig: If push delivery is used with this subscription, this - // field is used to configure it. An empty `pushConfig` signifies that - // the subscriber will pull and ack messages using API methods. - PushConfig *PushConfig `json:"pushConfig,omitempty"` - - // Topic: The name of the topic from which this subscription is - // receiving messages. The value of this field will be `_deleted-topic_` - // if the topic has been deleted. - Topic string `json:"topic,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "AckDeadlineSeconds") - // to unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` -} - -func (s *Subscription) MarshalJSON() ([]byte, error) { - type noMethod Subscription - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) -} - -// TestIamPermissionsRequest: Request message for `TestIamPermissions` -// method. -type TestIamPermissionsRequest struct { - // Permissions: The set of permissions to check for the `resource`. - // Permissions with wildcards (such as '*' or 'storage.*') are not - // allowed. For more information see IAM Overview. - Permissions []string `json:"permissions,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Permissions") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. 
- ForceSendFields []string `json:"-"` -} - -func (s *TestIamPermissionsRequest) MarshalJSON() ([]byte, error) { - type noMethod TestIamPermissionsRequest - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) -} - -// TestIamPermissionsResponse: Response message for `TestIamPermissions` -// method. -type TestIamPermissionsResponse struct { - // Permissions: A subset of `TestIamPermissionsRequest.permissions` that - // the caller is allowed. - Permissions []string `json:"permissions,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Permissions") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` -} - -func (s *TestIamPermissionsResponse) MarshalJSON() ([]byte, error) { - type noMethod TestIamPermissionsResponse - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) -} - -// Topic: A topic resource. -type Topic struct { - // Name: The name of the topic. It must have the format - // "projects/{project}/topics/{topic}". `{topic}` must start with a - // letter, and contain only letters (`[A-Za-z]`), numbers (`[0-9]`), - // dashes (`-`), underscores (`_`), periods (`.`), tildes (`~`), plus - // (`+`) or percent signs (`%`). It must be between 3 and 255 characters - // in length, and it must not start with "goog". - Name string `json:"name,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Name") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` -} - -func (s *Topic) MarshalJSON() ([]byte, error) { - type noMethod Topic - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) -} - -// method id "pubsub.projects.subscriptions.acknowledge": - -type ProjectsSubscriptionsAcknowledgeCall struct { - s *Service - subscription string - acknowledgerequest *AcknowledgeRequest - urlParams_ gensupport.URLParams - ctx_ context.Context -} - -// Acknowledge: Acknowledges the messages associated with the `ack_ids` -// in the `AcknowledgeRequest`. The Pub/Sub system can remove the -// relevant messages from the subscription. Acknowledging a message -// whose ack deadline has expired may succeed, but such a message may be -// redelivered later. Acknowledging a message more than once will not -// result in an error. -func (r *ProjectsSubscriptionsService) Acknowledge(subscription string, acknowledgerequest *AcknowledgeRequest) *ProjectsSubscriptionsAcknowledgeCall { - c := &ProjectsSubscriptionsAcknowledgeCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.subscription = subscription - c.acknowledgerequest = acknowledgerequest - return c -} - -// Fields allows partial responses to be retrieved.
See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsSubscriptionsAcknowledgeCall) Fields(s ...googleapi.Field) *ProjectsSubscriptionsAcknowledgeCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsSubscriptionsAcknowledgeCall) Context(ctx context.Context) *ProjectsSubscriptionsAcknowledgeCall { - c.ctx_ = ctx - return c -} - -func (c *ProjectsSubscriptionsAcknowledgeCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.acknowledgerequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+subscription}:acknowledge") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "subscription": c.subscription, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "pubsub.projects.subscriptions.acknowledge" call. -// Exactly one of *Empty or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Empty.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ProjectsSubscriptionsAcknowledgeCall) Do(opts ...googleapi.CallOption) (*Empty, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Empty{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Acknowledges the messages associated with the `ack_ids` in the `AcknowledgeRequest`. The Pub/Sub system can remove the relevant messages from the subscription. Acknowledging a message whose ack deadline has expired may succeed, but such a message may be redelivered later. 
Acknowledging a message more than once will not result in an error.", - // "httpMethod": "POST", - // "id": "pubsub.projects.subscriptions.acknowledge", - // "parameterOrder": [ - // "subscription" - // ], - // "parameters": { - // "subscription": { - // "description": "The subscription whose message is being acknowledged.", - // "location": "path", - // "pattern": "^projects/[^/]*/subscriptions/[^/]*$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v1/{+subscription}:acknowledge", - // "request": { - // "$ref": "AcknowledgeRequest" - // }, - // "response": { - // "$ref": "Empty" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/pubsub" - // ] - // } - -} - -// method id "pubsub.projects.subscriptions.create": - -type ProjectsSubscriptionsCreateCall struct { - s *Service - name string - subscription *Subscription - urlParams_ gensupport.URLParams - ctx_ context.Context -} - -// Create: Creates a subscription to a given topic. If the subscription -// already exists, returns `ALREADY_EXISTS`. If the corresponding topic -// doesn't exist, returns `NOT_FOUND`. If the name is not provided in -// the request, the server will assign a random name for this -// subscription on the same project as the topic. -func (r *ProjectsSubscriptionsService) Create(name string, subscription *Subscription) *ProjectsSubscriptionsCreateCall { - c := &ProjectsSubscriptionsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.name = name - c.subscription = subscription - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsSubscriptionsCreateCall) Fields(s ...googleapi.Field) *ProjectsSubscriptionsCreateCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsSubscriptionsCreateCall) Context(ctx context.Context) *ProjectsSubscriptionsCreateCall { - c.ctx_ = ctx - return c -} - -func (c *ProjectsSubscriptionsCreateCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.subscription) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("PUT", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "name": c.name, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "pubsub.projects.subscriptions.create" call. -// Exactly one of *Subscription or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Subscription.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *ProjectsSubscriptionsCreateCall) Do(opts ...googleapi.CallOption) (*Subscription, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Subscription{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Creates a subscription to a given topic. If the subscription already exists, returns `ALREADY_EXISTS`. If the corresponding topic doesn't exist, returns `NOT_FOUND`. If the name is not provided in the request, the server will assign a random name for this subscription on the same project as the topic.", - // "httpMethod": "PUT", - // "id": "pubsub.projects.subscriptions.create", - // "parameterOrder": [ - // "name" - // ], - // "parameters": { - // "name": { - // "description": "The name of the subscription. It must have the format `\"projects/{project}/subscriptions/{subscription}\"`. `{subscription}` must start with a letter, and contain only letters (`[A-Za-z]`), numbers (`[0-9]`), dashes (`-`), underscores (`_`), periods (`.`), tildes (`~`), plus (`+`) or percent signs (`%`). It must be between 3 and 255 characters in length, and it must not start with `\"goog\"`.", - // "location": "path", - // "pattern": "^projects/[^/]*/subscriptions/[^/]*$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v1/{+name}", - // "request": { - // "$ref": "Subscription" - // }, - // "response": { - // "$ref": "Subscription" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/pubsub" - // ] - // } - -} - -// method id "pubsub.projects.subscriptions.delete": - -type ProjectsSubscriptionsDeleteCall struct { - s *Service - subscription string - urlParams_ gensupport.URLParams - ctx_ context.Context -} - -// Delete: Deletes an existing subscription. All pending messages in the -// subscription are immediately dropped. Calls to `Pull` after deletion -// will return `NOT_FOUND`. After a subscription is deleted, a new one -// may be created with the same name, but the new one has no association -// with the old subscription, or its topic unless the same topic is -// specified. -func (r *ProjectsSubscriptionsService) Delete(subscription string) *ProjectsSubscriptionsDeleteCall { - c := &ProjectsSubscriptionsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.subscription = subscription - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsSubscriptionsDeleteCall) Fields(s ...googleapi.Field) *ProjectsSubscriptionsDeleteCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. 
-func (c *ProjectsSubscriptionsDeleteCall) Context(ctx context.Context) *ProjectsSubscriptionsDeleteCall {
-	c.ctx_ = ctx
-	return c
-}
-
-func (c *ProjectsSubscriptionsDeleteCall) doRequest(alt string) (*http.Response, error) {
-	reqHeaders := make(http.Header)
-	reqHeaders.Set("User-Agent", c.s.userAgent())
-	var body io.Reader = nil
-	c.urlParams_.Set("alt", alt)
-	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+subscription}")
-	urls += "?" + c.urlParams_.Encode()
-	req, _ := http.NewRequest("DELETE", urls, body)
-	req.Header = reqHeaders
-	googleapi.Expand(req.URL, map[string]string{
-		"subscription": c.subscription,
-	})
-	return gensupport.SendRequest(c.ctx_, c.s.client, req)
-}
-
-// Do executes the "pubsub.projects.subscriptions.delete" call.
-// Exactly one of *Empty or error will be non-nil. Any non-2xx status
-// code is an error. Response headers are in either
-// *Empty.ServerResponse.Header or (if a response was returned at all)
-// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
-// check whether the returned error was because http.StatusNotModified
-// was returned.
-func (c *ProjectsSubscriptionsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) {
-	gensupport.SetOptions(c.urlParams_, opts...)
-	res, err := c.doRequest("json")
-	if res != nil && res.StatusCode == http.StatusNotModified {
-		if res.Body != nil {
-			res.Body.Close()
-		}
-		return nil, &googleapi.Error{
-			Code:   res.StatusCode,
-			Header: res.Header,
-		}
-	}
-	if err != nil {
-		return nil, err
-	}
-	defer googleapi.CloseBody(res)
-	if err := googleapi.CheckResponse(res); err != nil {
-		return nil, err
-	}
-	ret := &Empty{
-		ServerResponse: googleapi.ServerResponse{
-			Header:         res.Header,
-			HTTPStatusCode: res.StatusCode,
-		},
-	}
-	target := &ret
-	if err := json.NewDecoder(res.Body).Decode(target); err != nil {
-		return nil, err
-	}
-	return ret, nil
-	// {
-	//   "description": "Deletes an existing subscription. All pending messages in the subscription are immediately dropped. Calls to `Pull` after deletion will return `NOT_FOUND`. After a subscription is deleted, a new one may be created with the same name, but the new one has no association with the old subscription or its topic, unless the same topic is specified.",
-	//   "httpMethod": "DELETE",
-	//   "id": "pubsub.projects.subscriptions.delete",
-	//   "parameterOrder": [
-	//     "subscription"
-	//   ],
-	//   "parameters": {
-	//     "subscription": {
-	//       "description": "The subscription to delete.",
-	//       "location": "path",
-	//       "pattern": "^projects/[^/]*/subscriptions/[^/]*$",
-	//       "required": true,
-	//       "type": "string"
-	//     }
-	//   },
-	//   "path": "v1/{+subscription}",
-	//   "response": {
-	//     "$ref": "Empty"
-	//   },
-	//   "scopes": [
-	//     "https://www.googleapis.com/auth/cloud-platform",
-	//     "https://www.googleapis.com/auth/pubsub"
-	//   ]
-	// }
-
-}
-
-// method id "pubsub.projects.subscriptions.get":
-
-type ProjectsSubscriptionsGetCall struct {
-	s            *Service
-	subscription string
-	urlParams_   gensupport.URLParams
-	ifNoneMatch_ string
-	ctx_         context.Context
-}
-
-// Get: Gets the configuration details of a subscription.
-func (r *ProjectsSubscriptionsService) Get(subscription string) *ProjectsSubscriptionsGetCall {
-	c := &ProjectsSubscriptionsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
-	c.subscription = subscription
-	return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *ProjectsSubscriptionsGetCall) Fields(s ...googleapi.Field) *ProjectsSubscriptionsGetCall {
-	c.urlParams_.Set("fields", googleapi.CombineFields(s))
-	return c
-}
-
-// IfNoneMatch sets the optional parameter which makes the operation
-// fail if the object's ETag matches the given value. This is useful for
-// getting updates only after the object has changed since the last
-// request. Use googleapi.IsNotModified to check whether the response
-// error from Do is the result of If-None-Match.
-func (c *ProjectsSubscriptionsGetCall) IfNoneMatch(entityTag string) *ProjectsSubscriptionsGetCall {
-	c.ifNoneMatch_ = entityTag
-	return c
-}
-
-// Context sets the context to be used in this call's Do method. Any
-// pending HTTP request will be aborted if the provided context is
-// canceled.
-func (c *ProjectsSubscriptionsGetCall) Context(ctx context.Context) *ProjectsSubscriptionsGetCall {
-	c.ctx_ = ctx
-	return c
-}
-
-func (c *ProjectsSubscriptionsGetCall) doRequest(alt string) (*http.Response, error) {
-	reqHeaders := make(http.Header)
-	reqHeaders.Set("User-Agent", c.s.userAgent())
-	if c.ifNoneMatch_ != "" {
-		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
-	}
-	var body io.Reader = nil
-	c.urlParams_.Set("alt", alt)
-	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+subscription}")
-	urls += "?" + c.urlParams_.Encode()
-	req, _ := http.NewRequest("GET", urls, body)
-	req.Header = reqHeaders
-	googleapi.Expand(req.URL, map[string]string{
-		"subscription": c.subscription,
-	})
-	return gensupport.SendRequest(c.ctx_, c.s.client, req)
-}
-
-// Do executes the "pubsub.projects.subscriptions.get" call.
-// Exactly one of *Subscription or error will be non-nil. Any non-2xx
-// status code is an error. Response headers are in either
-// *Subscription.ServerResponse.Header or (if a response was returned at
-// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified
-// to check whether the returned error was because
-// http.StatusNotModified was returned.
-func (c *ProjectsSubscriptionsGetCall) Do(opts ...googleapi.CallOption) (*Subscription, error) {
-	gensupport.SetOptions(c.urlParams_, opts...)
-	res, err := c.doRequest("json")
-	if res != nil && res.StatusCode == http.StatusNotModified {
-		if res.Body != nil {
-			res.Body.Close()
-		}
-		return nil, &googleapi.Error{
-			Code:   res.StatusCode,
-			Header: res.Header,
-		}
-	}
-	if err != nil {
-		return nil, err
-	}
-	defer googleapi.CloseBody(res)
-	if err := googleapi.CheckResponse(res); err != nil {
-		return nil, err
-	}
-	ret := &Subscription{
-		ServerResponse: googleapi.ServerResponse{
-			Header:         res.Header,
-			HTTPStatusCode: res.StatusCode,
-		},
-	}
-	target := &ret
-	if err := json.NewDecoder(res.Body).Decode(target); err != nil {
-		return nil, err
-	}
-	return ret, nil
-	// {
-	//   "description": "Gets the configuration details of a subscription.",
-	//   "httpMethod": "GET",
-	//   "id": "pubsub.projects.subscriptions.get",
-	//   "parameterOrder": [
-	//     "subscription"
-	//   ],
-	//   "parameters": {
-	//     "subscription": {
-	//       "description": "The name of the subscription to get.",
-	//       "location": "path",
-	//       "pattern": "^projects/[^/]*/subscriptions/[^/]*$",
-	//       "required": true,
-	//       "type": "string"
-	//     }
-	//   },
-	//   "path": "v1/{+subscription}",
-	//   "response": {
-	//     "$ref": "Subscription"
-	//   },
-	//   "scopes": [
-	//     "https://www.googleapis.com/auth/cloud-platform",
-	//     "https://www.googleapis.com/auth/pubsub"
-	//   ]
-	// }
-
-}
-
-// method id "pubsub.projects.subscriptions.getIamPolicy":
-
-type ProjectsSubscriptionsGetIamPolicyCall struct {
-	s            *Service
-	resource     string
-	urlParams_   gensupport.URLParams
-	ifNoneMatch_ string
-	ctx_         context.Context
-}
-
-// GetIamPolicy: Gets the access control policy for a `resource`.
-// Returns an empty policy if the resource exists and does not have a
-// policy set.
-func (r *ProjectsSubscriptionsService) GetIamPolicy(resource string) *ProjectsSubscriptionsGetIamPolicyCall {
-	c := &ProjectsSubscriptionsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)}
-	c.resource = resource
-	return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *ProjectsSubscriptionsGetIamPolicyCall) Fields(s ...googleapi.Field) *ProjectsSubscriptionsGetIamPolicyCall {
-	c.urlParams_.Set("fields", googleapi.CombineFields(s))
-	return c
-}
-
-// IfNoneMatch sets the optional parameter which makes the operation
-// fail if the object's ETag matches the given value. This is useful for
-// getting updates only after the object has changed since the last
-// request. Use googleapi.IsNotModified to check whether the response
-// error from Do is the result of If-None-Match.
-func (c *ProjectsSubscriptionsGetIamPolicyCall) IfNoneMatch(entityTag string) *ProjectsSubscriptionsGetIamPolicyCall {
-	c.ifNoneMatch_ = entityTag
-	return c
-}
-
-// Context sets the context to be used in this call's Do method. Any
-// pending HTTP request will be aborted if the provided context is
-// canceled.
-func (c *ProjectsSubscriptionsGetIamPolicyCall) Context(ctx context.Context) *ProjectsSubscriptionsGetIamPolicyCall {
-	c.ctx_ = ctx
-	return c
-}
-
-func (c *ProjectsSubscriptionsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) {
-	reqHeaders := make(http.Header)
-	reqHeaders.Set("User-Agent", c.s.userAgent())
-	if c.ifNoneMatch_ != "" {
-		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
-	}
-	var body io.Reader = nil
-	c.urlParams_.Set("alt", alt)
-	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+resource}:getIamPolicy")
-	urls += "?" + c.urlParams_.Encode()
-	req, _ := http.NewRequest("GET", urls, body)
-	req.Header = reqHeaders
-	googleapi.Expand(req.URL, map[string]string{
-		"resource": c.resource,
-	})
-	return gensupport.SendRequest(c.ctx_, c.s.client, req)
-}
-
-// Do executes the "pubsub.projects.subscriptions.getIamPolicy" call.
-// Exactly one of *Policy or error will be non-nil. Any non-2xx status
-// code is an error. Response headers are in either
-// *Policy.ServerResponse.Header or (if a response was returned at all)
-// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
-// check whether the returned error was because http.StatusNotModified
-// was returned.
-func (c *ProjectsSubscriptionsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) {
-	gensupport.SetOptions(c.urlParams_, opts...)
-	res, err := c.doRequest("json")
-	if res != nil && res.StatusCode == http.StatusNotModified {
-		if res.Body != nil {
-			res.Body.Close()
-		}
-		return nil, &googleapi.Error{
-			Code:   res.StatusCode,
-			Header: res.Header,
-		}
-	}
-	if err != nil {
-		return nil, err
-	}
-	defer googleapi.CloseBody(res)
-	if err := googleapi.CheckResponse(res); err != nil {
-		return nil, err
-	}
-	ret := &Policy{
-		ServerResponse: googleapi.ServerResponse{
-			Header:         res.Header,
-			HTTPStatusCode: res.StatusCode,
-		},
-	}
-	target := &ret
-	if err := json.NewDecoder(res.Body).Decode(target); err != nil {
-		return nil, err
-	}
-	return ret, nil
-	// {
-	//   "description": "Gets the access control policy for a `resource`. Returns an empty policy if the resource exists and does not have a policy set.",
-	//   "httpMethod": "GET",
-	//   "id": "pubsub.projects.subscriptions.getIamPolicy",
-	//   "parameterOrder": [
-	//     "resource"
-	//   ],
-	//   "parameters": {
-	//     "resource": {
-	//       "description": "REQUIRED: The resource for which the policy is being requested. `resource` is usually specified as a path, such as `projects/*project*/zones/*zone*/disks/*disk*`. The format for the path specified in this value is resource specific and is specified in the `getIamPolicy` documentation.",
-	//       "location": "path",
-	//       "pattern": "^projects/[^/]*/subscriptions/[^/]*$",
-	//       "required": true,
-	//       "type": "string"
-	//     }
-	//   },
-	//   "path": "v1/{+resource}:getIamPolicy",
-	//   "response": {
-	//     "$ref": "Policy"
-	//   },
-	//   "scopes": [
-	//     "https://www.googleapis.com/auth/cloud-platform",
-	//     "https://www.googleapis.com/auth/pubsub"
-	//   ]
-	// }
-
-}
-
-// method id "pubsub.projects.subscriptions.list":
-
-type ProjectsSubscriptionsListCall struct {
-	s            *Service
-	project      string
-	urlParams_   gensupport.URLParams
-	ifNoneMatch_ string
-	ctx_         context.Context
-}
-
-// List: Lists matching subscriptions.
-func (r *ProjectsSubscriptionsService) List(project string) *ProjectsSubscriptionsListCall {
-	c := &ProjectsSubscriptionsListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
-	c.project = project
-	return c
-}
-
-// PageSize sets the optional parameter "pageSize": Maximum number of
-// subscriptions to return.
-func (c *ProjectsSubscriptionsListCall) PageSize(pageSize int64) *ProjectsSubscriptionsListCall {
-	c.urlParams_.Set("pageSize", fmt.Sprint(pageSize))
-	return c
-}
-
-// PageToken sets the optional parameter "pageToken": The value returned
-// by the last `ListSubscriptionsResponse`; indicates that this is a
-// continuation of a prior `ListSubscriptions` call, and that the system
-// should return the next page of data.
-func (c *ProjectsSubscriptionsListCall) PageToken(pageToken string) *ProjectsSubscriptionsListCall {
-	c.urlParams_.Set("pageToken", pageToken)
-	return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *ProjectsSubscriptionsListCall) Fields(s ...googleapi.Field) *ProjectsSubscriptionsListCall {
-	c.urlParams_.Set("fields", googleapi.CombineFields(s))
-	return c
-}
-
-// IfNoneMatch sets the optional parameter which makes the operation
-// fail if the object's ETag matches the given value. This is useful for
-// getting updates only after the object has changed since the last
-// request. Use googleapi.IsNotModified to check whether the response
-// error from Do is the result of If-None-Match.
-func (c *ProjectsSubscriptionsListCall) IfNoneMatch(entityTag string) *ProjectsSubscriptionsListCall {
-	c.ifNoneMatch_ = entityTag
-	return c
-}
-
-// Context sets the context to be used in this call's Do method. Any
-// pending HTTP request will be aborted if the provided context is
-// canceled.
-func (c *ProjectsSubscriptionsListCall) Context(ctx context.Context) *ProjectsSubscriptionsListCall {
-	c.ctx_ = ctx
-	return c
-}
-
-func (c *ProjectsSubscriptionsListCall) doRequest(alt string) (*http.Response, error) {
-	reqHeaders := make(http.Header)
-	reqHeaders.Set("User-Agent", c.s.userAgent())
-	if c.ifNoneMatch_ != "" {
-		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
-	}
-	var body io.Reader = nil
-	c.urlParams_.Set("alt", alt)
-	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+project}/subscriptions")
-	urls += "?" + c.urlParams_.Encode()
-	req, _ := http.NewRequest("GET", urls, body)
-	req.Header = reqHeaders
-	googleapi.Expand(req.URL, map[string]string{
-		"project": c.project,
-	})
-	return gensupport.SendRequest(c.ctx_, c.s.client, req)
-}
-
-// Do executes the "pubsub.projects.subscriptions.list" call.
-// Exactly one of *ListSubscriptionsResponse or error will be non-nil.
-// Any non-2xx status code is an error. Response headers are in either
-// *ListSubscriptionsResponse.ServerResponse.Header or (if a response
-// was returned at all) in error.(*googleapi.Error).Header. Use
-// googleapi.IsNotModified to check whether the returned error was
-// because http.StatusNotModified was returned.
-func (c *ProjectsSubscriptionsListCall) Do(opts ...googleapi.CallOption) (*ListSubscriptionsResponse, error) {
-	gensupport.SetOptions(c.urlParams_, opts...)
-	res, err := c.doRequest("json")
-	if res != nil && res.StatusCode == http.StatusNotModified {
-		if res.Body != nil {
-			res.Body.Close()
-		}
-		return nil, &googleapi.Error{
-			Code:   res.StatusCode,
-			Header: res.Header,
-		}
-	}
-	if err != nil {
-		return nil, err
-	}
-	defer googleapi.CloseBody(res)
-	if err := googleapi.CheckResponse(res); err != nil {
-		return nil, err
-	}
-	ret := &ListSubscriptionsResponse{
-		ServerResponse: googleapi.ServerResponse{
-			Header:         res.Header,
-			HTTPStatusCode: res.StatusCode,
-		},
-	}
-	target := &ret
-	if err := json.NewDecoder(res.Body).Decode(target); err != nil {
-		return nil, err
-	}
-	return ret, nil
-	// {
-	//   "description": "Lists matching subscriptions.",
-	//   "httpMethod": "GET",
-	//   "id": "pubsub.projects.subscriptions.list",
-	//   "parameterOrder": [
-	//     "project"
-	//   ],
-	//   "parameters": {
-	//     "pageSize": {
-	//       "description": "Maximum number of subscriptions to return.",
-	//       "format": "int32",
-	//       "location": "query",
-	//       "type": "integer"
-	//     },
-	//     "pageToken": {
-	//       "description": "The value returned by the last `ListSubscriptionsResponse`; indicates that this is a continuation of a prior `ListSubscriptions` call, and that the system should return the next page of data.",
-	//       "location": "query",
-	//       "type": "string"
-	//     },
-	//     "project": {
-	//       "description": "The name of the cloud project that subscriptions belong to.",
-	//       "location": "path",
-	//       "pattern": "^projects/[^/]*$",
-	//       "required": true,
-	//       "type": "string"
-	//     }
-	//   },
-	//   "path": "v1/{+project}/subscriptions",
-	//   "response": {
-	//     "$ref": "ListSubscriptionsResponse"
-	//   },
-	//   "scopes": [
-	//     "https://www.googleapis.com/auth/cloud-platform",
-	//     "https://www.googleapis.com/auth/pubsub"
-	//   ]
-	// }
-
-}
-
-// Pages invokes f for each page of results.
-// A non-nil error returned from f will halt the iteration.
-// The provided context supersedes any context provided to the Context method.
-func (c *ProjectsSubscriptionsListCall) Pages(ctx context.Context, f func(*ListSubscriptionsResponse) error) error {
-	c.ctx_ = ctx
-	defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point
-	for {
-		x, err := c.Do()
-		if err != nil {
-			return err
-		}
-		if err := f(x); err != nil {
-			return err
-		}
-		if x.NextPageToken == "" {
-			return nil
-		}
-		c.PageToken(x.NextPageToken)
-	}
-}
-
-// method id "pubsub.projects.subscriptions.modifyAckDeadline":
-
-type ProjectsSubscriptionsModifyAckDeadlineCall struct {
-	s                        *Service
-	subscription             string
-	modifyackdeadlinerequest *ModifyAckDeadlineRequest
-	urlParams_               gensupport.URLParams
-	ctx_                     context.Context
-}
-
-// ModifyAckDeadline: Modifies the ack deadline for a specific message.
-// This method is useful to indicate that more time is needed to process
-// a message by the subscriber, or to make the message available for
-// redelivery if the processing was interrupted.
-func (r *ProjectsSubscriptionsService) ModifyAckDeadline(subscription string, modifyackdeadlinerequest *ModifyAckDeadlineRequest) *ProjectsSubscriptionsModifyAckDeadlineCall {
-	c := &ProjectsSubscriptionsModifyAckDeadlineCall{s: r.s, urlParams_: make(gensupport.URLParams)}
-	c.subscription = subscription
-	c.modifyackdeadlinerequest = modifyackdeadlinerequest
-	return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *ProjectsSubscriptionsModifyAckDeadlineCall) Fields(s ...googleapi.Field) *ProjectsSubscriptionsModifyAckDeadlineCall {
-	c.urlParams_.Set("fields", googleapi.CombineFields(s))
-	return c
-}
-
-// Context sets the context to be used in this call's Do method. Any
-// pending HTTP request will be aborted if the provided context is
-// canceled.
-func (c *ProjectsSubscriptionsModifyAckDeadlineCall) Context(ctx context.Context) *ProjectsSubscriptionsModifyAckDeadlineCall {
-	c.ctx_ = ctx
-	return c
-}
-
-func (c *ProjectsSubscriptionsModifyAckDeadlineCall) doRequest(alt string) (*http.Response, error) {
-	reqHeaders := make(http.Header)
-	reqHeaders.Set("User-Agent", c.s.userAgent())
-	var body io.Reader = nil
-	body, err := googleapi.WithoutDataWrapper.JSONReader(c.modifyackdeadlinerequest)
-	if err != nil {
-		return nil, err
-	}
-	reqHeaders.Set("Content-Type", "application/json")
-	c.urlParams_.Set("alt", alt)
-	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+subscription}:modifyAckDeadline")
-	urls += "?" + c.urlParams_.Encode()
-	req, _ := http.NewRequest("POST", urls, body)
-	req.Header = reqHeaders
-	googleapi.Expand(req.URL, map[string]string{
-		"subscription": c.subscription,
-	})
-	return gensupport.SendRequest(c.ctx_, c.s.client, req)
-}
-
-// Do executes the "pubsub.projects.subscriptions.modifyAckDeadline" call.
-// Exactly one of *Empty or error will be non-nil. Any non-2xx status
-// code is an error. Response headers are in either
-// *Empty.ServerResponse.Header or (if a response was returned at all)
-// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
-// check whether the returned error was because http.StatusNotModified
-// was returned.
-func (c *ProjectsSubscriptionsModifyAckDeadlineCall) Do(opts ...googleapi.CallOption) (*Empty, error) {
-	gensupport.SetOptions(c.urlParams_, opts...)
-	res, err := c.doRequest("json")
-	if res != nil && res.StatusCode == http.StatusNotModified {
-		if res.Body != nil {
-			res.Body.Close()
-		}
-		return nil, &googleapi.Error{
-			Code:   res.StatusCode,
-			Header: res.Header,
-		}
-	}
-	if err != nil {
-		return nil, err
-	}
-	defer googleapi.CloseBody(res)
-	if err := googleapi.CheckResponse(res); err != nil {
-		return nil, err
-	}
-	ret := &Empty{
-		ServerResponse: googleapi.ServerResponse{
-			Header:         res.Header,
-			HTTPStatusCode: res.StatusCode,
-		},
-	}
-	target := &ret
-	if err := json.NewDecoder(res.Body).Decode(target); err != nil {
-		return nil, err
-	}
-	return ret, nil
-	// {
-	//   "description": "Modifies the ack deadline for a specific message. This method is useful to indicate that more time is needed to process a message by the subscriber, or to make the message available for redelivery if the processing was interrupted.",
-	//   "httpMethod": "POST",
-	//   "id": "pubsub.projects.subscriptions.modifyAckDeadline",
-	//   "parameterOrder": [
-	//     "subscription"
-	//   ],
-	//   "parameters": {
-	//     "subscription": {
-	//       "description": "The name of the subscription.",
-	//       "location": "path",
-	//       "pattern": "^projects/[^/]*/subscriptions/[^/]*$",
-	//       "required": true,
-	//       "type": "string"
-	//     }
-	//   },
-	//   "path": "v1/{+subscription}:modifyAckDeadline",
-	//   "request": {
-	//     "$ref": "ModifyAckDeadlineRequest"
-	//   },
-	//   "response": {
-	//     "$ref": "Empty"
-	//   },
-	//   "scopes": [
-	//     "https://www.googleapis.com/auth/cloud-platform",
-	//     "https://www.googleapis.com/auth/pubsub"
-	//   ]
-	// }
-
-}
-
-// method id "pubsub.projects.subscriptions.modifyPushConfig":
-
-type ProjectsSubscriptionsModifyPushConfigCall struct {
-	s                       *Service
-	subscription            string
-	modifypushconfigrequest *ModifyPushConfigRequest
-	urlParams_              gensupport.URLParams
-	ctx_                    context.Context
-}
-
-// ModifyPushConfig: Modifies the `PushConfig` for a specified
-// subscription. This may be used to change a push subscription to a
-// pull one (signified by an empty `PushConfig`) or vice versa, or
-// change the endpoint URL and other attributes of a push subscription.
-// Messages will accumulate for delivery continuously through the call
-// regardless of changes to the `PushConfig`.
-func (r *ProjectsSubscriptionsService) ModifyPushConfig(subscription string, modifypushconfigrequest *ModifyPushConfigRequest) *ProjectsSubscriptionsModifyPushConfigCall {
-	c := &ProjectsSubscriptionsModifyPushConfigCall{s: r.s, urlParams_: make(gensupport.URLParams)}
-	c.subscription = subscription
-	c.modifypushconfigrequest = modifypushconfigrequest
-	return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *ProjectsSubscriptionsModifyPushConfigCall) Fields(s ...googleapi.Field) *ProjectsSubscriptionsModifyPushConfigCall {
-	c.urlParams_.Set("fields", googleapi.CombineFields(s))
-	return c
-}
-
-// Context sets the context to be used in this call's Do method. Any
-// pending HTTP request will be aborted if the provided context is
-// canceled.
-func (c *ProjectsSubscriptionsModifyPushConfigCall) Context(ctx context.Context) *ProjectsSubscriptionsModifyPushConfigCall {
-	c.ctx_ = ctx
-	return c
-}
-
-func (c *ProjectsSubscriptionsModifyPushConfigCall) doRequest(alt string) (*http.Response, error) {
-	reqHeaders := make(http.Header)
-	reqHeaders.Set("User-Agent", c.s.userAgent())
-	var body io.Reader = nil
-	body, err := googleapi.WithoutDataWrapper.JSONReader(c.modifypushconfigrequest)
-	if err != nil {
-		return nil, err
-	}
-	reqHeaders.Set("Content-Type", "application/json")
-	c.urlParams_.Set("alt", alt)
-	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+subscription}:modifyPushConfig")
-	urls += "?" + c.urlParams_.Encode()
-	req, _ := http.NewRequest("POST", urls, body)
-	req.Header = reqHeaders
-	googleapi.Expand(req.URL, map[string]string{
-		"subscription": c.subscription,
-	})
-	return gensupport.SendRequest(c.ctx_, c.s.client, req)
-}
-
-// Do executes the "pubsub.projects.subscriptions.modifyPushConfig" call.
-// Exactly one of *Empty or error will be non-nil. Any non-2xx status
-// code is an error. Response headers are in either
-// *Empty.ServerResponse.Header or (if a response was returned at all)
-// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
-// check whether the returned error was because http.StatusNotModified
-// was returned.
-func (c *ProjectsSubscriptionsModifyPushConfigCall) Do(opts ...googleapi.CallOption) (*Empty, error) {
-	gensupport.SetOptions(c.urlParams_, opts...)
-	res, err := c.doRequest("json")
-	if res != nil && res.StatusCode == http.StatusNotModified {
-		if res.Body != nil {
-			res.Body.Close()
-		}
-		return nil, &googleapi.Error{
-			Code:   res.StatusCode,
-			Header: res.Header,
-		}
-	}
-	if err != nil {
-		return nil, err
-	}
-	defer googleapi.CloseBody(res)
-	if err := googleapi.CheckResponse(res); err != nil {
-		return nil, err
-	}
-	ret := &Empty{
-		ServerResponse: googleapi.ServerResponse{
-			Header:         res.Header,
-			HTTPStatusCode: res.StatusCode,
-		},
-	}
-	target := &ret
-	if err := json.NewDecoder(res.Body).Decode(target); err != nil {
-		return nil, err
-	}
-	return ret, nil
-	// {
-	//   "description": "Modifies the `PushConfig` for a specified subscription. This may be used to change a push subscription to a pull one (signified by an empty `PushConfig`) or vice versa, or change the endpoint URL and other attributes of a push subscription. Messages will accumulate for delivery continuously through the call regardless of changes to the `PushConfig`.",
-	//   "httpMethod": "POST",
-	//   "id": "pubsub.projects.subscriptions.modifyPushConfig",
-	//   "parameterOrder": [
-	//     "subscription"
-	//   ],
-	//   "parameters": {
-	//     "subscription": {
-	//       "description": "The name of the subscription.",
-	//       "location": "path",
-	//       "pattern": "^projects/[^/]*/subscriptions/[^/]*$",
-	//       "required": true,
-	//       "type": "string"
-	//     }
-	//   },
-	//   "path": "v1/{+subscription}:modifyPushConfig",
-	//   "request": {
-	//     "$ref": "ModifyPushConfigRequest"
-	//   },
-	//   "response": {
-	//     "$ref": "Empty"
-	//   },
-	//   "scopes": [
-	//     "https://www.googleapis.com/auth/cloud-platform",
-	//     "https://www.googleapis.com/auth/pubsub"
-	//   ]
-	// }
-
-}
-
-// method id "pubsub.projects.subscriptions.pull":
-
-type ProjectsSubscriptionsPullCall struct {
-	s            *Service
-	subscription string
-	pullrequest  *PullRequest
-	urlParams_   gensupport.URLParams
-	ctx_         context.Context
-}
-
-// Pull: Pulls messages from the server. Returns an empty list if there
-// are no messages available in the backlog. The server may return
-// `UNAVAILABLE` if there are too many concurrent pull requests pending
-// for the given subscription.
-func (r *ProjectsSubscriptionsService) Pull(subscription string, pullrequest *PullRequest) *ProjectsSubscriptionsPullCall {
-	c := &ProjectsSubscriptionsPullCall{s: r.s, urlParams_: make(gensupport.URLParams)}
-	c.subscription = subscription
-	c.pullrequest = pullrequest
-	return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *ProjectsSubscriptionsPullCall) Fields(s ...googleapi.Field) *ProjectsSubscriptionsPullCall {
-	c.urlParams_.Set("fields", googleapi.CombineFields(s))
-	return c
-}
-
-// Context sets the context to be used in this call's Do method. Any
-// pending HTTP request will be aborted if the provided context is
-// canceled.
-func (c *ProjectsSubscriptionsPullCall) Context(ctx context.Context) *ProjectsSubscriptionsPullCall {
-	c.ctx_ = ctx
-	return c
-}
-
-func (c *ProjectsSubscriptionsPullCall) doRequest(alt string) (*http.Response, error) {
-	reqHeaders := make(http.Header)
-	reqHeaders.Set("User-Agent", c.s.userAgent())
-	var body io.Reader = nil
-	body, err := googleapi.WithoutDataWrapper.JSONReader(c.pullrequest)
-	if err != nil {
-		return nil, err
-	}
-	reqHeaders.Set("Content-Type", "application/json")
-	c.urlParams_.Set("alt", alt)
-	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+subscription}:pull")
-	urls += "?" + c.urlParams_.Encode()
-	req, _ := http.NewRequest("POST", urls, body)
-	req.Header = reqHeaders
-	googleapi.Expand(req.URL, map[string]string{
-		"subscription": c.subscription,
-	})
-	return gensupport.SendRequest(c.ctx_, c.s.client, req)
-}
-
-// Do executes the "pubsub.projects.subscriptions.pull" call.
-// Exactly one of *PullResponse or error will be non-nil. Any non-2xx
-// status code is an error. Response headers are in either
-// *PullResponse.ServerResponse.Header or (if a response was returned at
-// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified
-// to check whether the returned error was because
-// http.StatusNotModified was returned.
-func (c *ProjectsSubscriptionsPullCall) Do(opts ...googleapi.CallOption) (*PullResponse, error) {
-	gensupport.SetOptions(c.urlParams_, opts...)
-	res, err := c.doRequest("json")
-	if res != nil && res.StatusCode == http.StatusNotModified {
-		if res.Body != nil {
-			res.Body.Close()
-		}
-		return nil, &googleapi.Error{
-			Code:   res.StatusCode,
-			Header: res.Header,
-		}
-	}
-	if err != nil {
-		return nil, err
-	}
-	defer googleapi.CloseBody(res)
-	if err := googleapi.CheckResponse(res); err != nil {
-		return nil, err
-	}
-	ret := &PullResponse{
-		ServerResponse: googleapi.ServerResponse{
-			Header:         res.Header,
-			HTTPStatusCode: res.StatusCode,
-		},
-	}
-	target := &ret
-	if err := json.NewDecoder(res.Body).Decode(target); err != nil {
-		return nil, err
-	}
-	return ret, nil
-	// {
-	//   "description": "Pulls messages from the server. Returns an empty list if there are no messages available in the backlog. The server may return `UNAVAILABLE` if there are too many concurrent pull requests pending for the given subscription.",
-	//   "httpMethod": "POST",
-	//   "id": "pubsub.projects.subscriptions.pull",
-	//   "parameterOrder": [
-	//     "subscription"
-	//   ],
-	//   "parameters": {
-	//     "subscription": {
-	//       "description": "The subscription from which messages should be pulled.",
-	//       "location": "path",
-	//       "pattern": "^projects/[^/]*/subscriptions/[^/]*$",
-	//       "required": true,
-	//       "type": "string"
-	//     }
-	//   },
-	//   "path": "v1/{+subscription}:pull",
-	//   "request": {
-	//     "$ref": "PullRequest"
-	//   },
-	//   "response": {
-	//     "$ref": "PullResponse"
-	//   },
-	//   "scopes": [
-	//     "https://www.googleapis.com/auth/cloud-platform",
-	//     "https://www.googleapis.com/auth/pubsub"
-	//   ]
-	// }
-
-}
-
-// method id "pubsub.projects.subscriptions.setIamPolicy":
-
-type ProjectsSubscriptionsSetIamPolicyCall struct {
-	s                   *Service
-	resource            string
-	setiampolicyrequest *SetIamPolicyRequest
-	urlParams_          gensupport.URLParams
-	ctx_                context.Context
-}
-
-// SetIamPolicy: Sets the access control policy on the specified
-// resource. Replaces any existing policy.
-func (r *ProjectsSubscriptionsService) SetIamPolicy(resource string, setiampolicyrequest *SetIamPolicyRequest) *ProjectsSubscriptionsSetIamPolicyCall {
-	c := &ProjectsSubscriptionsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)}
-	c.resource = resource
-	c.setiampolicyrequest = setiampolicyrequest
-	return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *ProjectsSubscriptionsSetIamPolicyCall) Fields(s ...googleapi.Field) *ProjectsSubscriptionsSetIamPolicyCall {
-	c.urlParams_.Set("fields", googleapi.CombineFields(s))
-	return c
-}
-
-// Context sets the context to be used in this call's Do method. Any
-// pending HTTP request will be aborted if the provided context is
-// canceled.
-func (c *ProjectsSubscriptionsSetIamPolicyCall) Context(ctx context.Context) *ProjectsSubscriptionsSetIamPolicyCall {
-	c.ctx_ = ctx
-	return c
-}
-
-func (c *ProjectsSubscriptionsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) {
-	reqHeaders := make(http.Header)
-	reqHeaders.Set("User-Agent", c.s.userAgent())
-	var body io.Reader = nil
-	body, err := googleapi.WithoutDataWrapper.JSONReader(c.setiampolicyrequest)
-	if err != nil {
-		return nil, err
-	}
-	reqHeaders.Set("Content-Type", "application/json")
-	c.urlParams_.Set("alt", alt)
-	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+resource}:setIamPolicy")
-	urls += "?" + c.urlParams_.Encode()
-	req, _ := http.NewRequest("POST", urls, body)
-	req.Header = reqHeaders
-	googleapi.Expand(req.URL, map[string]string{
-		"resource": c.resource,
-	})
-	return gensupport.SendRequest(c.ctx_, c.s.client, req)
-}
-
-// Do executes the "pubsub.projects.subscriptions.setIamPolicy" call.
-// Exactly one of *Policy or error will be non-nil. Any non-2xx status
-// code is an error. Response headers are in either
-// *Policy.ServerResponse.Header or (if a response was returned at all)
-// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
-// check whether the returned error was because http.StatusNotModified
-// was returned.
-func (c *ProjectsSubscriptionsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) {
-	gensupport.SetOptions(c.urlParams_, opts...)
-	res, err := c.doRequest("json")
-	if res != nil && res.StatusCode == http.StatusNotModified {
-		if res.Body != nil {
-			res.Body.Close()
-		}
-		return nil, &googleapi.Error{
-			Code:   res.StatusCode,
-			Header: res.Header,
-		}
-	}
-	if err != nil {
-		return nil, err
-	}
-	defer googleapi.CloseBody(res)
-	if err := googleapi.CheckResponse(res); err != nil {
-		return nil, err
-	}
-	ret := &Policy{
-		ServerResponse: googleapi.ServerResponse{
-			Header:         res.Header,
-			HTTPStatusCode: res.StatusCode,
-		},
-	}
-	target := &ret
-	if err := json.NewDecoder(res.Body).Decode(target); err != nil {
-		return nil, err
-	}
-	return ret, nil
-	// {
-	//   "description": "Sets the access control policy on the specified resource. Replaces any existing policy.",
-	//   "httpMethod": "POST",
-	//   "id": "pubsub.projects.subscriptions.setIamPolicy",
-	//   "parameterOrder": [
-	//     "resource"
-	//   ],
-	//   "parameters": {
-	//     "resource": {
-	//       "description": "REQUIRED: The resource for which the policy is being specified. `resource` is usually specified as a path, such as `projects/*project*/zones/*zone*/disks/*disk*`. The format for the path specified in this value is resource specific and is specified in the `setIamPolicy` documentation.",
-	//       "location": "path",
-	//       "pattern": "^projects/[^/]*/subscriptions/[^/]*$",
-	//       "required": true,
-	//       "type": "string"
-	//     }
-	//   },
-	//   "path": "v1/{+resource}:setIamPolicy",
-	//   "request": {
-	//     "$ref": "SetIamPolicyRequest"
-	//   },
-	//   "response": {
-	//     "$ref": "Policy"
-	//   },
-	//   "scopes": [
-	//     "https://www.googleapis.com/auth/cloud-platform",
-	//     "https://www.googleapis.com/auth/pubsub"
-	//   ]
-	// }
-
-}
-
-// method id "pubsub.projects.subscriptions.testIamPermissions":
-
-type ProjectsSubscriptionsTestIamPermissionsCall struct {
-	s                         *Service
-	resource                  string
-	testiampermissionsrequest *TestIamPermissionsRequest
-	urlParams_                gensupport.URLParams
-	ctx_                      context.Context
-}
-
-// TestIamPermissions: Returns permissions that a caller has on the
-// specified resource.
-func (r *ProjectsSubscriptionsService) TestIamPermissions(resource string, testiampermissionsrequest *TestIamPermissionsRequest) *ProjectsSubscriptionsTestIamPermissionsCall {
-	c := &ProjectsSubscriptionsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)}
-	c.resource = resource
-	c.testiampermissionsrequest = testiampermissionsrequest
-	return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *ProjectsSubscriptionsTestIamPermissionsCall) Fields(s ...googleapi.Field) *ProjectsSubscriptionsTestIamPermissionsCall {
-	c.urlParams_.Set("fields", googleapi.CombineFields(s))
-	return c
-}
-
-// Context sets the context to be used in this call's Do method. Any
-// pending HTTP request will be aborted if the provided context is
-// canceled.
-func (c *ProjectsSubscriptionsTestIamPermissionsCall) Context(ctx context.Context) *ProjectsSubscriptionsTestIamPermissionsCall {
-	c.ctx_ = ctx
-	return c
-}
-
-func (c *ProjectsSubscriptionsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) {
-	reqHeaders := make(http.Header)
-	reqHeaders.Set("User-Agent", c.s.userAgent())
-	var body io.Reader = nil
-	body, err := googleapi.WithoutDataWrapper.JSONReader(c.testiampermissionsrequest)
-	if err != nil {
-		return nil, err
-	}
-	reqHeaders.Set("Content-Type", "application/json")
-	c.urlParams_.Set("alt", alt)
-	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+resource}:testIamPermissions")
-	urls += "?" + c.urlParams_.Encode()
-	req, _ := http.NewRequest("POST", urls, body)
-	req.Header = reqHeaders
-	googleapi.Expand(req.URL, map[string]string{
-		"resource": c.resource,
-	})
-	return gensupport.SendRequest(c.ctx_, c.s.client, req)
-}
-
-// Do executes the "pubsub.projects.subscriptions.testIamPermissions" call.
-// Exactly one of *TestIamPermissionsResponse or error will be non-nil.
-// Any non-2xx status code is an error. Response headers are in either
-// *TestIamPermissionsResponse.ServerResponse.Header or (if a response
-// was returned at all) in error.(*googleapi.Error).Header. Use
-// googleapi.IsNotModified to check whether the returned error was
-// because http.StatusNotModified was returned.
-func (c *ProjectsSubscriptionsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestIamPermissionsResponse, error) {
-	gensupport.SetOptions(c.urlParams_, opts...)
-	res, err := c.doRequest("json")
-	if res != nil && res.StatusCode == http.StatusNotModified {
-		if res.Body != nil {
-			res.Body.Close()
-		}
-		return nil, &googleapi.Error{
-			Code:   res.StatusCode,
-			Header: res.Header,
-		}
-	}
-	if err != nil {
-		return nil, err
-	}
-	defer googleapi.CloseBody(res)
-	if err := googleapi.CheckResponse(res); err != nil {
-		return nil, err
-	}
-	ret := &TestIamPermissionsResponse{
-		ServerResponse: googleapi.ServerResponse{
-			Header:         res.Header,
-			HTTPStatusCode: res.StatusCode,
-		},
-	}
-	target := &ret
-	if err := json.NewDecoder(res.Body).Decode(target); err != nil {
-		return nil, err
-	}
-	return ret, nil
-	// {
-	//   "description": "Returns permissions that a caller has on the specified resource.",
-	//   "httpMethod": "POST",
-	//   "id": "pubsub.projects.subscriptions.testIamPermissions",
-	//   "parameterOrder": [
-	//     "resource"
-	//   ],
-	//   "parameters": {
-	//     "resource": {
-	//       "description": "REQUIRED: The resource for which the policy detail is being requested. `resource` is usually specified as a path, such as `projects/*project*/zones/*zone*/disks/*disk*`. The format for the path specified in this value is resource specific and is specified in the `testIamPermissions` documentation.",
-	//       "location": "path",
-	//       "pattern": "^projects/[^/]*/subscriptions/[^/]*$",
-	//       "required": true,
-	//       "type": "string"
-	//     }
-	//   },
-	//   "path": "v1/{+resource}:testIamPermissions",
-	//   "request": {
-	//     "$ref": "TestIamPermissionsRequest"
-	//   },
-	//   "response": {
-	//     "$ref": "TestIamPermissionsResponse"
-	//   },
-	//   "scopes": [
-	//     "https://www.googleapis.com/auth/cloud-platform",
-	//     "https://www.googleapis.com/auth/pubsub"
-	//   ]
-	// }
-
-}
-
-// method id "pubsub.projects.topics.create":
-
-type ProjectsTopicsCreateCall struct {
-	s          *Service
-	name       string
-	topic      *Topic
-	urlParams_ gensupport.URLParams
-	ctx_       context.Context
-}
-
-// Create: Creates the given topic with the given name.
-func (r *ProjectsTopicsService) Create(name string, topic *Topic) *ProjectsTopicsCreateCall {
-	c := &ProjectsTopicsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
-	c.name = name
-	c.topic = topic
-	return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *ProjectsTopicsCreateCall) Fields(s ...googleapi.Field) *ProjectsTopicsCreateCall {
-	c.urlParams_.Set("fields", googleapi.CombineFields(s))
-	return c
-}
-
-// Context sets the context to be used in this call's Do method. Any
-// pending HTTP request will be aborted if the provided context is
-// canceled.
-func (c *ProjectsTopicsCreateCall) Context(ctx context.Context) *ProjectsTopicsCreateCall {
-	c.ctx_ = ctx
-	return c
-}
-
-func (c *ProjectsTopicsCreateCall) doRequest(alt string) (*http.Response, error) {
-	reqHeaders := make(http.Header)
-	reqHeaders.Set("User-Agent", c.s.userAgent())
-	var body io.Reader = nil
-	body, err := googleapi.WithoutDataWrapper.JSONReader(c.topic)
-	if err != nil {
-		return nil, err
-	}
-	reqHeaders.Set("Content-Type", "application/json")
-	c.urlParams_.Set("alt", alt)
-	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}")
-	urls += "?" + c.urlParams_.Encode()
-	req, _ := http.NewRequest("PUT", urls, body)
-	req.Header = reqHeaders
-	googleapi.Expand(req.URL, map[string]string{
-		"name": c.name,
-	})
-	return gensupport.SendRequest(c.ctx_, c.s.client, req)
-}
-
-// Do executes the "pubsub.projects.topics.create" call.
-// Exactly one of *Topic or error will be non-nil. Any non-2xx status
-// code is an error. Response headers are in either
-// *Topic.ServerResponse.Header or (if a response was returned at all)
-// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
-// check whether the returned error was because http.StatusNotModified
-// was returned.
-func (c *ProjectsTopicsCreateCall) Do(opts ...googleapi.CallOption) (*Topic, error) {
-	gensupport.SetOptions(c.urlParams_, opts...)
-	res, err := c.doRequest("json")
-	if res != nil && res.StatusCode == http.StatusNotModified {
-		if res.Body != nil {
-			res.Body.Close()
-		}
-		return nil, &googleapi.Error{
-			Code:   res.StatusCode,
-			Header: res.Header,
-		}
-	}
-	if err != nil {
-		return nil, err
-	}
-	defer googleapi.CloseBody(res)
-	if err := googleapi.CheckResponse(res); err != nil {
-		return nil, err
-	}
-	ret := &Topic{
-		ServerResponse: googleapi.ServerResponse{
-			Header:         res.Header,
-			HTTPStatusCode: res.StatusCode,
-		},
-	}
-	target := &ret
-	if err := json.NewDecoder(res.Body).Decode(target); err != nil {
-		return nil, err
-	}
-	return ret, nil
-	// {
-	//   "description": "Creates the given topic with the given name.",
-	//   "httpMethod": "PUT",
-	//   "id": "pubsub.projects.topics.create",
-	//   "parameterOrder": [
-	//     "name"
-	//   ],
-	//   "parameters": {
-	//     "name": {
-	//       "description": "The name of the topic. It must have the format `\"projects/{project}/topics/{topic}\"`. `{topic}` must start with a letter, and contain only letters (`[A-Za-z]`), numbers (`[0-9]`), dashes (`-`), underscores (`_`), periods (`.`), tildes (`~`), plus (`+`) or percent signs (`%`). It must be between 3 and 255 characters in length, and it must not start with `\"goog\"`.",
-	//       "location": "path",
-	//       "pattern": "^projects/[^/]*/topics/[^/]*$",
-	//       "required": true,
-	//       "type": "string"
-	//     }
-	//   },
-	//   "path": "v1/{+name}",
-	//   "request": {
-	//     "$ref": "Topic"
-	//   },
-	//   "response": {
-	//     "$ref": "Topic"
-	//   },
-	//   "scopes": [
-	//     "https://www.googleapis.com/auth/cloud-platform",
-	//     "https://www.googleapis.com/auth/pubsub"
-	//   ]
-	// }
-
-}
-
-// method id "pubsub.projects.topics.delete":
-
-type ProjectsTopicsDeleteCall struct {
-	s          *Service
-	topic      string
-	urlParams_ gensupport.URLParams
-	ctx_       context.Context
-}
-
-// Delete: Deletes the topic with the given name. Returns `NOT_FOUND` if
-// the topic does not exist. After a topic is deleted, a new topic may
-// be created with the same name; this is an entirely new topic with
-// none of the old configuration or subscriptions. Existing
-// subscriptions to this topic are not deleted, but their `topic` field
-// is set to `_deleted-topic_`.
-func (r *ProjectsTopicsService) Delete(topic string) *ProjectsTopicsDeleteCall {
-	c := &ProjectsTopicsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)}
-	c.topic = topic
-	return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *ProjectsTopicsDeleteCall) Fields(s ...googleapi.Field) *ProjectsTopicsDeleteCall {
-	c.urlParams_.Set("fields", googleapi.CombineFields(s))
-	return c
-}
-
-// Context sets the context to be used in this call's Do method. Any
-// pending HTTP request will be aborted if the provided context is
-// canceled.
-func (c *ProjectsTopicsDeleteCall) Context(ctx context.Context) *ProjectsTopicsDeleteCall {
-	c.ctx_ = ctx
-	return c
-}
-
-func (c *ProjectsTopicsDeleteCall) doRequest(alt string) (*http.Response, error) {
-	reqHeaders := make(http.Header)
-	reqHeaders.Set("User-Agent", c.s.userAgent())
-	var body io.Reader = nil
-	c.urlParams_.Set("alt", alt)
-	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+topic}")
-	urls += "?" + c.urlParams_.Encode()
-	req, _ := http.NewRequest("DELETE", urls, body)
-	req.Header = reqHeaders
-	googleapi.Expand(req.URL, map[string]string{
-		"topic": c.topic,
-	})
-	return gensupport.SendRequest(c.ctx_, c.s.client, req)
-}
-
-// Do executes the "pubsub.projects.topics.delete" call.
-// Exactly one of *Empty or error will be non-nil. Any non-2xx status
-// code is an error. Response headers are in either
-// *Empty.ServerResponse.Header or (if a response was returned at all)
-// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
-// check whether the returned error was because http.StatusNotModified
-// was returned.
-func (c *ProjectsTopicsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) {
-	gensupport.SetOptions(c.urlParams_, opts...)
-	res, err := c.doRequest("json")
-	if res != nil && res.StatusCode == http.StatusNotModified {
-		if res.Body != nil {
-			res.Body.Close()
-		}
-		return nil, &googleapi.Error{
-			Code:   res.StatusCode,
-			Header: res.Header,
-		}
-	}
-	if err != nil {
-		return nil, err
-	}
-	defer googleapi.CloseBody(res)
-	if err := googleapi.CheckResponse(res); err != nil {
-		return nil, err
-	}
-	ret := &Empty{
-		ServerResponse: googleapi.ServerResponse{
-			Header:         res.Header,
-			HTTPStatusCode: res.StatusCode,
-		},
-	}
-	target := &ret
-	if err := json.NewDecoder(res.Body).Decode(target); err != nil {
-		return nil, err
-	}
-	return ret, nil
-	// {
-	//   "description": "Deletes the topic with the given name. Returns `NOT_FOUND` if the topic does not exist. After a topic is deleted, a new topic may be created with the same name; this is an entirely new topic with none of the old configuration or subscriptions. Existing subscriptions to this topic are not deleted, but their `topic` field is set to `_deleted-topic_`.",
-	//   "httpMethod": "DELETE",
-	//   "id": "pubsub.projects.topics.delete",
-	//   "parameterOrder": [
-	//     "topic"
-	//   ],
-	//   "parameters": {
-	//     "topic": {
-	//       "description": "Name of the topic to delete.",
-	//       "location": "path",
-	//       "pattern": "^projects/[^/]*/topics/[^/]*$",
-	//       "required": true,
-	//       "type": "string"
-	//     }
-	//   },
-	//   "path": "v1/{+topic}",
-	//   "response": {
-	//     "$ref": "Empty"
-	//   },
-	//   "scopes": [
-	//     "https://www.googleapis.com/auth/cloud-platform",
-	//     "https://www.googleapis.com/auth/pubsub"
-	//   ]
-	// }
-
-}
-
-// method id "pubsub.projects.topics.get":
-
-type ProjectsTopicsGetCall struct {
-	s            *Service
-	topic        string
-	urlParams_   gensupport.URLParams
-	ifNoneMatch_ string
-	ctx_         context.Context
-}
-
-// Get: Gets the configuration of a topic.
-func (r *ProjectsTopicsService) Get(topic string) *ProjectsTopicsGetCall {
-	c := &ProjectsTopicsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
-	c.topic = topic
-	return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *ProjectsTopicsGetCall) Fields(s ...googleapi.Field) *ProjectsTopicsGetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ProjectsTopicsGetCall) IfNoneMatch(entityTag string) *ProjectsTopicsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsTopicsGetCall) Context(ctx context.Context) *ProjectsTopicsGetCall { - c.ctx_ = ctx - return c -} - -func (c *ProjectsTopicsGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+topic}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "topic": c.topic, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "pubsub.projects.topics.get" call. -// Exactly one of *Topic or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Topic.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ProjectsTopicsGetCall) Do(opts ...googleapi.CallOption) (*Topic, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Topic{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Gets the configuration of a topic.", - // "httpMethod": "GET", - // "id": "pubsub.projects.topics.get", - // "parameterOrder": [ - // "topic" - // ], - // "parameters": { - // "topic": { - // "description": "The name of the topic to get.", - // "location": "path", - // "pattern": "^projects/[^/]*/topics/[^/]*$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v1/{+topic}", - // "response": { - // "$ref": "Topic" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/pubsub" - // ] - // } - -} - -// method id "pubsub.projects.topics.getIamPolicy": - -type ProjectsTopicsGetIamPolicyCall struct { - s *Service - resource string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context -} - -// GetIamPolicy: Gets the access control policy for a `resource`. -// Returns an empty policy if the resource exists and does not have a -// policy set. -func (r *ProjectsTopicsService) GetIamPolicy(resource string) *ProjectsTopicsGetIamPolicyCall { - c := &ProjectsTopicsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.resource = resource - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsTopicsGetIamPolicyCall) Fields(s ...googleapi.Field) *ProjectsTopicsGetIamPolicyCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of If-None-Match. -func (c *ProjectsTopicsGetIamPolicyCall) IfNoneMatch(entityTag string) *ProjectsTopicsGetIamPolicyCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsTopicsGetIamPolicyCall) Context(ctx context.Context) *ProjectsTopicsGetIamPolicyCall { - c.ctx_ = ctx - return c -} - -func (c *ProjectsTopicsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+resource}:getIamPolicy") - urls += "?"
+ c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "resource": c.resource, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "pubsub.projects.topics.getIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ProjectsTopicsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Policy{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Gets the access control policy for a `resource`. Returns an empty policy if the resource exists and does not have a policy set.", - // "httpMethod": "GET", - // "id": "pubsub.projects.topics.getIamPolicy", - // "parameterOrder": [ - // "resource" - // ], - // "parameters": { - // "resource": { - // "description": "REQUIRED: The resource for which the policy is being requested. `resource` is usually specified as a path, such as `projects/*project*/zones/*zone*/disks/*disk*`. The format for the path specified in this value is resource specific and is specified in the `getIamPolicy` documentation.", - // "location": "path", - // "pattern": "^projects/[^/]*/topics/[^/]*$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v1/{+resource}:getIamPolicy", - // "response": { - // "$ref": "Policy" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/pubsub" - // ] - // } - -} - -// method id "pubsub.projects.topics.list": - -type ProjectsTopicsListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context -} - -// List: Lists matching topics. -func (r *ProjectsTopicsService) List(project string) *ProjectsTopicsListCall { - c := &ProjectsTopicsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - return c -} - -// PageSize sets the optional parameter "pageSize": Maximum number of -// topics to return. -func (c *ProjectsTopicsListCall) PageSize(pageSize int64) *ProjectsTopicsListCall { - c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) - return c -} - -// PageToken sets the optional parameter "pageToken": The value returned -// by the last `ListTopicsResponse`; indicates that this is a -// continuation of a prior `ListTopics` call, and that the system should -// return the next page of data. -func (c *ProjectsTopicsListCall) PageToken(pageToken string) *ProjectsTopicsListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. 
See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsTopicsListCall) Fields(s ...googleapi.Field) *ProjectsTopicsListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of If-None-Match. -func (c *ProjectsTopicsListCall) IfNoneMatch(entityTag string) *ProjectsTopicsListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsTopicsListCall) Context(ctx context.Context) *ProjectsTopicsListCall { - c.ctx_ = ctx - return c -} - -func (c *ProjectsTopicsListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+project}/topics") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "pubsub.projects.topics.list" call. -// Exactly one of *ListTopicsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ListTopicsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ProjectsTopicsListCall) Do(opts ...googleapi.CallOption) (*ListTopicsResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &ListTopicsResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Lists matching topics.", - // "httpMethod": "GET", - // "id": "pubsub.projects.topics.list", - // "parameterOrder": [ - // "project" - // ], - // "parameters": { - // "pageSize": { - // "description": "Maximum number of topics to return.", - // "format": "int32", - // "location": "query", - // "type": "integer" - // }, - // "pageToken": { - // "description": "The value returned by the last `ListTopicsResponse`; indicates that this is a continuation of a prior `ListTopics` call, and that the system should return the next page of data.", - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "The name of the cloud project that topics belong to.", - // "location": "path", - // "pattern": "^projects/[^/]*$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v1/{+project}/topics", - // "response": { - // "$ref": "ListTopicsResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/pubsub" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *ProjectsTopicsListCall) Pages(ctx context.Context, f func(*ListTopicsResponse) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "pubsub.projects.topics.publish": - -type ProjectsTopicsPublishCall struct { - s *Service - topic string - publishrequest *PublishRequest - urlParams_ gensupport.URLParams - ctx_ context.Context -} - -// Publish: Adds one or more messages to the topic. Returns `NOT_FOUND` -// if the topic does not exist. The message payload must not be empty; -// it must contain either a non-empty data field, or at least one -// attribute. -func (r *ProjectsTopicsService) Publish(topic string, publishrequest *PublishRequest) *ProjectsTopicsPublishCall { - c := &ProjectsTopicsPublishCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.topic = topic - c.publishrequest = publishrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsTopicsPublishCall) Fields(s ...googleapi.Field) *ProjectsTopicsPublishCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. 
-func (c *ProjectsTopicsPublishCall) Context(ctx context.Context) *ProjectsTopicsPublishCall { - c.ctx_ = ctx - return c -} - -func (c *ProjectsTopicsPublishCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.publishrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+topic}:publish") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "topic": c.topic, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "pubsub.projects.topics.publish" call. -// Exactly one of *PublishResponse or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *PublishResponse.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ProjectsTopicsPublishCall) Do(opts ...googleapi.CallOption) (*PublishResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &PublishResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Adds one or more messages to the topic. Returns `NOT_FOUND` if the topic does not exist. The message payload must not be empty; it must contain either a non-empty data field, or at least one attribute.", - // "httpMethod": "POST", - // "id": "pubsub.projects.topics.publish", - // "parameterOrder": [ - // "topic" - // ], - // "parameters": { - // "topic": { - // "description": "The messages in the request will be published on this topic.", - // "location": "path", - // "pattern": "^projects/[^/]*/topics/[^/]*$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v1/{+topic}:publish", - // "request": { - // "$ref": "PublishRequest" - // }, - // "response": { - // "$ref": "PublishResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/pubsub" - // ] - // } - -} - -// method id "pubsub.projects.topics.setIamPolicy": - -type ProjectsTopicsSetIamPolicyCall struct { - s *Service - resource string - setiampolicyrequest *SetIamPolicyRequest - urlParams_ gensupport.URLParams - ctx_ context.Context -} - -// SetIamPolicy: Sets the access control policy on the specified -// resource. Replaces any existing policy. 
-func (r *ProjectsTopicsService) SetIamPolicy(resource string, setiampolicyrequest *SetIamPolicyRequest) *ProjectsTopicsSetIamPolicyCall { - c := &ProjectsTopicsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.resource = resource - c.setiampolicyrequest = setiampolicyrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsTopicsSetIamPolicyCall) Fields(s ...googleapi.Field) *ProjectsTopicsSetIamPolicyCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsTopicsSetIamPolicyCall) Context(ctx context.Context) *ProjectsTopicsSetIamPolicyCall { - c.ctx_ = ctx - return c -} - -func (c *ProjectsTopicsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.setiampolicyrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+resource}:setIamPolicy") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "resource": c.resource, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "pubsub.projects.topics.setIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ProjectsTopicsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Policy{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", - // "httpMethod": "POST", - // "id": "pubsub.projects.topics.setIamPolicy", - // "parameterOrder": [ - // "resource" - // ], - // "parameters": { - // "resource": { - // "description": "REQUIRED: The resource for which the policy is being specified. `resource` is usually specified as a path, such as `projects/*project*/zones/*zone*/disks/*disk*`. 
The format for the path specified in this value is resource specific and is specified in the `setIamPolicy` documentation.", - // "location": "path", - // "pattern": "^projects/[^/]*/topics/[^/]*$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v1/{+resource}:setIamPolicy", - // "request": { - // "$ref": "SetIamPolicyRequest" - // }, - // "response": { - // "$ref": "Policy" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/pubsub" - // ] - // } - -} - -// method id "pubsub.projects.topics.testIamPermissions": - -type ProjectsTopicsTestIamPermissionsCall struct { - s *Service - resource string - testiampermissionsrequest *TestIamPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context -} - -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *ProjectsTopicsService) TestIamPermissions(resource string, testiampermissionsrequest *TestIamPermissionsRequest) *ProjectsTopicsTestIamPermissionsCall { - c := &ProjectsTopicsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.resource = resource - c.testiampermissionsrequest = testiampermissionsrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsTopicsTestIamPermissionsCall) Fields(s ...googleapi.Field) *ProjectsTopicsTestIamPermissionsCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsTopicsTestIamPermissionsCall) Context(ctx context.Context) *ProjectsTopicsTestIamPermissionsCall { - c.ctx_ = ctx - return c -} - -func (c *ProjectsTopicsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testiampermissionsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+resource}:testIamPermissions") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "resource": c.resource, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "pubsub.projects.topics.testIamPermissions" call. -// Exactly one of *TestIamPermissionsResponse or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *TestIamPermissionsResponse.ServerResponse.Header or (if a response -// was returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ProjectsTopicsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestIamPermissionsResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &TestIamPermissionsResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "pubsub.projects.topics.testIamPermissions", - // "parameterOrder": [ - // "resource" - // ], - // "parameters": { - // "resource": { - // "description": "REQUIRED: The resource for which the policy detail is being requested. `resource` is usually specified as a path, such as `projects/*project*/zones/*zone*/disks/*disk*`. The format for the path specified in this value is resource specific and is specified in the `testIamPermissions` documentation.", - // "location": "path", - // "pattern": "^projects/[^/]*/topics/[^/]*$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v1/{+resource}:testIamPermissions", - // "request": { - // "$ref": "TestIamPermissionsRequest" - // }, - // "response": { - // "$ref": "TestIamPermissionsResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/pubsub" - // ] - // } - -} - -// method id "pubsub.projects.topics.subscriptions.list": - -type ProjectsTopicsSubscriptionsListCall struct { - s *Service - topic string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context -} - -// List: Lists the name of the subscriptions for this topic. -func (r *ProjectsTopicsSubscriptionsService) List(topic string) *ProjectsTopicsSubscriptionsListCall { - c := &ProjectsTopicsSubscriptionsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.topic = topic - return c -} - -// PageSize sets the optional parameter "pageSize": Maximum number of -// subscription names to return. -func (c *ProjectsTopicsSubscriptionsListCall) PageSize(pageSize int64) *ProjectsTopicsSubscriptionsListCall { - c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) - return c -} - -// PageToken sets the optional parameter "pageToken": The value returned -// by the last `ListTopicSubscriptionsResponse`; indicates that this is -// a continuation of a prior `ListTopicSubscriptions` call, and that the -// system should return the next page of data. -func (c *ProjectsTopicsSubscriptionsListCall) PageToken(pageToken string) *ProjectsTopicsSubscriptionsListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsTopicsSubscriptionsListCall) Fields(s ...googleapi.Field) *ProjectsTopicsSubscriptionsListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. 
Use googleapi.IsNotModified to check whether the response -// error from Do is the result of If-None-Match. -func (c *ProjectsTopicsSubscriptionsListCall) IfNoneMatch(entityTag string) *ProjectsTopicsSubscriptionsListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsTopicsSubscriptionsListCall) Context(ctx context.Context) *ProjectsTopicsSubscriptionsListCall { - c.ctx_ = ctx - return c -} - -func (c *ProjectsTopicsSubscriptionsListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+topic}/subscriptions") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "topic": c.topic, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "pubsub.projects.topics.subscriptions.list" call. -// Exactly one of *ListTopicSubscriptionsResponse or error will be -// non-nil. Any non-2xx status code is an error. Response headers are in -// either *ListTopicSubscriptionsResponse.ServerResponse.Header or (if a -// response was returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ProjectsTopicsSubscriptionsListCall) Do(opts ...googleapi.CallOption) (*ListTopicSubscriptionsResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &ListTopicSubscriptionsResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Lists the name of the subscriptions for this topic.", - // "httpMethod": "GET", - // "id": "pubsub.projects.topics.subscriptions.list", - // "parameterOrder": [ - // "topic" - // ], - // "parameters": { - // "pageSize": { - // "description": "Maximum number of subscription names to return.", - // "format": "int32", - // "location": "query", - // "type": "integer" - // }, - // "pageToken": { - // "description": "The value returned by the last `ListTopicSubscriptionsResponse`; indicates that this is a continuation of a prior `ListTopicSubscriptions` call, and that the system should return the next page of data.", - // "location": "query", - // "type": "string" - // }, - // "topic": { - // "description": "The name of the topic that subscriptions are attached to.", - // "location": "path", - // "pattern": "^projects/[^/]*/topics/[^/]*$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v1/{+topic}/subscriptions", - // "response": { - // "$ref": "ListTopicSubscriptionsResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/pubsub" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
-func (c *ProjectsTopicsSubscriptionsListCall) Pages(ctx context.Context, f func(*ListTopicSubscriptionsResponse) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} diff --git a/vendor/google.golang.org/api/transport/BUILD b/vendor/google.golang.org/api/transport/BUILD deleted file mode 100644 index 2cd950704196..000000000000 --- a/vendor/google.golang.org/api/transport/BUILD +++ /dev/null @@ -1,35 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) - -go_library( - name = "go_default_library", - srcs = ["dial.go"], - importpath = "google.golang.org/api/transport", - deps = [ - "//vendor/golang.org/x/net/context:go_default_library", - "//vendor/golang.org/x/oauth2:go_default_library", - "//vendor/golang.org/x/oauth2/google:go_default_library", - "//vendor/google.golang.org/api/internal:go_default_library", - "//vendor/google.golang.org/api/option:go_default_library", - "//vendor/google.golang.org/grpc:go_default_library", - "//vendor/google.golang.org/grpc/credentials:go_default_library", - "//vendor/google.golang.org/grpc/credentials/oauth:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], -) diff --git a/vendor/google.golang.org/api/transport/dial.go b/vendor/google.golang.org/api/transport/dial.go deleted file mode 100644 index c054460afe93..000000000000 --- a/vendor/google.golang.org/api/transport/dial.go +++ /dev/null @@ -1,124 +0,0 @@ -// Copyright 2015 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package transport supports network connections to HTTP and GRPC servers. -// This package is not intended for use by end developers. Use the -// google.golang.org/api/option package to configure API clients. -package transport - -import ( - "errors" - "fmt" - "io/ioutil" - "net/http" - - "golang.org/x/net/context" - "golang.org/x/oauth2" - "golang.org/x/oauth2/google" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/credentials/oauth" - - "google.golang.org/api/internal" - "google.golang.org/api/option" -) - -// NewHTTPClient returns an HTTP client for use communicating with a Google cloud -// service, configured with the given ClientOptions. It also returns the endpoint -// for the service as specified in the options. 
-func NewHTTPClient(ctx context.Context, opts ...option.ClientOption) (*http.Client, string, error) { - var o internal.DialSettings - for _, opt := range opts { - opt.Apply(&o) - } - if o.GRPCConn != nil { - return nil, "", errors.New("unsupported gRPC connection specified") - } - // TODO(djd): Set UserAgent on all outgoing requests. - if o.HTTPClient != nil { - return o.HTTPClient, o.Endpoint, nil - } - if o.ServiceAccountJSONFilename != "" { - ts, err := serviceAcctTokenSource(ctx, o.ServiceAccountJSONFilename, o.Scopes...) - if err != nil { - return nil, "", err - } - o.TokenSource = ts - } - if o.TokenSource == nil { - var err error - o.TokenSource, err = google.DefaultTokenSource(ctx, o.Scopes...) - if err != nil { - return nil, "", fmt.Errorf("google.DefaultTokenSource: %v", err) - } - } - return oauth2.NewClient(ctx, o.TokenSource), o.Endpoint, nil -} - -// Set at init time by dial_appengine.go. If nil, we're not on App Engine. -var appengineDialerHook func(context.Context) grpc.DialOption - -// DialGRPC returns a GRPC connection for use communicating with a Google cloud -// service, configured with the given ClientOptions. -func DialGRPC(ctx context.Context, opts ...option.ClientOption) (*grpc.ClientConn, error) { - var o internal.DialSettings - for _, opt := range opts { - opt.Apply(&o) - } - if o.HTTPClient != nil { - return nil, errors.New("unsupported HTTP client specified") - } - if o.GRPCConn != nil { - return o.GRPCConn, nil - } - if o.ServiceAccountJSONFilename != "" { - ts, err := serviceAcctTokenSource(ctx, o.ServiceAccountJSONFilename, o.Scopes...) - if err != nil { - return nil, err - } - o.TokenSource = ts - } - if o.TokenSource == nil { - var err error - o.TokenSource, err = google.DefaultTokenSource(ctx, o.Scopes...) - if err != nil { - return nil, fmt.Errorf("google.DefaultTokenSource: %v", err) - } - } - grpcOpts := []grpc.DialOption{ - grpc.WithPerRPCCredentials(oauth.TokenSource{o.TokenSource}), - grpc.WithTransportCredentials(credentials.NewClientTLSFromCert(nil, "")), - } - if appengineDialerHook != nil { - // Use the Socket API on App Engine. - grpcOpts = append(grpcOpts, appengineDialerHook(ctx)) - } - grpcOpts = append(grpcOpts, o.GRPCDialOpts...) - if o.UserAgent != "" { - grpcOpts = append(grpcOpts, grpc.WithUserAgent(o.UserAgent)) - } - return grpc.DialContext(ctx, o.Endpoint, grpcOpts...) -} - -func serviceAcctTokenSource(ctx context.Context, filename string, scope ...string) (oauth2.TokenSource, error) { - data, err := ioutil.ReadFile(filename) - if err != nil { - return nil, fmt.Errorf("cannot read service account file: %v", err) - } - cfg, err := google.JWTConfigFromJSON(data, scope...) - if err != nil { - return nil, fmt.Errorf("google.JWTConfigFromJSON: %v", err) - } - return cfg.TokenSource(ctx), nil -} diff --git a/vendor/google.golang.org/api/transport/dial_appengine.go b/vendor/google.golang.org/api/transport/dial_appengine.go deleted file mode 100644 index 201244d249b9..000000000000 --- a/vendor/google.golang.org/api/transport/dial_appengine.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright 2016 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build appengine - -package transport - -import ( - "net" - "time" - - "golang.org/x/net/context" - "google.golang.org/appengine/socket" - "google.golang.org/grpc" -) - -func init() { - appengineDialerHook = func(ctx context.Context) grpc.DialOption { - return grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) { - return socket.DialTimeout(ctx, "tcp", addr, timeout) - }) - } -} diff --git a/vendor/google.golang.org/appengine/BUILD b/vendor/google.golang.org/appengine/BUILD index 88a593e4ab4f..74f983d873e8 100644 --- a/vendor/google.golang.org/appengine/BUILD +++ b/vendor/google.golang.org/appengine/BUILD @@ -1,9 +1,4 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "go_default_library", @@ -16,6 +11,7 @@ go_library( "timeout.go", ], importpath = "google.golang.org/appengine", + visibility = ["//visibility:public"], deps = [ "//vendor/github.com/golang/protobuf/proto:go_default_library", "//vendor/golang.org/x/net/context:go_default_library", @@ -25,6 +21,17 @@ go_library( ], ) +go_test( + name = "go_default_test", + srcs = [ + "appengine_test.go", + "namespace_test.go", + ], + importpath = "google.golang.org/appengine", + library = ":go_default_library", + deps = ["//vendor/golang.org/x/net/context:go_default_library"], +) + filegroup( name = "package-srcs", srcs = glob(["**"]), @@ -37,6 +44,8 @@ filegroup( srcs = [ ":package-srcs", "//vendor/google.golang.org/appengine/internal:all-srcs", + "//vendor/google.golang.org/appengine/urlfetch:all-srcs", ], tags = ["automanaged"], + visibility = ["//visibility:public"], ) diff --git a/vendor/google.golang.org/appengine/appengine.go b/vendor/google.golang.org/appengine/appengine.go index 8865c49a5f5e..475cf2e32e80 100644 --- a/vendor/google.golang.org/appengine/appengine.go +++ b/vendor/google.golang.org/appengine/appengine.go @@ -6,7 +6,7 @@ // // For more information on how to write Go apps for Google App Engine, see: // https://cloud.google.com/appengine/docs/go/ -package appengine +package appengine // import "google.golang.org/appengine" import ( "net/http" diff --git a/vendor/google.golang.org/appengine/appengine_test.go b/vendor/google.golang.org/appengine/appengine_test.go new file mode 100644 index 000000000000..f1cf0a1b9688 --- /dev/null +++ b/vendor/google.golang.org/appengine/appengine_test.go @@ -0,0 +1,49 @@ +// Copyright 2014 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. 
+ +package appengine + +import ( + "testing" +) + +func TestValidGeoPoint(t *testing.T) { + testCases := []struct { + desc string + pt GeoPoint + want bool + }{ + { + "valid", + GeoPoint{67.21, 13.37}, + true, + }, + { + "high lat", + GeoPoint{-90.01, 13.37}, + false, + }, + { + "low lat", + GeoPoint{90.01, 13.37}, + false, + }, + { + "high lng", + GeoPoint{67.21, 182}, + false, + }, + { + "low lng", + GeoPoint{67.21, -181}, + false, + }, + } + + for _, tc := range testCases { + if got := tc.pt.Valid(); got != tc.want { + t.Errorf("%s: got %v, want %v", tc.desc, got, tc.want) + } + } +} diff --git a/vendor/google.golang.org/appengine/internal/BUILD b/vendor/google.golang.org/appengine/internal/BUILD index 4b24f91ccc80..86f957617df0 100644 --- a/vendor/google.golang.org/appengine/internal/BUILD +++ b/vendor/google.golang.org/appengine/internal/BUILD @@ -1,9 +1,4 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "go_default_library", @@ -20,6 +15,7 @@ go_library( "transaction.go", ], importpath = "google.golang.org/appengine/internal", + visibility = ["//visibility:public"], deps = [ "//vendor/github.com/golang/protobuf/proto:go_default_library", "//vendor/golang.org/x/net/context:go_default_library", @@ -30,6 +26,24 @@ go_library( ], ) +go_test( + name = "go_default_test", + srcs = [ + "api_test.go", + "app_id_test.go", + "internal_vm_test.go", + "net_test.go", + ], + importpath = "google.golang.org/appengine/internal", + library = ":go_default_library", + deps = [ + "//vendor/github.com/golang/protobuf/proto:go_default_library", + "//vendor/golang.org/x/net/context:go_default_library", + "//vendor/google.golang.org/appengine/internal/base:go_default_library", + "//vendor/google.golang.org/appengine/internal/remote_api:go_default_library", + ], +) + filegroup( name = "package-srcs", srcs = glob(["**"]), @@ -47,6 +61,8 @@ filegroup( "//vendor/google.golang.org/appengine/internal/log:all-srcs", "//vendor/google.golang.org/appengine/internal/modules:all-srcs", "//vendor/google.golang.org/appengine/internal/remote_api:all-srcs", + "//vendor/google.golang.org/appengine/internal/urlfetch:all-srcs", ], tags = ["automanaged"], + visibility = ["//visibility:public"], ) diff --git a/vendor/google.golang.org/appengine/internal/api_race_test.go b/vendor/google.golang.org/appengine/internal/api_race_test.go new file mode 100644 index 000000000000..6cfe90649c7b --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/api_race_test.go @@ -0,0 +1,9 @@ +// Copyright 2014 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// +build race + +package internal + +func init() { raceDetector = true } diff --git a/vendor/google.golang.org/appengine/internal/api_test.go b/vendor/google.golang.org/appengine/internal/api_test.go new file mode 100644 index 000000000000..386d7f6cf490 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/api_test.go @@ -0,0 +1,467 @@ +// Copyright 2014 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. 
+ +// +build !appengine + +package internal + +import ( + "bufio" + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/http/httptest" + "net/url" + "os" + "os/exec" + "strings" + "sync/atomic" + "testing" + "time" + + "github.com/golang/protobuf/proto" + netcontext "golang.org/x/net/context" + + basepb "google.golang.org/appengine/internal/base" + remotepb "google.golang.org/appengine/internal/remote_api" +) + +const testTicketHeader = "X-Magic-Ticket-Header" + +func init() { + ticketHeader = testTicketHeader +} + +type fakeAPIHandler struct { + hang chan int // used for RunSlowly RPC + + LogFlushes int32 // atomic +} + +func (f *fakeAPIHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + writeResponse := func(res *remotepb.Response) { + hresBody, err := proto.Marshal(res) + if err != nil { + http.Error(w, fmt.Sprintf("Failed encoding API response: %v", err), 500) + return + } + w.Write(hresBody) + } + + if r.URL.Path != "/rpc_http" { + http.NotFound(w, r) + return + } + hreqBody, err := ioutil.ReadAll(r.Body) + if err != nil { + http.Error(w, fmt.Sprintf("Bad body: %v", err), 500) + return + } + apiReq := &remotepb.Request{} + if err := proto.Unmarshal(hreqBody, apiReq); err != nil { + http.Error(w, fmt.Sprintf("Bad encoded API request: %v", err), 500) + return + } + if *apiReq.RequestId != "s3cr3t" { + writeResponse(&remotepb.Response{ + RpcError: &remotepb.RpcError{ + Code: proto.Int32(int32(remotepb.RpcError_SECURITY_VIOLATION)), + Detail: proto.String("bad security ticket"), + }, + }) + return + } + if got, want := r.Header.Get(dapperHeader), "trace-001"; got != want { + writeResponse(&remotepb.Response{ + RpcError: &remotepb.RpcError{ + Code: proto.Int32(int32(remotepb.RpcError_BAD_REQUEST)), + Detail: proto.String(fmt.Sprintf("trace info = %q, want %q", got, want)), + }, + }) + return + } + + service, method := *apiReq.ServiceName, *apiReq.Method + var resOut proto.Message + if service == "actordb" && method == "LookupActor" { + req := &basepb.StringProto{} + res := &basepb.StringProto{} + if err := proto.Unmarshal(apiReq.Request, req); err != nil { + http.Error(w, fmt.Sprintf("Bad encoded request: %v", err), 500) + return + } + if *req.Value == "Doctor Who" { + res.Value = proto.String("David Tennant") + } + resOut = res + } + if service == "errors" { + switch method { + case "Non200": + http.Error(w, "I'm a little teapot.", 418) + return + case "ShortResponse": + w.Header().Set("Content-Length", "100") + w.Write([]byte("way too short")) + return + case "OverQuota": + writeResponse(&remotepb.Response{ + RpcError: &remotepb.RpcError{ + Code: proto.Int32(int32(remotepb.RpcError_OVER_QUOTA)), + Detail: proto.String("you are hogging the resources!"), + }, + }) + return + case "RunSlowly": + // TestAPICallRPCFailure creates f.hang, but does not strobe it + // until Call returns with remotepb.RpcError_CANCELLED. + // This is here to force a happens-before relationship between + // the httptest server handler and shutdown. + <-f.hang + resOut = &basepb.VoidProto{} + } + } + if service == "logservice" && method == "Flush" { + // Pretend log flushing is slow. 
+ time.Sleep(50 * time.Millisecond) + atomic.AddInt32(&f.LogFlushes, 1) + resOut = &basepb.VoidProto{} + } + + encOut, err := proto.Marshal(resOut) + if err != nil { + http.Error(w, fmt.Sprintf("Failed encoding response: %v", err), 500) + return + } + writeResponse(&remotepb.Response{ + Response: encOut, + }) +} + +func setup() (f *fakeAPIHandler, c *context, cleanup func()) { + f = &fakeAPIHandler{} + srv := httptest.NewServer(f) + u, err := url.Parse(srv.URL + apiPath) + if err != nil { + panic(fmt.Sprintf("url.Parse(%q): %v", srv.URL+apiPath, err)) + } + return f, &context{ + req: &http.Request{ + Header: http.Header{ + ticketHeader: []string{"s3cr3t"}, + dapperHeader: []string{"trace-001"}, + }, + }, + apiURL: u, + }, srv.Close +} + +func TestAPICall(t *testing.T) { + _, c, cleanup := setup() + defer cleanup() + + req := &basepb.StringProto{ + Value: proto.String("Doctor Who"), + } + res := &basepb.StringProto{} + err := Call(toContext(c), "actordb", "LookupActor", req, res) + if err != nil { + t.Fatalf("API call failed: %v", err) + } + if got, want := *res.Value, "David Tennant"; got != want { + t.Errorf("Response is %q, want %q", got, want) + } +} + +func TestAPICallRPCFailure(t *testing.T) { + f, c, cleanup := setup() + defer cleanup() + + testCases := []struct { + method string + code remotepb.RpcError_ErrorCode + }{ + {"Non200", remotepb.RpcError_UNKNOWN}, + {"ShortResponse", remotepb.RpcError_UNKNOWN}, + {"OverQuota", remotepb.RpcError_OVER_QUOTA}, + {"RunSlowly", remotepb.RpcError_CANCELLED}, + } + f.hang = make(chan int) // only for RunSlowly + for _, tc := range testCases { + ctx, _ := netcontext.WithTimeout(toContext(c), 100*time.Millisecond) + err := Call(ctx, "errors", tc.method, &basepb.VoidProto{}, &basepb.VoidProto{}) + ce, ok := err.(*CallError) + if !ok { + t.Errorf("%s: API call error is %T (%v), want *CallError", tc.method, err, err) + continue + } + if ce.Code != int32(tc.code) { + t.Errorf("%s: ce.Code = %d, want %d", tc.method, ce.Code, tc.code) + } + if tc.method == "RunSlowly" { + f.hang <- 1 // release the HTTP handler + } + } +} + +func TestAPICallDialFailure(t *testing.T) { + // See what happens if the API host is unresponsive. + // This should time out quickly, not hang forever. + _, c, cleanup := setup() + defer cleanup() + // Reset the URL to the production address so that dialing fails. + c.apiURL = apiURL() + + start := time.Now() + err := Call(toContext(c), "foo", "bar", &basepb.VoidProto{}, &basepb.VoidProto{}) + const max = 1 * time.Second + if taken := time.Since(start); taken > max { + t.Errorf("Dial hang took too long: %v > %v", taken, max) + } + if err == nil { + t.Error("Call did not fail") + } +} + +func TestDelayedLogFlushing(t *testing.T) { + f, c, cleanup := setup() + defer cleanup() + + http.HandleFunc("/quick_log", func(w http.ResponseWriter, r *http.Request) { + logC := WithContext(netcontext.Background(), r) + fromContext(logC).apiURL = c.apiURL // Otherwise it will try to use the default URL. + Logf(logC, 1, "It's a lovely day.") + w.WriteHeader(200) + w.Write(make([]byte, 100<<10)) // write 100 KB to force HTTP flush + }) + + r := &http.Request{ + Method: "GET", + URL: &url.URL{ + Scheme: "http", + Path: "/quick_log", + }, + Header: c.req.Header, + Body: ioutil.NopCloser(bytes.NewReader(nil)), + } + w := httptest.NewRecorder() + + // Check that log flushing does not hold up the HTTP response. 
+ start := time.Now() + handleHTTP(w, r) + if d := time.Since(start); d > 10*time.Millisecond { + t.Errorf("handleHTTP took %v, want under 10ms", d) + } + const hdr = "X-AppEngine-Log-Flush-Count" + if h := w.HeaderMap.Get(hdr); h != "1" { + t.Errorf("%s header = %q, want %q", hdr, h, "1") + } + if f := atomic.LoadInt32(&f.LogFlushes); f != 0 { + t.Errorf("After HTTP response: f.LogFlushes = %d, want 0", f) + } + + // Check that the log flush eventually comes in. + time.Sleep(100 * time.Millisecond) + if f := atomic.LoadInt32(&f.LogFlushes); f != 1 { + t.Errorf("After 100ms: f.LogFlushes = %d, want 1", f) + } +} + +func TestRemoteAddr(t *testing.T) { + var addr string + http.HandleFunc("/remote_addr", func(w http.ResponseWriter, r *http.Request) { + addr = r.RemoteAddr + }) + + testCases := []struct { + headers http.Header + addr string + }{ + {http.Header{"X-Appengine-User-Ip": []string{"10.5.2.1"}}, "10.5.2.1:80"}, + {http.Header{"X-Appengine-Remote-Addr": []string{"1.2.3.4"}}, "1.2.3.4:80"}, + {http.Header{"X-Appengine-Remote-Addr": []string{"1.2.3.4:8080"}}, "1.2.3.4:8080"}, + { + http.Header{"X-Appengine-Remote-Addr": []string{"2401:fa00:9:1:7646:a0ff:fe90:ca66"}}, + "[2401:fa00:9:1:7646:a0ff:fe90:ca66]:80", + }, + { + http.Header{"X-Appengine-Remote-Addr": []string{"[::1]:http"}}, + "[::1]:http", + }, + {http.Header{}, "127.0.0.1:80"}, + } + + for _, tc := range testCases { + r := &http.Request{ + Method: "GET", + URL: &url.URL{Scheme: "http", Path: "/remote_addr"}, + Header: tc.headers, + Body: ioutil.NopCloser(bytes.NewReader(nil)), + } + handleHTTP(httptest.NewRecorder(), r) + if addr != tc.addr { + t.Errorf("Header %v, got %q, want %q", tc.headers, addr, tc.addr) + } + } +} + +func TestPanickingHandler(t *testing.T) { + http.HandleFunc("/panic", func(http.ResponseWriter, *http.Request) { + panic("whoops!") + }) + r := &http.Request{ + Method: "GET", + URL: &url.URL{Scheme: "http", Path: "/panic"}, + Body: ioutil.NopCloser(bytes.NewReader(nil)), + } + rec := httptest.NewRecorder() + handleHTTP(rec, r) + if rec.Code != 500 { + t.Errorf("Panicking handler returned HTTP %d, want HTTP %d", rec.Code, 500) + } +} + +var raceDetector = false + +func TestAPICallAllocations(t *testing.T) { + if raceDetector { + t.Skip("not running under race detector") + } + + // Run the test API server in a subprocess so we aren't counting its allocations. + u, cleanup := launchHelperProcess(t) + defer cleanup() + c := &context{ + req: &http.Request{ + Header: http.Header{ + ticketHeader: []string{"s3cr3t"}, + dapperHeader: []string{"trace-001"}, + }, + }, + apiURL: u, + } + + req := &basepb.StringProto{ + Value: proto.String("Doctor Who"), + } + res := &basepb.StringProto{} + var apiErr error + avg := testing.AllocsPerRun(100, func() { + ctx, _ := netcontext.WithTimeout(toContext(c), 100*time.Millisecond) + if err := Call(ctx, "actordb", "LookupActor", req, res); err != nil && apiErr == nil { + apiErr = err // get the first error only + } + }) + if apiErr != nil { + t.Errorf("API call failed: %v", apiErr) + } + + // Lots of room for improvement... + // TODO(djd): Reduce maximum to 85 once the App Engine SDK is based on 1.6. 
+ const min, max float64 = 70, 90 + if avg < min || max < avg { + t.Errorf("Allocations per API call = %g, want in [%g,%g]", avg, min, max) + } +} + +func launchHelperProcess(t *testing.T) (apiURL *url.URL, cleanup func()) { + cmd := exec.Command(os.Args[0], "-test.run=TestHelperProcess") + cmd.Env = []string{"GO_WANT_HELPER_PROCESS=1"} + stdin, err := cmd.StdinPipe() + if err != nil { + t.Fatalf("StdinPipe: %v", err) + } + stdout, err := cmd.StdoutPipe() + if err != nil { + t.Fatalf("StdoutPipe: %v", err) + } + if err := cmd.Start(); err != nil { + t.Fatalf("Starting helper process: %v", err) + } + + scan := bufio.NewScanner(stdout) + var u *url.URL + for scan.Scan() { + line := scan.Text() + if hp := strings.TrimPrefix(line, helperProcessMagic); hp != line { + var err error + u, err = url.Parse(hp) + if err != nil { + t.Fatalf("Failed to parse %q: %v", hp, err) + } + break + } + } + if err := scan.Err(); err != nil { + t.Fatalf("Scanning helper process stdout: %v", err) + } + if u == nil { + t.Fatal("Helper process never reported") + } + + return u, func() { + stdin.Close() + if err := cmd.Wait(); err != nil { + t.Errorf("Helper process did not exit cleanly: %v", err) + } + } +} + +const helperProcessMagic = "A lovely helper process is listening at " + +// This isn't a real test. It's used as a helper process. +func TestHelperProcess(*testing.T) { + if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" { + return + } + defer os.Exit(0) + + f := &fakeAPIHandler{} + srv := httptest.NewServer(f) + defer srv.Close() + fmt.Println(helperProcessMagic + srv.URL + apiPath) + + // Wait for stdin to be closed. + io.Copy(ioutil.Discard, os.Stdin) +} + +func TestBackgroundContext(t *testing.T) { + environ := []struct { + key, value string + }{ + {"GAE_LONG_APP_ID", "my-app-id"}, + {"GAE_MINOR_VERSION", "067924799508853122"}, + {"GAE_MODULE_INSTANCE", "0"}, + {"GAE_MODULE_NAME", "default"}, + {"GAE_MODULE_VERSION", "20150612t184001"}, + } + for _, v := range environ { + old := os.Getenv(v.key) + os.Setenv(v.key, v.value) + v.value = old + } + defer func() { // Restore old environment after the test completes. + for _, v := range environ { + if v.value == "" { + os.Unsetenv(v.key) + continue + } + os.Setenv(v.key, v.value) + } + }() + + ctx, key := fromContext(BackgroundContext()), "X-Magic-Ticket-Header" + if g, w := ctx.req.Header.Get(key), "my-app-id/default.20150612t184001.0"; g != w { + t.Errorf("%v = %q, want %q", key, g, w) + } + + // Check that using the background context doesn't panic. + req := &basepb.StringProto{ + Value: proto.String("Doctor Who"), + } + res := &basepb.StringProto{} + Call(BackgroundContext(), "actordb", "LookupActor", req, res) // expected to fail +} diff --git a/vendor/google.golang.org/appengine/internal/app_id_test.go b/vendor/google.golang.org/appengine/internal/app_id_test.go new file mode 100644 index 000000000000..e69195cd4055 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/app_id_test.go @@ -0,0 +1,34 @@ +// Copyright 2011 Google Inc. All Rights Reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. 
+ +package internal + +import ( + "testing" +) + +func TestAppIDParsing(t *testing.T) { + testCases := []struct { + in string + partition, domain, displayID string + }{ + {"simple-app-id", "", "", "simple-app-id"}, + {"domain.com:domain-app-id", "", "domain.com", "domain-app-id"}, + {"part~partition-app-id", "part", "", "partition-app-id"}, + {"part~domain.com:display", "part", "domain.com", "display"}, + } + + for _, tc := range testCases { + part, dom, dis := parseFullAppID(tc.in) + if part != tc.partition { + t.Errorf("partition of %q: got %q, want %q", tc.in, part, tc.partition) + } + if dom != tc.domain { + t.Errorf("domain of %q: got %q, want %q", tc.in, dom, tc.domain) + } + if dis != tc.displayID { + t.Errorf("displayID of %q: got %q, want %q", tc.in, dis, tc.displayID) + } + } +} diff --git a/vendor/google.golang.org/appengine/internal/app_identity/BUILD b/vendor/google.golang.org/appengine/internal/app_identity/BUILD index e4e19469374c..a875445bc2b2 100644 --- a/vendor/google.golang.org/appengine/internal/app_identity/BUILD +++ b/vendor/google.golang.org/appengine/internal/app_identity/BUILD @@ -1,14 +1,16 @@ -package(default_visibility = ["//visibility:public"]) +load("@io_bazel_rules_go//go:def.bzl", "go_library") -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", +filegroup( + name = "go_default_library_protos", + srcs = ["app_identity_service.proto"], + visibility = ["//visibility:public"], ) go_library( name = "go_default_library", srcs = ["app_identity_service.pb.go"], importpath = "google.golang.org/appengine/internal/app_identity", + visibility = ["//vendor/google.golang.org/appengine:__subpackages__"], deps = ["//vendor/github.com/golang/protobuf/proto:go_default_library"], ) @@ -23,10 +25,5 @@ filegroup( name = "all-srcs", srcs = [":package-srcs"], tags = ["automanaged"], -) - -filegroup( - name = "go_default_library_protos", - srcs = ["app_identity_service.proto"], visibility = ["//visibility:public"], ) diff --git a/vendor/google.golang.org/appengine/internal/base/BUILD b/vendor/google.golang.org/appengine/internal/base/BUILD index 9d4ce23c95ef..8955a142b449 100644 --- a/vendor/google.golang.org/appengine/internal/base/BUILD +++ b/vendor/google.golang.org/appengine/internal/base/BUILD @@ -1,14 +1,16 @@ -package(default_visibility = ["//visibility:public"]) +load("@io_bazel_rules_go//go:def.bzl", "go_library") -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", +filegroup( + name = "go_default_library_protos", + srcs = ["api_base.proto"], + visibility = ["//visibility:public"], ) go_library( name = "go_default_library", srcs = ["api_base.pb.go"], importpath = "google.golang.org/appengine/internal/base", + visibility = ["//vendor/google.golang.org/appengine:__subpackages__"], deps = ["//vendor/github.com/golang/protobuf/proto:go_default_library"], ) @@ -23,10 +25,5 @@ filegroup( name = "all-srcs", srcs = [":package-srcs"], tags = ["automanaged"], -) - -filegroup( - name = "go_default_library_protos", - srcs = ["api_base.proto"], visibility = ["//visibility:public"], ) diff --git a/vendor/google.golang.org/appengine/internal/datastore/BUILD b/vendor/google.golang.org/appengine/internal/datastore/BUILD index a46325424a58..ebc69dbf8c0d 100644 --- a/vendor/google.golang.org/appengine/internal/datastore/BUILD +++ b/vendor/google.golang.org/appengine/internal/datastore/BUILD @@ -1,14 +1,16 @@ -package(default_visibility = ["//visibility:public"]) +load("@io_bazel_rules_go//go:def.bzl", "go_library") -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", 
+filegroup( + name = "go_default_library_protos", + srcs = ["datastore_v3.proto"], + visibility = ["//visibility:public"], ) go_library( name = "go_default_library", srcs = ["datastore_v3.pb.go"], importpath = "google.golang.org/appengine/internal/datastore", + visibility = ["//vendor/google.golang.org/appengine:__subpackages__"], deps = ["//vendor/github.com/golang/protobuf/proto:go_default_library"], ) @@ -23,10 +25,5 @@ filegroup( name = "all-srcs", srcs = [":package-srcs"], tags = ["automanaged"], -) - -filegroup( - name = "go_default_library_protos", - srcs = ["datastore_v3.proto"], visibility = ["//visibility:public"], ) diff --git a/vendor/google.golang.org/appengine/internal/internal_vm_test.go b/vendor/google.golang.org/appengine/internal/internal_vm_test.go new file mode 100644 index 000000000000..f8097616b923 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/internal_vm_test.go @@ -0,0 +1,60 @@ +// Copyright 2014 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// +build !appengine + +package internal + +import ( + "io" + "io/ioutil" + "net/http" + "net/http/httptest" + "testing" +) + +func TestInstallingHealthChecker(t *testing.T) { + try := func(desc string, mux *http.ServeMux, wantCode int, wantBody string) { + installHealthChecker(mux) + srv := httptest.NewServer(mux) + defer srv.Close() + + resp, err := http.Get(srv.URL + "/_ah/health") + if err != nil { + t.Errorf("%s: http.Get: %v", desc, err) + return + } + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + t.Errorf("%s: reading body: %v", desc, err) + return + } + + if resp.StatusCode != wantCode { + t.Errorf("%s: got HTTP %d, want %d", desc, resp.StatusCode, wantCode) + return + } + if wantBody != "" && string(body) != wantBody { + t.Errorf("%s: got HTTP body %q, want %q", desc, body, wantBody) + return + } + } + + // If there's no handlers, or only a root handler, a health checker should be installed. + try("empty mux", http.NewServeMux(), 200, "ok") + mux := http.NewServeMux() + mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + io.WriteString(w, "root handler") + }) + try("mux with root handler", mux, 200, "ok") + + // If there's a custom health check handler, one should not be installed. 
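In other words, the test pins down this contract: a default handler for `/_ah/health` is registered only when the mux has nothing more specific for that path. `installHealthChecker` itself is not part of this diff, so the following is a reconstruction from the test's expectations, not the vendored implementation; it relies on `http.ServeMux.Handler`, which reports the pattern that would serve a given request:

```go
package internal // hypothetical home for this sketch

import (
	"io"
	"net/http"
	"net/url"
)

func installHealthChecker(mux *http.ServeMux) {
	const healthPath = "/_ah/health"
	probe := &http.Request{Method: "GET", URL: &url.URL{Path: healthPath}}
	// If anything other than an exact handler would serve the probe,
	// register the default "ok" responder.
	if _, pattern := mux.Handler(probe); pattern != healthPath {
		mux.HandleFunc(healthPath, func(w http.ResponseWriter, r *http.Request) {
			io.WriteString(w, "ok")
		})
	}
}
```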
+ mux = http.NewServeMux() + mux.HandleFunc("/_ah/health", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(418) + io.WriteString(w, "I'm short and stout!") + }) + try("mux with custom health checker", mux, 418, "I'm short and stout!") +} diff --git a/vendor/google.golang.org/appengine/internal/log/BUILD b/vendor/google.golang.org/appengine/internal/log/BUILD index fb77fec2bf60..c339adec524d 100644 --- a/vendor/google.golang.org/appengine/internal/log/BUILD +++ b/vendor/google.golang.org/appengine/internal/log/BUILD @@ -1,14 +1,16 @@ -package(default_visibility = ["//visibility:public"]) +load("@io_bazel_rules_go//go:def.bzl", "go_library") -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", +filegroup( + name = "go_default_library_protos", + srcs = ["log_service.proto"], + visibility = ["//visibility:public"], ) go_library( name = "go_default_library", srcs = ["log_service.pb.go"], importpath = "google.golang.org/appengine/internal/log", + visibility = ["//vendor/google.golang.org/appengine:__subpackages__"], deps = ["//vendor/github.com/golang/protobuf/proto:go_default_library"], ) @@ -23,10 +25,5 @@ filegroup( name = "all-srcs", srcs = [":package-srcs"], tags = ["automanaged"], -) - -filegroup( - name = "go_default_library_protos", - srcs = ["log_service.proto"], visibility = ["//visibility:public"], ) diff --git a/vendor/google.golang.org/appengine/internal/modules/BUILD b/vendor/google.golang.org/appengine/internal/modules/BUILD index 4909ac9a2b99..c366c399eaa0 100644 --- a/vendor/google.golang.org/appengine/internal/modules/BUILD +++ b/vendor/google.golang.org/appengine/internal/modules/BUILD @@ -1,14 +1,16 @@ -package(default_visibility = ["//visibility:public"]) +load("@io_bazel_rules_go//go:def.bzl", "go_library") -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", +filegroup( + name = "go_default_library_protos", + srcs = ["modules_service.proto"], + visibility = ["//visibility:public"], ) go_library( name = "go_default_library", srcs = ["modules_service.pb.go"], importpath = "google.golang.org/appengine/internal/modules", + visibility = ["//vendor/google.golang.org/appengine:__subpackages__"], deps = ["//vendor/github.com/golang/protobuf/proto:go_default_library"], ) @@ -23,10 +25,5 @@ filegroup( name = "all-srcs", srcs = [":package-srcs"], tags = ["automanaged"], -) - -filegroup( - name = "go_default_library_protos", - srcs = ["modules_service.proto"], visibility = ["//visibility:public"], ) diff --git a/vendor/google.golang.org/appengine/internal/net_test.go b/vendor/google.golang.org/appengine/internal/net_test.go new file mode 100644 index 000000000000..24da8bb2b15b --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/net_test.go @@ -0,0 +1,58 @@ +// Copyright 2014 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// +build !appengine + +package internal + +import ( + "sync" + "testing" + "time" + + netcontext "golang.org/x/net/context" + + basepb "google.golang.org/appengine/internal/base" +) + +func TestDialLimit(t *testing.T) { + // Fill up semaphore with false acquisitions to permit only two TCP connections at a time. + // We don't replace limitSem because that results in a data race when net/http lazily closes connections. 
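`limitSem` here is a plain counting semaphore: a buffered channel whose capacity is the concurrency limit, acquired with a send and released with a receive, so filling it with dummy values shrinks the effective limit. A sketch of how a dialer might consume such a semaphore — `limitSem` exists in the vendored package, but `limitedDial` below is illustrative:

```go
package demo

import "net"

var limitSem = make(chan int, 100) // capacity = max concurrent dials

func limitedDial(network, addr string) (net.Conn, error) {
	limitSem <- 1 // acquire a slot; blocks while the limit is reached
	conn, err := net.Dial(network, addr)
	if err != nil {
		<-limitSem // release immediately on failure
		return nil, err
	}
	// The vendored code releases the slot when the connection is
	// eventually closed; that bookkeeping is elided here.
	return conn, nil
}
```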
+ nFake := cap(limitSem) - 2 + for i := 0; i < nFake; i++ { + limitSem <- 1 + } + defer func() { + for i := 0; i < nFake; i++ { + <-limitSem + } + }() + + f, c, cleanup := setup() // setup is in api_test.go + defer cleanup() + f.hang = make(chan int) + + // If we make two RunSlowly RPCs (which will wait for f.hang to be strobed), + // then the simple Non200 RPC should hang. + var wg sync.WaitGroup + wg.Add(2) + for i := 0; i < 2; i++ { + go func() { + defer wg.Done() + Call(toContext(c), "errors", "RunSlowly", &basepb.VoidProto{}, &basepb.VoidProto{}) + }() + } + time.Sleep(50 * time.Millisecond) // let those two RPCs start + + ctx, _ := netcontext.WithTimeout(toContext(c), 50*time.Millisecond) + err := Call(ctx, "errors", "Non200", &basepb.VoidProto{}, &basepb.VoidProto{}) + if err != errTimeout { + t.Errorf("Non200 RPC returned with err %v, want errTimeout", err) + } + + // Drain the two RunSlowly calls. + f.hang <- 1 + f.hang <- 1 + wg.Wait() +} diff --git a/vendor/google.golang.org/appengine/internal/remote_api/BUILD b/vendor/google.golang.org/appengine/internal/remote_api/BUILD index a838c576c187..359f91549a42 100644 --- a/vendor/google.golang.org/appengine/internal/remote_api/BUILD +++ b/vendor/google.golang.org/appengine/internal/remote_api/BUILD @@ -1,14 +1,16 @@ -package(default_visibility = ["//visibility:public"]) +load("@io_bazel_rules_go//go:def.bzl", "go_library") -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", +filegroup( + name = "go_default_library_protos", + srcs = ["remote_api.proto"], + visibility = ["//visibility:public"], ) go_library( name = "go_default_library", srcs = ["remote_api.pb.go"], importpath = "google.golang.org/appengine/internal/remote_api", + visibility = ["//vendor/google.golang.org/appengine:__subpackages__"], deps = ["//vendor/github.com/golang/protobuf/proto:go_default_library"], ) @@ -23,10 +25,5 @@ filegroup( name = "all-srcs", srcs = [":package-srcs"], tags = ["automanaged"], -) - -filegroup( - name = "go_default_library_protos", - srcs = ["remote_api.proto"], visibility = ["//visibility:public"], ) diff --git a/vendor/google.golang.org/appengine/internal/urlfetch/BUILD b/vendor/google.golang.org/appengine/internal/urlfetch/BUILD new file mode 100644 index 000000000000..6c5cfe30f80f --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/urlfetch/BUILD @@ -0,0 +1,29 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +filegroup( + name = "go_default_library_protos", + srcs = ["urlfetch_service.proto"], + visibility = ["//visibility:public"], +) + +go_library( + name = "go_default_library", + srcs = ["urlfetch_service.pb.go"], + importpath = "google.golang.org/appengine/internal/urlfetch", + visibility = ["//vendor/google.golang.org/appengine:__subpackages__"], + deps = ["//vendor/github.com/golang/protobuf/proto:go_default_library"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go b/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go new file mode 100644 index 000000000000..af463fbb265c --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go @@ -0,0 +1,355 @@ +// Code generated by protoc-gen-go. 
+// source: google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto +// DO NOT EDIT! + +/* +Package urlfetch is a generated protocol buffer package. + +It is generated from these files: + google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto + +It has these top-level messages: + URLFetchServiceError + URLFetchRequest + URLFetchResponse +*/ +package urlfetch + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type URLFetchServiceError_ErrorCode int32 + +const ( + URLFetchServiceError_OK URLFetchServiceError_ErrorCode = 0 + URLFetchServiceError_INVALID_URL URLFetchServiceError_ErrorCode = 1 + URLFetchServiceError_FETCH_ERROR URLFetchServiceError_ErrorCode = 2 + URLFetchServiceError_UNSPECIFIED_ERROR URLFetchServiceError_ErrorCode = 3 + URLFetchServiceError_RESPONSE_TOO_LARGE URLFetchServiceError_ErrorCode = 4 + URLFetchServiceError_DEADLINE_EXCEEDED URLFetchServiceError_ErrorCode = 5 + URLFetchServiceError_SSL_CERTIFICATE_ERROR URLFetchServiceError_ErrorCode = 6 + URLFetchServiceError_DNS_ERROR URLFetchServiceError_ErrorCode = 7 + URLFetchServiceError_CLOSED URLFetchServiceError_ErrorCode = 8 + URLFetchServiceError_INTERNAL_TRANSIENT_ERROR URLFetchServiceError_ErrorCode = 9 + URLFetchServiceError_TOO_MANY_REDIRECTS URLFetchServiceError_ErrorCode = 10 + URLFetchServiceError_MALFORMED_REPLY URLFetchServiceError_ErrorCode = 11 + URLFetchServiceError_CONNECTION_ERROR URLFetchServiceError_ErrorCode = 12 +) + +var URLFetchServiceError_ErrorCode_name = map[int32]string{ + 0: "OK", + 1: "INVALID_URL", + 2: "FETCH_ERROR", + 3: "UNSPECIFIED_ERROR", + 4: "RESPONSE_TOO_LARGE", + 5: "DEADLINE_EXCEEDED", + 6: "SSL_CERTIFICATE_ERROR", + 7: "DNS_ERROR", + 8: "CLOSED", + 9: "INTERNAL_TRANSIENT_ERROR", + 10: "TOO_MANY_REDIRECTS", + 11: "MALFORMED_REPLY", + 12: "CONNECTION_ERROR", +} +var URLFetchServiceError_ErrorCode_value = map[string]int32{ + "OK": 0, + "INVALID_URL": 1, + "FETCH_ERROR": 2, + "UNSPECIFIED_ERROR": 3, + "RESPONSE_TOO_LARGE": 4, + "DEADLINE_EXCEEDED": 5, + "SSL_CERTIFICATE_ERROR": 6, + "DNS_ERROR": 7, + "CLOSED": 8, + "INTERNAL_TRANSIENT_ERROR": 9, + "TOO_MANY_REDIRECTS": 10, + "MALFORMED_REPLY": 11, + "CONNECTION_ERROR": 12, +} + +func (x URLFetchServiceError_ErrorCode) Enum() *URLFetchServiceError_ErrorCode { + p := new(URLFetchServiceError_ErrorCode) + *p = x + return p +} +func (x URLFetchServiceError_ErrorCode) String() string { + return proto.EnumName(URLFetchServiceError_ErrorCode_name, int32(x)) +} +func (x *URLFetchServiceError_ErrorCode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(URLFetchServiceError_ErrorCode_value, data, "URLFetchServiceError_ErrorCode") + if err != nil { + return err + } + *x = URLFetchServiceError_ErrorCode(value) + return nil +} + +type URLFetchRequest_RequestMethod int32 + +const ( + URLFetchRequest_GET URLFetchRequest_RequestMethod = 1 + URLFetchRequest_POST URLFetchRequest_RequestMethod = 2 + URLFetchRequest_HEAD URLFetchRequest_RequestMethod = 3 + URLFetchRequest_PUT URLFetchRequest_RequestMethod = 4 + URLFetchRequest_DELETE URLFetchRequest_RequestMethod = 5 + URLFetchRequest_PATCH URLFetchRequest_RequestMethod = 6 +) + +var URLFetchRequest_RequestMethod_name = map[int32]string{ + 1: "GET", + 2: "POST", + 3: "HEAD", + 4: "PUT", + 5: "DELETE", + 6: "PATCH", +} +var URLFetchRequest_RequestMethod_value = map[string]int32{ + 
"GET": 1, + "POST": 2, + "HEAD": 3, + "PUT": 4, + "DELETE": 5, + "PATCH": 6, +} + +func (x URLFetchRequest_RequestMethod) Enum() *URLFetchRequest_RequestMethod { + p := new(URLFetchRequest_RequestMethod) + *p = x + return p +} +func (x URLFetchRequest_RequestMethod) String() string { + return proto.EnumName(URLFetchRequest_RequestMethod_name, int32(x)) +} +func (x *URLFetchRequest_RequestMethod) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(URLFetchRequest_RequestMethod_value, data, "URLFetchRequest_RequestMethod") + if err != nil { + return err + } + *x = URLFetchRequest_RequestMethod(value) + return nil +} + +type URLFetchServiceError struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *URLFetchServiceError) Reset() { *m = URLFetchServiceError{} } +func (m *URLFetchServiceError) String() string { return proto.CompactTextString(m) } +func (*URLFetchServiceError) ProtoMessage() {} + +type URLFetchRequest struct { + Method *URLFetchRequest_RequestMethod `protobuf:"varint,1,req,name=Method,enum=appengine.URLFetchRequest_RequestMethod" json:"Method,omitempty"` + Url *string `protobuf:"bytes,2,req,name=Url" json:"Url,omitempty"` + Header []*URLFetchRequest_Header `protobuf:"group,3,rep,name=Header" json:"header,omitempty"` + Payload []byte `protobuf:"bytes,6,opt,name=Payload" json:"Payload,omitempty"` + FollowRedirects *bool `protobuf:"varint,7,opt,name=FollowRedirects,def=1" json:"FollowRedirects,omitempty"` + Deadline *float64 `protobuf:"fixed64,8,opt,name=Deadline" json:"Deadline,omitempty"` + MustValidateServerCertificate *bool `protobuf:"varint,9,opt,name=MustValidateServerCertificate,def=1" json:"MustValidateServerCertificate,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *URLFetchRequest) Reset() { *m = URLFetchRequest{} } +func (m *URLFetchRequest) String() string { return proto.CompactTextString(m) } +func (*URLFetchRequest) ProtoMessage() {} + +const Default_URLFetchRequest_FollowRedirects bool = true +const Default_URLFetchRequest_MustValidateServerCertificate bool = true + +func (m *URLFetchRequest) GetMethod() URLFetchRequest_RequestMethod { + if m != nil && m.Method != nil { + return *m.Method + } + return URLFetchRequest_GET +} + +func (m *URLFetchRequest) GetUrl() string { + if m != nil && m.Url != nil { + return *m.Url + } + return "" +} + +func (m *URLFetchRequest) GetHeader() []*URLFetchRequest_Header { + if m != nil { + return m.Header + } + return nil +} + +func (m *URLFetchRequest) GetPayload() []byte { + if m != nil { + return m.Payload + } + return nil +} + +func (m *URLFetchRequest) GetFollowRedirects() bool { + if m != nil && m.FollowRedirects != nil { + return *m.FollowRedirects + } + return Default_URLFetchRequest_FollowRedirects +} + +func (m *URLFetchRequest) GetDeadline() float64 { + if m != nil && m.Deadline != nil { + return *m.Deadline + } + return 0 +} + +func (m *URLFetchRequest) GetMustValidateServerCertificate() bool { + if m != nil && m.MustValidateServerCertificate != nil { + return *m.MustValidateServerCertificate + } + return Default_URLFetchRequest_MustValidateServerCertificate +} + +type URLFetchRequest_Header struct { + Key *string `protobuf:"bytes,4,req,name=Key" json:"Key,omitempty"` + Value *string `protobuf:"bytes,5,req,name=Value" json:"Value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *URLFetchRequest_Header) Reset() { *m = URLFetchRequest_Header{} } +func (m *URLFetchRequest_Header) String() string { return proto.CompactTextString(m) } +func (*URLFetchRequest_Header) 
ProtoMessage() {} + +func (m *URLFetchRequest_Header) GetKey() string { + if m != nil && m.Key != nil { + return *m.Key + } + return "" +} + +func (m *URLFetchRequest_Header) GetValue() string { + if m != nil && m.Value != nil { + return *m.Value + } + return "" +} + +type URLFetchResponse struct { + Content []byte `protobuf:"bytes,1,opt,name=Content" json:"Content,omitempty"` + StatusCode *int32 `protobuf:"varint,2,req,name=StatusCode" json:"StatusCode,omitempty"` + Header []*URLFetchResponse_Header `protobuf:"group,3,rep,name=Header" json:"header,omitempty"` + ContentWasTruncated *bool `protobuf:"varint,6,opt,name=ContentWasTruncated,def=0" json:"ContentWasTruncated,omitempty"` + ExternalBytesSent *int64 `protobuf:"varint,7,opt,name=ExternalBytesSent" json:"ExternalBytesSent,omitempty"` + ExternalBytesReceived *int64 `protobuf:"varint,8,opt,name=ExternalBytesReceived" json:"ExternalBytesReceived,omitempty"` + FinalUrl *string `protobuf:"bytes,9,opt,name=FinalUrl" json:"FinalUrl,omitempty"` + ApiCpuMilliseconds *int64 `protobuf:"varint,10,opt,name=ApiCpuMilliseconds,def=0" json:"ApiCpuMilliseconds,omitempty"` + ApiBytesSent *int64 `protobuf:"varint,11,opt,name=ApiBytesSent,def=0" json:"ApiBytesSent,omitempty"` + ApiBytesReceived *int64 `protobuf:"varint,12,opt,name=ApiBytesReceived,def=0" json:"ApiBytesReceived,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *URLFetchResponse) Reset() { *m = URLFetchResponse{} } +func (m *URLFetchResponse) String() string { return proto.CompactTextString(m) } +func (*URLFetchResponse) ProtoMessage() {} + +const Default_URLFetchResponse_ContentWasTruncated bool = false +const Default_URLFetchResponse_ApiCpuMilliseconds int64 = 0 +const Default_URLFetchResponse_ApiBytesSent int64 = 0 +const Default_URLFetchResponse_ApiBytesReceived int64 = 0 + +func (m *URLFetchResponse) GetContent() []byte { + if m != nil { + return m.Content + } + return nil +} + +func (m *URLFetchResponse) GetStatusCode() int32 { + if m != nil && m.StatusCode != nil { + return *m.StatusCode + } + return 0 +} + +func (m *URLFetchResponse) GetHeader() []*URLFetchResponse_Header { + if m != nil { + return m.Header + } + return nil +} + +func (m *URLFetchResponse) GetContentWasTruncated() bool { + if m != nil && m.ContentWasTruncated != nil { + return *m.ContentWasTruncated + } + return Default_URLFetchResponse_ContentWasTruncated +} + +func (m *URLFetchResponse) GetExternalBytesSent() int64 { + if m != nil && m.ExternalBytesSent != nil { + return *m.ExternalBytesSent + } + return 0 +} + +func (m *URLFetchResponse) GetExternalBytesReceived() int64 { + if m != nil && m.ExternalBytesReceived != nil { + return *m.ExternalBytesReceived + } + return 0 +} + +func (m *URLFetchResponse) GetFinalUrl() string { + if m != nil && m.FinalUrl != nil { + return *m.FinalUrl + } + return "" +} + +func (m *URLFetchResponse) GetApiCpuMilliseconds() int64 { + if m != nil && m.ApiCpuMilliseconds != nil { + return *m.ApiCpuMilliseconds + } + return Default_URLFetchResponse_ApiCpuMilliseconds +} + +func (m *URLFetchResponse) GetApiBytesSent() int64 { + if m != nil && m.ApiBytesSent != nil { + return *m.ApiBytesSent + } + return Default_URLFetchResponse_ApiBytesSent +} + +func (m *URLFetchResponse) GetApiBytesReceived() int64 { + if m != nil && m.ApiBytesReceived != nil { + return *m.ApiBytesReceived + } + return Default_URLFetchResponse_ApiBytesReceived +} + +type URLFetchResponse_Header struct { + Key *string `protobuf:"bytes,4,req,name=Key" json:"Key,omitempty"` + Value *string 
`protobuf:"bytes,5,req,name=Value" json:"Value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *URLFetchResponse_Header) Reset() { *m = URLFetchResponse_Header{} } +func (m *URLFetchResponse_Header) String() string { return proto.CompactTextString(m) } +func (*URLFetchResponse_Header) ProtoMessage() {} + +func (m *URLFetchResponse_Header) GetKey() string { + if m != nil && m.Key != nil { + return *m.Key + } + return "" +} + +func (m *URLFetchResponse_Header) GetValue() string { + if m != nil && m.Value != nil { + return *m.Value + } + return "" +} + +func init() { +} diff --git a/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto b/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto new file mode 100644 index 000000000000..f695edf6a907 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto @@ -0,0 +1,64 @@ +syntax = "proto2"; +option go_package = "urlfetch"; + +package appengine; + +message URLFetchServiceError { + enum ErrorCode { + OK = 0; + INVALID_URL = 1; + FETCH_ERROR = 2; + UNSPECIFIED_ERROR = 3; + RESPONSE_TOO_LARGE = 4; + DEADLINE_EXCEEDED = 5; + SSL_CERTIFICATE_ERROR = 6; + DNS_ERROR = 7; + CLOSED = 8; + INTERNAL_TRANSIENT_ERROR = 9; + TOO_MANY_REDIRECTS = 10; + MALFORMED_REPLY = 11; + CONNECTION_ERROR = 12; + } +} + +message URLFetchRequest { + enum RequestMethod { + GET = 1; + POST = 2; + HEAD = 3; + PUT = 4; + DELETE = 5; + PATCH = 6; + } + required RequestMethod Method = 1; + required string Url = 2; + repeated group Header = 3 { + required string Key = 4; + required string Value = 5; + } + optional bytes Payload = 6 [ctype=CORD]; + + optional bool FollowRedirects = 7 [default=true]; + + optional double Deadline = 8; + + optional bool MustValidateServerCertificate = 9 [default=true]; +} + +message URLFetchResponse { + optional bytes Content = 1; + required int32 StatusCode = 2; + repeated group Header = 3 { + required string Key = 4; + required string Value = 5; + } + optional bool ContentWasTruncated = 6 [default=false]; + optional int64 ExternalBytesSent = 7; + optional int64 ExternalBytesReceived = 8; + + optional string FinalUrl = 9; + + optional int64 ApiCpuMilliseconds = 10 [default=0]; + optional int64 ApiBytesSent = 11 [default=0]; + optional int64 ApiBytesReceived = 12 [default=0]; +} diff --git a/vendor/google.golang.org/appengine/namespace_test.go b/vendor/google.golang.org/appengine/namespace_test.go new file mode 100644 index 000000000000..847f640bd0b2 --- /dev/null +++ b/vendor/google.golang.org/appengine/namespace_test.go @@ -0,0 +1,39 @@ +// Copyright 2014 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. 
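The generated urlfetch messages above follow the usual proto2 conventions: optional scalar fields are pointers, enums carry an `Enum()` pointer helper, and each `Get*` method substitutes the declared default when the field is unset. A usage sketch against the generated package (the URL is a placeholder):

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	pb "google.golang.org/appengine/internal/urlfetch"
)

func main() {
	req := &pb.URLFetchRequest{
		Method: pb.URLFetchRequest_GET.Enum(),       // enum pointer helper
		Url:    proto.String("https://example.com"), // *string helper
		// FollowRedirects is deliberately left nil.
	}
	fmt.Println(req.GetFollowRedirects()) // true: the declared default applies
	fmt.Println(req.GetDeadline())        // 0: unset field with no default
}
```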
+ +package appengine + +import ( + "testing" + + "golang.org/x/net/context" +) + +func TestNamespaceValidity(t *testing.T) { + testCases := []struct { + namespace string + ok bool + }{ + // data from Python's namespace_manager_test.py + {"", true}, + {"__a.namespace.123__", true}, + {"-_A....NAMESPACE-_", true}, + {"-", true}, + {".", true}, + {".-", true}, + + {"?", false}, + {"+", false}, + {"!", false}, + {" ", false}, + } + for _, tc := range testCases { + _, err := Namespace(context.Background(), tc.namespace) + if err == nil && !tc.ok { + t.Errorf("Namespace %q should be rejected, but wasn't", tc.namespace) + } else if err != nil && tc.ok { + t.Errorf("Namespace %q should be accepted, but wasn't", tc.namespace) + } + } +} diff --git a/vendor/google.golang.org/appengine/urlfetch/BUILD b/vendor/google.golang.org/appengine/urlfetch/BUILD new file mode 100644 index 000000000000..a4215fd132b3 --- /dev/null +++ b/vendor/google.golang.org/appengine/urlfetch/BUILD @@ -0,0 +1,28 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["urlfetch.go"], + importpath = "google.golang.org/appengine/urlfetch", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/golang/protobuf/proto:go_default_library", + "//vendor/golang.org/x/net/context:go_default_library", + "//vendor/google.golang.org/appengine/internal:go_default_library", + "//vendor/google.golang.org/appengine/internal/urlfetch:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/google.golang.org/appengine/urlfetch/urlfetch.go b/vendor/google.golang.org/appengine/urlfetch/urlfetch.go new file mode 100644 index 000000000000..6ffe1e6d901a --- /dev/null +++ b/vendor/google.golang.org/appengine/urlfetch/urlfetch.go @@ -0,0 +1,210 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// Package urlfetch provides an http.RoundTripper implementation +// for fetching URLs via App Engine's urlfetch service. +package urlfetch // import "google.golang.org/appengine/urlfetch" + +import ( + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strconv" + "strings" + "time" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/context" + + "google.golang.org/appengine/internal" + pb "google.golang.org/appengine/internal/urlfetch" +) + +// Transport is an implementation of http.RoundTripper for +// App Engine. Users should generally create an http.Client using +// this transport and use the Client rather than using this transport +// directly. +type Transport struct { + Context context.Context + + // Controls whether the application checks the validity of SSL certificates + // over HTTPS connections. A value of false (the default) instructs the + // application to send a request to the server only if the certificate is + // valid and signed by a trusted certificate authority (CA), and also + // includes a hostname that matches the certificate. A value of true + // instructs the application to perform no certificate validation. + AllowInvalidServerCertificate bool +} + +// Verify statically that *Transport implements http.RoundTripper. 
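The declaration that follows is the standard compile-time assertion idiom: assigning a typed nil pointer to a blank variable of the interface type makes the compiler check the implementation, at zero runtime cost. The same trick works for any type/interface pair, for example:

```go
var _ io.ReadWriter = (*bytes.Buffer)(nil) // fails to compile if Buffer loses a method
```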
+var _ http.RoundTripper = (*Transport)(nil) + +// Client returns an *http.Client using a default urlfetch Transport. This +// client will have the default deadline of 5 seconds, and will check the +// validity of SSL certificates. +// +// Any deadline of the provided context will be used for requests through this client; +// if the client does not have a deadline then a 5 second default is used. +func Client(ctx context.Context) *http.Client { + return &http.Client{ + Transport: &Transport{ + Context: ctx, + }, + } +} + +type bodyReader struct { + content []byte + truncated bool + closed bool +} + +// ErrTruncatedBody is the error returned after the final Read() from a +// response's Body if the body has been truncated by App Engine's proxy. +var ErrTruncatedBody = errors.New("urlfetch: truncated body") + +func statusCodeToText(code int) string { + if t := http.StatusText(code); t != "" { + return t + } + return strconv.Itoa(code) +} + +func (br *bodyReader) Read(p []byte) (n int, err error) { + if br.closed { + if br.truncated { + return 0, ErrTruncatedBody + } + return 0, io.EOF + } + n = copy(p, br.content) + if n > 0 { + br.content = br.content[n:] + return + } + if br.truncated { + br.closed = true + return 0, ErrTruncatedBody + } + return 0, io.EOF +} + +func (br *bodyReader) Close() error { + br.closed = true + br.content = nil + return nil +} + +// A map of the URL Fetch-accepted methods that take a request body. +var methodAcceptsRequestBody = map[string]bool{ + "POST": true, + "PUT": true, + "PATCH": true, +} + +// urlString returns a valid string given a URL. This function is necessary because +// the String method of URL doesn't correctly handle URLs with non-empty Opaque values. +// See http://code.google.com/p/go/issues/detail?id=4860. +func urlString(u *url.URL) string { + if u.Opaque == "" || strings.HasPrefix(u.Opaque, "//") { + return u.String() + } + aux := *u + aux.Opaque = "//" + aux.Host + aux.Opaque + return aux.String() +} + +// RoundTrip issues a single HTTP request and returns its response. Per the +// http.RoundTripper interface, RoundTrip only returns an error if there +// was an unsupported request or the URL Fetch proxy fails. +// Note that HTTP response codes such as 5xx, 403, 404, etc are not +// errors as far as the transport is concerned and will be returned +// with err set to nil. +func (t *Transport) RoundTrip(req *http.Request) (res *http.Response, err error) { + methNum, ok := pb.URLFetchRequest_RequestMethod_value[req.Method] + if !ok { + return nil, fmt.Errorf("urlfetch: unsupported HTTP method %q", req.Method) + } + + method := pb.URLFetchRequest_RequestMethod(methNum) + + freq := &pb.URLFetchRequest{ + Method: &method, + Url: proto.String(urlString(req.URL)), + FollowRedirects: proto.Bool(false), // http.Client's responsibility + MustValidateServerCertificate: proto.Bool(!t.AllowInvalidServerCertificate), + } + if deadline, ok := t.Context.Deadline(); ok { + freq.Deadline = proto.Float64(deadline.Sub(time.Now()).Seconds()) + } + + for k, vals := range req.Header { + for _, val := range vals { + freq.Header = append(freq.Header, &pb.URLFetchRequest_Header{ + Key: proto.String(k), + Value: proto.String(val), + }) + } + } + if methodAcceptsRequestBody[req.Method] && req.Body != nil { + // Avoid a []byte copy if req.Body has a Bytes method. 
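The check is done with a type switch on an anonymous single-method interface, which probes for an optional capability without requiring a named interface anywhere. Distilled into a standalone helper (a generic sketch, not the vendored code):

```go
package demo

import (
	"io"
	"io/ioutil"
)

// readAll hands back the buffered bytes directly when the reader can
// provide them (e.g. *bytes.Buffer); otherwise it falls back to a copy.
func readAll(r io.Reader) ([]byte, error) {
	if b, ok := r.(interface{ Bytes() []byte }); ok {
		return b.Bytes(), nil
	}
	return ioutil.ReadAll(r)
}
```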
+ switch b := req.Body.(type) { + case interface { + Bytes() []byte + }: + freq.Payload = b.Bytes() + default: + freq.Payload, err = ioutil.ReadAll(req.Body) + if err != nil { + return nil, err + } + } + } + + fres := &pb.URLFetchResponse{} + if err := internal.Call(t.Context, "urlfetch", "Fetch", freq, fres); err != nil { + return nil, err + } + + res = &http.Response{} + res.StatusCode = int(*fres.StatusCode) + res.Status = fmt.Sprintf("%d %s", res.StatusCode, statusCodeToText(res.StatusCode)) + res.Header = make(http.Header) + res.Request = req + + // Faked: + res.ProtoMajor = 1 + res.ProtoMinor = 1 + res.Proto = "HTTP/1.1" + res.Close = true + + for _, h := range fres.Header { + hkey := http.CanonicalHeaderKey(*h.Key) + hval := *h.Value + if hkey == "Content-Length" { + // Will get filled in below for all but HEAD requests. + if req.Method == "HEAD" { + res.ContentLength, _ = strconv.ParseInt(hval, 10, 64) + } + continue + } + res.Header.Add(hkey, hval) + } + + if req.Method != "HEAD" { + res.ContentLength = int64(len(fres.Content)) + } + + truncated := fres.GetContentWasTruncated() + res.Body = &bodyReader{content: fres.Content, truncated: truncated} + return +} + +func init() { + internal.RegisterErrorCodeMap("urlfetch", pb.URLFetchServiceError_ErrorCode_name) + internal.RegisterTimeoutErrorCode("urlfetch", int32(pb.URLFetchServiceError_DEADLINE_EXCEEDED)) +} diff --git a/vendor/google.golang.org/grpc/.travis.yml b/vendor/google.golang.org/grpc/.travis.yml deleted file mode 100644 index 88511623d923..000000000000 --- a/vendor/google.golang.org/grpc/.travis.yml +++ /dev/null @@ -1,18 +0,0 @@ -language: go - -go: - - 1.5.4 - - 1.6.3 - - 1.7 - -go_import_path: google.golang.org/grpc - -before_install: - - go get -u golang.org/x/tools/cmd/goimports github.com/golang/lint/golint github.com/axw/gocov/gocov github.com/mattn/goveralls golang.org/x/tools/cmd/cover - -script: - - '! gofmt -s -d -l . 2>&1 | read' - - '! goimports -l . | read' - - '! golint ./... | grep -vE "(_string|\.pb)\.go:"' - - '! go tool vet -all . 
2>&1 | grep -vE "constant [0-9]+ not a string in call to Errorf" | grep -vF .pb.go:' # https://github.com/golang/protobuf/issues/214 - - make test testrace diff --git a/vendor/google.golang.org/grpc/BUILD b/vendor/google.golang.org/grpc/BUILD deleted file mode 100644 index b6f51ba0f453..000000000000 --- a/vendor/google.golang.org/grpc/BUILD +++ /dev/null @@ -1,59 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "backoff.go", - "balancer.go", - "call.go", - "clientconn.go", - "doc.go", - "interceptor.go", - "rpc_util.go", - "server.go", - "stream.go", - "trace.go", - ], - importpath = "google.golang.org/grpc", - deps = [ - "//vendor/github.com/golang/protobuf/proto:go_default_library", - "//vendor/golang.org/x/net/context:go_default_library", - "//vendor/golang.org/x/net/http2:go_default_library", - "//vendor/golang.org/x/net/trace:go_default_library", - "//vendor/google.golang.org/grpc/codes:go_default_library", - "//vendor/google.golang.org/grpc/credentials:go_default_library", - "//vendor/google.golang.org/grpc/grpclog:go_default_library", - "//vendor/google.golang.org/grpc/internal:go_default_library", - "//vendor/google.golang.org/grpc/metadata:go_default_library", - "//vendor/google.golang.org/grpc/naming:go_default_library", - "//vendor/google.golang.org/grpc/transport:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [ - ":package-srcs", - "//vendor/google.golang.org/grpc/codes:all-srcs", - "//vendor/google.golang.org/grpc/credentials:all-srcs", - "//vendor/google.golang.org/grpc/grpclog:all-srcs", - "//vendor/google.golang.org/grpc/internal:all-srcs", - "//vendor/google.golang.org/grpc/metadata:all-srcs", - "//vendor/google.golang.org/grpc/naming:all-srcs", - "//vendor/google.golang.org/grpc/peer:all-srcs", - "//vendor/google.golang.org/grpc/transport:all-srcs", - ], - tags = ["automanaged"], -) diff --git a/vendor/google.golang.org/grpc/CONTRIBUTING.md b/vendor/google.golang.org/grpc/CONTRIBUTING.md deleted file mode 100644 index 36cd6f7581bb..000000000000 --- a/vendor/google.golang.org/grpc/CONTRIBUTING.md +++ /dev/null @@ -1,46 +0,0 @@ -# How to contribute - -We definitely welcome patches and contribution to grpc! Here are some guidelines -and information about how to do so. - -## Sending patches - -### Getting started - -1. Check out the code: - - $ go get google.golang.org/grpc - $ cd $GOPATH/src/google.golang.org/grpc - -1. Create a fork of the grpc-go repository. -1. Add your fork as a remote: - - $ git remote add fork git@github.com:$YOURGITHUBUSERNAME/grpc-go.git - -1. Make changes, commit them. -1. Run the test suite: - - $ make test - -1. Push your changes to your fork: - - $ git push fork ... - -1. Open a pull request. - -## Legal requirements - -In order to protect both you and ourselves, you will need to sign the -[Contributor License Agreement](https://cla.developers.google.com/clas). - -## Filing Issues -When filing an issue, make sure to answer these five questions: - -1. What version of Go are you using (`go version`)? -2. What operating system and processor architecture are you using? -3. What did you do? -4. What did you expect to see? -5. What did you see instead? 
- -### Contributing code -Unless otherwise noted, the Go source files are distributed under the BSD-style license found in the LICENSE file. diff --git a/vendor/google.golang.org/grpc/LICENSE b/vendor/google.golang.org/grpc/LICENSE deleted file mode 100644 index f4988b450799..000000000000 --- a/vendor/google.golang.org/grpc/LICENSE +++ /dev/null @@ -1,28 +0,0 @@ -Copyright 2014, Google Inc. -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/google.golang.org/grpc/Makefile b/vendor/google.golang.org/grpc/Makefile deleted file mode 100644 index 03bb01f0b35b..000000000000 --- a/vendor/google.golang.org/grpc/Makefile +++ /dev/null @@ -1,52 +0,0 @@ -all: test testrace - -deps: - go get -d -v google.golang.org/grpc/... - -updatedeps: - go get -d -v -u -f google.golang.org/grpc/... - -testdeps: - go get -d -v -t google.golang.org/grpc/... - -updatetestdeps: - go get -d -v -t -u -f google.golang.org/grpc/... - -build: deps - go build google.golang.org/grpc/... - -proto: - @ if ! which protoc > /dev/null; then \ - echo "error: protoc not installed" >&2; \ - exit 1; \ - fi - go get -u -v github.com/golang/protobuf/protoc-gen-go - # use $$dir as the root for all proto files in the same directory - for dir in $$(git ls-files '*.proto' | xargs -n1 dirname | uniq); do \ - protoc -I $$dir --go_out=plugins=grpc:$$dir $$dir/*.proto; \ - done - -test: testdeps - go test -v -cpu 1,4 google.golang.org/grpc/... - -testrace: testdeps - go test -v -race -cpu 1,4 google.golang.org/grpc/... - -clean: - go clean -i google.golang.org/grpc/... - -coverage: testdeps - ./coverage.sh --coveralls - -.PHONY: \ - all \ - deps \ - updatedeps \ - testdeps \ - updatetestdeps \ - build \ - proto \ - test \ - testrace \ - clean \ - coverage diff --git a/vendor/google.golang.org/grpc/PATENTS b/vendor/google.golang.org/grpc/PATENTS deleted file mode 100644 index 69b47959faba..000000000000 --- a/vendor/google.golang.org/grpc/PATENTS +++ /dev/null @@ -1,22 +0,0 @@ -Additional IP Rights Grant (Patents) - -"This implementation" means the copyrightable works distributed by -Google as part of the gRPC project. 
- -Google hereby grants to You a perpetual, worldwide, non-exclusive, -no-charge, royalty-free, irrevocable (except as stated in this section) -patent license to make, have made, use, offer to sell, sell, import, -transfer and otherwise run, modify and propagate the contents of this -implementation of gRPC, where such license applies only to those patent -claims, both currently owned or controlled by Google and acquired in -the future, licensable by Google that are necessarily infringed by this -implementation of gRPC. This grant does not include claims that would be -infringed only as a consequence of further modification of this -implementation. If you or your agent or exclusive licensee institute or -order or agree to the institution of patent litigation against any -entity (including a cross-claim or counterclaim in a lawsuit) alleging -that this implementation of gRPC or any code incorporated within this -implementation of gRPC constitutes direct or contributory patent -infringement, or inducement of patent infringement, then any patent -rights granted to you under this License for this implementation of gRPC -shall terminate as of the date such litigation is filed. diff --git a/vendor/google.golang.org/grpc/README.md b/vendor/google.golang.org/grpc/README.md deleted file mode 100644 index 660658bed30e..000000000000 --- a/vendor/google.golang.org/grpc/README.md +++ /dev/null @@ -1,32 +0,0 @@ -#gRPC-Go - -[![Build Status](https://travis-ci.org/grpc/grpc-go.svg)](https://travis-ci.org/grpc/grpc-go) [![GoDoc](https://godoc.org/google.golang.org/grpc?status.svg)](https://godoc.org/google.golang.org/grpc) - -The Go implementation of [gRPC](http://www.grpc.io/): A high performance, open source, general RPC framework that puts mobile and HTTP/2 first. For more information see the [gRPC Quick Start](http://www.grpc.io/docs/) guide. - -Installation ------------- - -To install this package, you need to install Go and setup your Go workspace on your computer. The simplest way to install the library is to run: - -``` -$ go get google.golang.org/grpc -``` - -Prerequisites -------------- - -This requires Go 1.5 or later . - -Constraints ------------ -The grpc package should only depend on standard Go packages and a small number of exceptions. If your contribution introduces new dependencies which are NOT in the [list](http://godoc.org/google.golang.org/grpc?imports), you need a discussion with gRPC-Go authors and consultants. - -Documentation -------------- -See [API documentation](https://godoc.org/google.golang.org/grpc) for package and API descriptions and find examples in the [examples directory](examples/). - -Status ------- -GA - diff --git a/vendor/google.golang.org/grpc/backoff.go b/vendor/google.golang.org/grpc/backoff.go deleted file mode 100644 index 52f4f10fc255..000000000000 --- a/vendor/google.golang.org/grpc/backoff.go +++ /dev/null @@ -1,80 +0,0 @@ -package grpc - -import ( - "math/rand" - "time" -) - -// DefaultBackoffConfig uses values specified for backoff in -// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. -var ( - DefaultBackoffConfig = BackoffConfig{ - MaxDelay: 120 * time.Second, - baseDelay: 1.0 * time.Second, - factor: 1.6, - jitter: 0.2, - } -) - -// backoffStrategy defines the methodology for backing off after a grpc -// connection failure. -// -// This is unexported until the gRPC project decides whether or not to allow -// alternative backoff strategies. Once a decision is made, this type and its -// method may be exported. 
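For concreteness, with the defaults above (1s base delay, factor 1.6, jitter 0.2, 120s cap) the formula implemented just below yields roughly 1s, 1.6s, 2.56s, 4.1s, ... for consecutive failures, each randomized by up to ±20%. A worked example under those assumed defaults:

```go
package main

import (
	"fmt"
	"math/rand"
)

func main() {
	base, factor, jitter, maxDelay := 1.0, 1.6, 0.2, 120.0 // seconds
	backoff := base
	for retries := 3; retries > 0 && backoff < maxDelay; retries-- {
		backoff *= factor // 1.0 -> 1.6 -> 2.56 -> 4.096
	}
	backoff *= 1 + jitter*(rand.Float64()*2-1) // uniform in [0.8x, 1.2x]
	fmt.Printf("after 3 consecutive failures, sleep ~%.2fs\n", backoff)
}
```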
-type backoffStrategy interface { - // backoff returns the amount of time to wait before the next retry given - // the number of consecutive failures. - backoff(retries int) time.Duration -} - -// BackoffConfig defines the parameters for the default gRPC backoff strategy. -type BackoffConfig struct { - // MaxDelay is the upper bound of backoff delay. - MaxDelay time.Duration - - // TODO(stevvooe): The following fields are not exported, as allowing - // changes would violate the current gRPC specification for backoff. If - // gRPC decides to allow more interesting backoff strategies, these fields - // may be opened up in the future. - - // baseDelay is the amount of time to wait before retrying after the first - // failure. - baseDelay time.Duration - - // factor is applied to the backoff after each retry. - factor float64 - - // jitter provides a range to randomize backoff delays. - jitter float64 -} - -func setDefaults(bc *BackoffConfig) { - md := bc.MaxDelay - *bc = DefaultBackoffConfig - - if md > 0 { - bc.MaxDelay = md - } -} - -func (bc BackoffConfig) backoff(retries int) (t time.Duration) { - if retries == 0 { - return bc.baseDelay - } - backoff, max := float64(bc.baseDelay), float64(bc.MaxDelay) - for backoff < max && retries > 0 { - backoff *= bc.factor - retries-- - } - if backoff > max { - backoff = max - } - // Randomize backoff delays so that if a cluster of requests start at - // the same time, they won't operate in lockstep. - backoff *= 1 + bc.jitter*(rand.Float64()*2-1) - if backoff < 0 { - return 0 - } - return time.Duration(backoff) -} diff --git a/vendor/google.golang.org/grpc/balancer.go b/vendor/google.golang.org/grpc/balancer.go deleted file mode 100644 index 17767768d37c..000000000000 --- a/vendor/google.golang.org/grpc/balancer.go +++ /dev/null @@ -1,390 +0,0 @@ -/* - * - * Copyright 2016, Google Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- * - */ - -package grpc - -import ( - "fmt" - "sync" - - "golang.org/x/net/context" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/naming" -) - -// Address represents a server the client connects to. -// This is the EXPERIMENTAL API and may be changed or extended in the future. -type Address struct { - // Addr is the server address on which a connection will be established. - Addr string - // Metadata is the information associated with Addr, which may be used - // to make load balancing decision. - Metadata interface{} -} - -// BalancerGetOptions configures a Get call. -// This is the EXPERIMENTAL API and may be changed or extended in the future. -type BalancerGetOptions struct { - // BlockingWait specifies whether Get should block when there is no - // connected address. - BlockingWait bool -} - -// Balancer chooses network addresses for RPCs. -// This is the EXPERIMENTAL API and may be changed or extended in the future. -type Balancer interface { - // Start does the initialization work to bootstrap a Balancer. For example, - // this function may start the name resolution and watch the updates. It will - // be called when dialing. - Start(target string) error - // Up informs the Balancer that gRPC has a connection to the server at - // addr. It returns down which is called once the connection to addr gets - // lost or closed. - // TODO: It is not clear how to construct and take advantage the meaningful error - // parameter for down. Need realistic demands to guide. - Up(addr Address) (down func(error)) - // Get gets the address of a server for the RPC corresponding to ctx. - // i) If it returns a connected address, gRPC internals issues the RPC on the - // connection to this address; - // ii) If it returns an address on which the connection is under construction - // (initiated by Notify(...)) but not connected, gRPC internals - // * fails RPC if the RPC is fail-fast and connection is in the TransientFailure or - // Shutdown state; - // or - // * issues RPC on the connection otherwise. - // iii) If it returns an address on which the connection does not exist, gRPC - // internals treats it as an error and will fail the corresponding RPC. - // - // Therefore, the following is the recommended rule when writing a custom Balancer. - // If opts.BlockingWait is true, it should return a connected address or - // block if there is no connected address. It should respect the timeout or - // cancellation of ctx when blocking. If opts.BlockingWait is false (for fail-fast - // RPCs), it should return an address it has notified via Notify(...) immediately - // instead of blocking. - // - // The function returns put which is called once the rpc has completed or failed. - // put can collect and report RPC stats to a remote load balancer. - // - // This function should only return the errors Balancer cannot recover by itself. - // gRPC internals will fail the RPC if an error is returned. - Get(ctx context.Context, opts BalancerGetOptions) (addr Address, put func(), err error) - // Notify returns a channel that is used by gRPC internals to watch the addresses - // gRPC needs to connect. The addresses might be from a name resolver or remote - // load balancer. gRPC internals will compare it with the existing connected - // addresses. If the address Balancer notified is not in the existing connected - // addresses, gRPC starts to connect the address. If an address in the existing - // connected addresses is not in the notification list, the corresponding connection - // is shutdown gracefully. 
Otherwise, there are no operations to take. Note that - // the Address slice must be the full list of the Addresses which should be connected. - // It is NOT delta. - Notify() <-chan []Address - // Close shuts down the balancer. - Close() error -} - -// downErr implements net.Error. It is constructed by gRPC internals and passed to the down -// call of Balancer. -type downErr struct { - timeout bool - temporary bool - desc string -} - -func (e downErr) Error() string { return e.desc } -func (e downErr) Timeout() bool { return e.timeout } -func (e downErr) Temporary() bool { return e.temporary } - -func downErrorf(timeout, temporary bool, format string, a ...interface{}) downErr { - return downErr{ - timeout: timeout, - temporary: temporary, - desc: fmt.Sprintf(format, a...), - } -} - -// RoundRobin returns a Balancer that selects addresses round-robin. It uses r to watch -// the name resolution updates and updates the addresses available correspondingly. -func RoundRobin(r naming.Resolver) Balancer { - return &roundRobin{r: r} -} - -type addrInfo struct { - addr Address - connected bool -} - -type roundRobin struct { - r naming.Resolver - w naming.Watcher - addrs []*addrInfo // all the addresses the client should potentially connect - mu sync.Mutex - addrCh chan []Address // the channel to notify gRPC internals the list of addresses the client should connect to. - next int // index of the next address to return for Get() - waitCh chan struct{} // the channel to block when there is no connected address available - done bool // The Balancer is closed. -} - -func (rr *roundRobin) watchAddrUpdates() error { - updates, err := rr.w.Next() - if err != nil { - grpclog.Printf("grpc: the naming watcher stops working due to %v.\n", err) - return err - } - rr.mu.Lock() - defer rr.mu.Unlock() - for _, update := range updates { - addr := Address{ - Addr: update.Addr, - Metadata: update.Metadata, - } - switch update.Op { - case naming.Add: - var exist bool - for _, v := range rr.addrs { - if addr == v.addr { - exist = true - grpclog.Println("grpc: The name resolver wanted to add an existing address: ", addr) - break - } - } - if exist { - continue - } - rr.addrs = append(rr.addrs, &addrInfo{addr: addr}) - case naming.Delete: - for i, v := range rr.addrs { - if addr == v.addr { - copy(rr.addrs[i:], rr.addrs[i+1:]) - rr.addrs = rr.addrs[:len(rr.addrs)-1] - break - } - } - default: - grpclog.Println("Unknown update.Op ", update.Op) - } - } - // Make a copy of rr.addrs and write it onto rr.addrCh so that gRPC internals gets notified. - open := make([]Address, len(rr.addrs)) - for i, v := range rr.addrs { - open[i] = v.addr - } - if rr.done { - return ErrClientConnClosing - } - rr.addrCh <- open - return nil -} - -func (rr *roundRobin) Start(target string) error { - rr.mu.Lock() - defer rr.mu.Unlock() - if rr.done { - return ErrClientConnClosing - } - if rr.r == nil { - // If there is no name resolver installed, it is not needed to - // do name resolution. In this case, target is added into rr.addrs - // as the only address available and rr.addrCh stays nil. - rr.addrs = append(rr.addrs, &addrInfo{addr: Address{Addr: target}}) - return nil - } - w, err := rr.r.Resolve(target) - if err != nil { - return err - } - rr.w = w - rr.addrCh = make(chan []Address) - go func() { - for { - if err := rr.watchAddrUpdates(); err != nil { - return - } - } - }() - return nil -} - -// Up sets the connected state of addr and sends notification if there are pending -// Get() calls. 
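The notification below is the close-to-broadcast idiom: every blocked Get() waits on the same channel, and closing it wakes all of them at once, whereas a send would wake only one. Distilled, with illustrative names:

```go
package demo

import "sync"

var (
	mu     sync.Mutex
	waitCh chan struct{} // nil when nobody is waiting
)

// waiter returns a channel that will be closed by the next wakeAll.
func waiter() <-chan struct{} {
	mu.Lock()
	defer mu.Unlock()
	if waitCh == nil {
		waitCh = make(chan struct{})
	}
	return waitCh
}

// wakeAll releases every goroutine blocked on the current channel.
func wakeAll() {
	mu.Lock()
	defer mu.Unlock()
	if waitCh != nil {
		close(waitCh) // unlike a send, a close unblocks all receivers
		waitCh = nil  // later waiters get a fresh channel
	}
}
```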
-func (rr *roundRobin) Up(addr Address) func(error) { - rr.mu.Lock() - defer rr.mu.Unlock() - var cnt int - for _, a := range rr.addrs { - if a.addr == addr { - if a.connected { - return nil - } - a.connected = true - } - if a.connected { - cnt++ - } - } - // addr is only one which is connected. Notify the Get() callers who are blocking. - if cnt == 1 && rr.waitCh != nil { - close(rr.waitCh) - rr.waitCh = nil - } - return func(err error) { - rr.down(addr, err) - } -} - -// down unsets the connected state of addr. -func (rr *roundRobin) down(addr Address, err error) { - rr.mu.Lock() - defer rr.mu.Unlock() - for _, a := range rr.addrs { - if addr == a.addr { - a.connected = false - break - } - } -} - -// Get returns the next addr in the rotation. -func (rr *roundRobin) Get(ctx context.Context, opts BalancerGetOptions) (addr Address, put func(), err error) { - var ch chan struct{} - rr.mu.Lock() - if rr.done { - rr.mu.Unlock() - err = ErrClientConnClosing - return - } - - if len(rr.addrs) > 0 { - if rr.next >= len(rr.addrs) { - rr.next = 0 - } - next := rr.next - for { - a := rr.addrs[next] - next = (next + 1) % len(rr.addrs) - if a.connected { - addr = a.addr - rr.next = next - rr.mu.Unlock() - return - } - if next == rr.next { - // Has iterated all the possible address but none is connected. - break - } - } - } - if !opts.BlockingWait { - if len(rr.addrs) == 0 { - rr.mu.Unlock() - err = fmt.Errorf("there is no address available") - return - } - // Returns the next addr on rr.addrs for failfast RPCs. - addr = rr.addrs[rr.next].addr - rr.next++ - rr.mu.Unlock() - return - } - // Wait on rr.waitCh for non-failfast RPCs. - if rr.waitCh == nil { - ch = make(chan struct{}) - rr.waitCh = ch - } else { - ch = rr.waitCh - } - rr.mu.Unlock() - for { - select { - case <-ctx.Done(): - err = ctx.Err() - return - case <-ch: - rr.mu.Lock() - if rr.done { - rr.mu.Unlock() - err = ErrClientConnClosing - return - } - - if len(rr.addrs) > 0 { - if rr.next >= len(rr.addrs) { - rr.next = 0 - } - next := rr.next - for { - a := rr.addrs[next] - next = (next + 1) % len(rr.addrs) - if a.connected { - addr = a.addr - rr.next = next - rr.mu.Unlock() - return - } - if next == rr.next { - // Has iterated all the possible address but none is connected. - break - } - } - } - // The newly added addr got removed by Down() again. - if rr.waitCh == nil { - ch = make(chan struct{}) - rr.waitCh = ch - } else { - ch = rr.waitCh - } - rr.mu.Unlock() - } - } -} - -func (rr *roundRobin) Notify() <-chan []Address { - return rr.addrCh -} - -func (rr *roundRobin) Close() error { - rr.mu.Lock() - defer rr.mu.Unlock() - rr.done = true - if rr.w != nil { - rr.w.Close() - } - if rr.waitCh != nil { - close(rr.waitCh) - rr.waitCh = nil - } - if rr.addrCh != nil { - close(rr.addrCh) - } - return nil -} diff --git a/vendor/google.golang.org/grpc/call.go b/vendor/google.golang.org/grpc/call.go deleted file mode 100644 index 788b3d928117..000000000000 --- a/vendor/google.golang.org/grpc/call.go +++ /dev/null @@ -1,233 +0,0 @@ -/* - * - * Copyright 2014, Google Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -package grpc - -import ( - "bytes" - "io" - "math" - "time" - - "golang.org/x/net/context" - "golang.org/x/net/trace" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/transport" -) - -// recvResponse receives and parses an RPC response. -// On error, it returns the error and indicates whether the call should be retried. -// -// TODO(zhaoq): Check whether the received message sequence is valid. -func recvResponse(dopts dialOptions, t transport.ClientTransport, c *callInfo, stream *transport.Stream, reply interface{}) error { - // Try to acquire header metadata from the server if there is any. - var err error - defer func() { - if err != nil { - if _, ok := err.(transport.ConnectionError); !ok { - t.CloseStream(stream, err) - } - } - }() - c.headerMD, err = stream.Header() - if err != nil { - return err - } - p := &parser{r: stream} - for { - if err = recv(p, dopts.codec, stream, dopts.dc, reply, math.MaxInt32); err != nil { - if err == io.EOF { - break - } - return err - } - } - c.trailerMD = stream.Trailer() - return nil -} - -// sendRequest writes out various information of an RPC such as Context and Message. -func sendRequest(ctx context.Context, codec Codec, compressor Compressor, callHdr *transport.CallHdr, t transport.ClientTransport, args interface{}, opts *transport.Options) (_ *transport.Stream, err error) { - stream, err := t.NewStream(ctx, callHdr) - if err != nil { - return nil, err - } - defer func() { - if err != nil { - // If err is connection error, t will be closed, no need to close stream here. - if _, ok := err.(transport.ConnectionError); !ok { - t.CloseStream(stream, err) - } - } - }() - var cbuf *bytes.Buffer - if compressor != nil { - cbuf = new(bytes.Buffer) - } - outBuf, err := encode(codec, args, compressor, cbuf) - if err != nil { - return nil, Errorf(codes.Internal, "grpc: %v", err) - } - err = t.Write(stream, outBuf, opts) - // t.NewStream(...) could lead to an early rejection of the RPC (e.g., the service/method - // does not exist.) so that t.Write could get io.EOF from wait(...). Leave the following - // recvResponse to get the final status. - if err != nil && err != io.EOF { - return nil, err - } - // Sent successfully. - return stream, nil -} - -// Invoke sends the RPC request on the wire and returns after response is received. 
-// Invoke is called by generated code. Also users can call Invoke directly when it -// is really needed in their use cases. -func Invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) error { - if cc.dopts.unaryInt != nil { - return cc.dopts.unaryInt(ctx, method, args, reply, cc, invoke, opts...) - } - return invoke(ctx, method, args, reply, cc, opts...) -} - -func invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) (err error) { - c := defaultCallInfo - for _, o := range opts { - if err := o.before(&c); err != nil { - return toRPCErr(err) - } - } - defer func() { - for _, o := range opts { - o.after(&c) - } - }() - if EnableTracing { - c.traceInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method) - defer c.traceInfo.tr.Finish() - c.traceInfo.firstLine.client = true - if deadline, ok := ctx.Deadline(); ok { - c.traceInfo.firstLine.deadline = deadline.Sub(time.Now()) - } - c.traceInfo.tr.LazyLog(&c.traceInfo.firstLine, false) - // TODO(dsymonds): Arrange for c.traceInfo.firstLine.remoteAddr to be set. - defer func() { - if err != nil { - c.traceInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) - c.traceInfo.tr.SetError() - } - }() - } - topts := &transport.Options{ - Last: true, - Delay: false, - } - for { - var ( - err error - t transport.ClientTransport - stream *transport.Stream - // Record the put handler from Balancer.Get(...). It is called once the - // RPC has completed or failed. - put func() - ) - // TODO(zhaoq): Need a formal spec of fail-fast. - callHdr := &transport.CallHdr{ - Host: cc.authority, - Method: method, - } - if cc.dopts.cp != nil { - callHdr.SendCompress = cc.dopts.cp.Type() - } - gopts := BalancerGetOptions{ - BlockingWait: !c.failFast, - } - t, put, err = cc.getTransport(ctx, gopts) - if err != nil { - // TODO(zhaoq): Probably revisit the error handling. - if _, ok := err.(*rpcError); ok { - return err - } - if err == errConnClosing || err == errConnUnavailable { - if c.failFast { - return Errorf(codes.Unavailable, "%v", err) - } - continue - } - // All the other errors are treated as Internal errors. - return Errorf(codes.Internal, "%v", err) - } - if c.traceInfo.tr != nil { - c.traceInfo.tr.LazyLog(&payload{sent: true, msg: args}, true) - } - stream, err = sendRequest(ctx, cc.dopts.codec, cc.dopts.cp, callHdr, t, args, topts) - if err != nil { - if put != nil { - put() - put = nil - } - // Retry a non-failfast RPC when - // i) there is a connection error; or - // ii) the server started to drain before this RPC was initiated. 
- if _, ok := err.(transport.ConnectionError); ok || err == transport.ErrStreamDrain { - if c.failFast { - return toRPCErr(err) - } - continue - } - return toRPCErr(err) - } - err = recvResponse(cc.dopts, t, &c, stream, reply) - if err != nil { - if put != nil { - put() - put = nil - } - if _, ok := err.(transport.ConnectionError); ok || err == transport.ErrStreamDrain { - if c.failFast { - return toRPCErr(err) - } - continue - } - return toRPCErr(err) - } - if c.traceInfo.tr != nil { - c.traceInfo.tr.LazyLog(&payload{sent: false, msg: reply}, true) - } - t.CloseStream(stream, nil) - if put != nil { - put() - put = nil - } - return Errorf(stream.StatusCode(), "%s", stream.StatusDesc()) - } -} diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go deleted file mode 100644 index aa02f96ad6c3..000000000000 --- a/vendor/google.golang.org/grpc/clientconn.go +++ /dev/null @@ -1,878 +0,0 @@ -/* - * - * Copyright 2014, Google Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -package grpc - -import ( - "errors" - "fmt" - "net" - "strings" - "sync" - "time" - - "golang.org/x/net/context" - "golang.org/x/net/trace" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/transport" -) - -var ( - // ErrClientConnClosing indicates that the operation is illegal because - // the ClientConn is closing. - ErrClientConnClosing = errors.New("grpc: the client connection is closing") - // ErrClientConnTimeout indicates that the ClientConn cannot establish the - // underlying connections within the specified timeout. - ErrClientConnTimeout = errors.New("grpc: timed out when dialing") - - // errNoTransportSecurity indicates that there is no transport security - // being set for ClientConn. Users should either set one or explicitly - // call WithInsecure DialOption to disable security. 
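As the comment above notes, Invoke is normally called by generated code but may be used directly. A sketch under assumed placeholders: the method path and message types are hypothetical, context is golang.org/x/net/context per this vintage, and grpc.FailFast is taken to be the CallOption of this era that toggles the failfast behavior handled in the retry loop above:

```go
// callDirect performs a unary RPC without generated stubs. conn is an
// established *grpc.ClientConn; HypotheticalRequest/HypotheticalReply stand
// in for real proto messages.
func callDirect(ctx context.Context, conn *grpc.ClientConn) (*HypotheticalReply, error) {
	reply := new(HypotheticalReply)
	// FailFast(false) makes the balancer's Get() block for a connected
	// address and lets transient transport errors be retried.
	err := grpc.Invoke(ctx, "/pkg.Service/Method", &HypotheticalRequest{}, reply, conn,
		grpc.FailFast(false))
	return reply, err
}
```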
- errNoTransportSecurity = errors.New("grpc: no transport security set (use grpc.WithInsecure() explicitly or set credentials)") - // errTransportCredentialsMissing indicates that users want to transmit security - // information (e.g., oauth2 token) which requires secure connection on an insecure - // connection. - errTransportCredentialsMissing = errors.New("grpc: the credentials require transport level security (use grpc.WithTransportCredentials() to set)") - // errCredentialsConflict indicates that grpc.WithTransportCredentials() - // and grpc.WithInsecure() are both called for a connection. - errCredentialsConflict = errors.New("grpc: transport credentials are set for an insecure connection (grpc.WithTransportCredentials() and grpc.WithInsecure() are both called)") - // errNetworkIO indicates that the connection is down due to some network I/O error. - errNetworkIO = errors.New("grpc: failed with network I/O error") - // errConnDrain indicates that the connection starts to be drained and does not accept any new RPCs. - errConnDrain = errors.New("grpc: the connection is drained") - // errConnClosing indicates that the connection is closing. - errConnClosing = errors.New("grpc: the connection is closing") - // errConnUnavailable indicates that the connection is unavailable. - errConnUnavailable = errors.New("grpc: the connection is unavailable") - errNoAddr = errors.New("grpc: there is no address available to dial") - // minimum time to give a connection to complete - minConnectTimeout = 20 * time.Second -) - -// dialOptions configure a Dial call. dialOptions are set by the DialOption -// values passed to Dial. -type dialOptions struct { - unaryInt UnaryClientInterceptor - streamInt StreamClientInterceptor - codec Codec - cp Compressor - dc Decompressor - bs backoffStrategy - balancer Balancer - block bool - insecure bool - timeout time.Duration - copts transport.ConnectOptions -} - -// DialOption configures how we set up the connection. -type DialOption func(*dialOptions) - -// WithCodec returns a DialOption which sets a codec for message marshaling and unmarshaling. -func WithCodec(c Codec) DialOption { - return func(o *dialOptions) { - o.codec = c - } -} - -// WithCompressor returns a DialOption which sets a CompressorGenerator for generating message -// compressor. -func WithCompressor(cp Compressor) DialOption { - return func(o *dialOptions) { - o.cp = cp - } -} - -// WithDecompressor returns a DialOption which sets a DecompressorGenerator for generating -// message decompressor. -func WithDecompressor(dc Decompressor) DialOption { - return func(o *dialOptions) { - o.dc = dc - } -} - -// WithBalancer returns a DialOption which sets a load balancer. -func WithBalancer(b Balancer) DialOption { - return func(o *dialOptions) { - o.balancer = b - } -} - -// WithBackoffMaxDelay configures the dialer to use the provided maximum delay -// when backing off after failed connection attempts. -func WithBackoffMaxDelay(md time.Duration) DialOption { - return WithBackoffConfig(BackoffConfig{MaxDelay: md}) -} - -// WithBackoffConfig configures the dialer to use the provided backoff -// parameters after connection failures. -// -// Use WithBackoffMaxDelay until more parameters on BackoffConfig are opened up -// for use. -func WithBackoffConfig(b BackoffConfig) DialOption { - // Set defaults to ensure that provided BackoffConfig is valid and - // unexported fields get default values. 
- setDefaults(&b) - return withBackoff(b) -} - -// withBackoff sets the backoff strategy used for retries after a -// failed connection attempt. -// -// This can be exported if arbitrary backoff strategies are allowed by gRPC. -func withBackoff(bs backoffStrategy) DialOption { - return func(o *dialOptions) { - o.bs = bs - } -} - -// WithBlock returns a DialOption which makes caller of Dial blocks until the underlying -// connection is up. Without this, Dial returns immediately and connecting the server -// happens in background. -func WithBlock() DialOption { - return func(o *dialOptions) { - o.block = true - } -} - -// WithInsecure returns a DialOption which disables transport security for this ClientConn. -// Note that transport security is required unless WithInsecure is set. -func WithInsecure() DialOption { - return func(o *dialOptions) { - o.insecure = true - } -} - -// WithTransportCredentials returns a DialOption which configures a -// connection level security credentials (e.g., TLS/SSL). -func WithTransportCredentials(creds credentials.TransportCredentials) DialOption { - return func(o *dialOptions) { - o.copts.TransportCredentials = creds - } -} - -// WithPerRPCCredentials returns a DialOption which sets -// credentials which will place auth state on each outbound RPC. -func WithPerRPCCredentials(creds credentials.PerRPCCredentials) DialOption { - return func(o *dialOptions) { - o.copts.PerRPCCredentials = append(o.copts.PerRPCCredentials, creds) - } -} - -// WithTimeout returns a DialOption that configures a timeout for dialing a ClientConn -// initially. This is valid if and only if WithBlock() is present. -func WithTimeout(d time.Duration) DialOption { - return func(o *dialOptions) { - o.timeout = d - } -} - -// WithDialer returns a DialOption that specifies a function to use for dialing network addresses. -func WithDialer(f func(string, time.Duration) (net.Conn, error)) DialOption { - return func(o *dialOptions) { - o.copts.Dialer = func(ctx context.Context, addr string) (net.Conn, error) { - if deadline, ok := ctx.Deadline(); ok { - return f(addr, deadline.Sub(time.Now())) - } - return f(addr, 0) - } - } -} - -// WithUserAgent returns a DialOption that specifies a user agent string for all the RPCs. -func WithUserAgent(s string) DialOption { - return func(o *dialOptions) { - o.copts.UserAgent = s - } -} - -// WithUnaryInterceptor returns a DialOption that specifies the interceptor for unary RPCs. -func WithUnaryInterceptor(f UnaryClientInterceptor) DialOption { - return func(o *dialOptions) { - o.unaryInt = f - } -} - -// WithStreamInterceptor returns a DialOption that specifies the interceptor for streaming RPCs. -func WithStreamInterceptor(f StreamClientInterceptor) DialOption { - return func(o *dialOptions) { - o.streamInt = f - } -} - -// Dial creates a client connection to the given target. -func Dial(target string, opts ...DialOption) (*ClientConn, error) { - return DialContext(context.Background(), target, opts...) -} - -// DialContext creates a client connection to the given target. ctx can be used to -// cancel or expire the pending connecting. Once this function returns, the -// cancellation and expiration of ctx will be noop. Users should call ClientConn.Close -// to terminate all the pending operations after this function returns. -// This is the EXPERIMENTAL API. 
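A sketch combining several of the DialOptions defined above; the host name and certificate file are hypothetical:

```go
package main

import (
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
)

func main() {
	// NewClientTLSFromFile is defined later in this same vendored tree.
	creds, err := credentials.NewClientTLSFromFile("server.crt", "") // hypothetical cert file
	if err != nil {
		log.Fatalf("loading credentials: %v", err)
	}
	conn, err := grpc.Dial(
		"example.com:443", // hypothetical target
		grpc.WithTransportCredentials(creds),
		grpc.WithBlock(),                // Dial waits until the connection is up
		grpc.WithTimeout(5*time.Second), // only meaningful combined with WithBlock
		grpc.WithUserAgent("example-client/0.1"),
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}
```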
-func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) { - cc := &ClientConn{ - target: target, - conns: make(map[Address]*addrConn), - } - cc.ctx, cc.cancel = context.WithCancel(context.Background()) - defer func() { - select { - case <-ctx.Done(): - conn, err = nil, ctx.Err() - default: - } - - if err != nil { - cc.Close() - } - }() - - for _, opt := range opts { - opt(&cc.dopts) - } - - // Set defaults. - if cc.dopts.codec == nil { - cc.dopts.codec = protoCodec{} - } - if cc.dopts.bs == nil { - cc.dopts.bs = DefaultBackoffConfig - } - - var ok bool - waitC := make(chan error, 1) - go func() { - var addrs []Address - if cc.dopts.balancer == nil { - // Connect to target directly if balancer is nil. - addrs = append(addrs, Address{Addr: target}) - } else { - if err := cc.dopts.balancer.Start(target); err != nil { - waitC <- err - return - } - ch := cc.dopts.balancer.Notify() - if ch == nil { - // There is no name resolver installed. - addrs = append(addrs, Address{Addr: target}) - } else { - addrs, ok = <-ch - if !ok || len(addrs) == 0 { - waitC <- errNoAddr - return - } - } - } - for _, a := range addrs { - if err := cc.resetAddrConn(a, false, nil); err != nil { - waitC <- err - return - } - } - close(waitC) - }() - var timeoutCh <-chan time.Time - if cc.dopts.timeout > 0 { - timeoutCh = time.After(cc.dopts.timeout) - } - select { - case <-ctx.Done(): - return nil, ctx.Err() - case err := <-waitC: - if err != nil { - return nil, err - } - case <-timeoutCh: - return nil, ErrClientConnTimeout - } - // If balancer is nil or balancer.Notify() is nil, ok will be false here. - // The lbWatcher goroutine will not be created. - if ok { - go cc.lbWatcher() - } - creds := cc.dopts.copts.TransportCredentials - if creds != nil && creds.Info().ServerName != "" { - cc.authority = creds.Info().ServerName - } else { - colonPos := strings.LastIndex(target, ":") - if colonPos == -1 { - colonPos = len(target) - } - cc.authority = target[:colonPos] - } - return cc, nil -} - -// ConnectivityState indicates the state of a client connection. -type ConnectivityState int - -const ( - // Idle indicates the ClientConn is idle. - Idle ConnectivityState = iota - // Connecting indicates the ClienConn is connecting. - Connecting - // Ready indicates the ClientConn is ready for work. - Ready - // TransientFailure indicates the ClientConn has seen a failure but expects to recover. - TransientFailure - // Shutdown indicates the ClientConn has started shutting down. - Shutdown -) - -func (s ConnectivityState) String() string { - switch s { - case Idle: - return "IDLE" - case Connecting: - return "CONNECTING" - case Ready: - return "READY" - case TransientFailure: - return "TRANSIENT_FAILURE" - case Shutdown: - return "SHUTDOWN" - default: - panic(fmt.Sprintf("unknown connectivity state: %d", s)) - } -} - -// ClientConn represents a client connection to an RPC server. -type ClientConn struct { - ctx context.Context - cancel context.CancelFunc - - target string - authority string - dopts dialOptions - - mu sync.RWMutex - conns map[Address]*addrConn -} - -func (cc *ClientConn) lbWatcher() { - for addrs := range cc.dopts.balancer.Notify() { - var ( - add []Address // Addresses need to setup connections. - del []*addrConn // Connections need to tear down. 
- ) - cc.mu.Lock() - for _, a := range addrs { - if _, ok := cc.conns[a]; !ok { - add = append(add, a) - } - } - for k, c := range cc.conns { - var keep bool - for _, a := range addrs { - if k == a { - keep = true - break - } - } - if !keep { - del = append(del, c) - delete(cc.conns, c.addr) - } - } - cc.mu.Unlock() - for _, a := range add { - cc.resetAddrConn(a, true, nil) - } - for _, c := range del { - c.tearDown(errConnDrain) - } - } -} - -// resetAddrConn creates an addrConn for addr and adds it to cc.conns. -// If there is an old addrConn for addr, it will be torn down, using tearDownErr as the reason. -// If tearDownErr is nil, errConnDrain will be used instead. -func (cc *ClientConn) resetAddrConn(addr Address, skipWait bool, tearDownErr error) error { - ac := &addrConn{ - cc: cc, - addr: addr, - dopts: cc.dopts, - } - ac.ctx, ac.cancel = context.WithCancel(cc.ctx) - ac.stateCV = sync.NewCond(&ac.mu) - if EnableTracing { - ac.events = trace.NewEventLog("grpc.ClientConn", ac.addr.Addr) - } - if !ac.dopts.insecure { - if ac.dopts.copts.TransportCredentials == nil { - return errNoTransportSecurity - } - } else { - if ac.dopts.copts.TransportCredentials != nil { - return errCredentialsConflict - } - for _, cd := range ac.dopts.copts.PerRPCCredentials { - if cd.RequireTransportSecurity() { - return errTransportCredentialsMissing - } - } - } - // Track ac in cc. This needs to be done before any getTransport(...) is called. - cc.mu.Lock() - if cc.conns == nil { - cc.mu.Unlock() - return ErrClientConnClosing - } - stale := cc.conns[ac.addr] - cc.conns[ac.addr] = ac - cc.mu.Unlock() - if stale != nil { - // There is an addrConn alive on ac.addr already. This could be due to - // 1) a buggy Balancer notifies duplicated Addresses; - // 2) goaway was received, a new ac will replace the old ac. - // The old ac should be deleted from cc.conns, but the - // underlying transport should drain rather than close. - if tearDownErr == nil { - // tearDownErr is nil if resetAddrConn is called by - // 1) Dial - // 2) lbWatcher - // In both cases, the stale ac should drain, not close. - stale.tearDown(errConnDrain) - } else { - stale.tearDown(tearDownErr) - } - } - // skipWait may overwrite the decision in ac.dopts.block. - if ac.dopts.block && !skipWait { - if err := ac.resetTransport(false); err != nil { - if err != errConnClosing { - // Tear down ac and delete it from cc.conns. - cc.mu.Lock() - delete(cc.conns, ac.addr) - cc.mu.Unlock() - ac.tearDown(err) - } - if e, ok := err.(transport.ConnectionError); ok && !e.Temporary() { - return e.Origin() - } - return err - } - // Start to monitor the error status of transport. - go ac.transportMonitor() - } else { - // Start a goroutine connecting to the server asynchronously. - go func() { - if err := ac.resetTransport(false); err != nil { - grpclog.Printf("Failed to dial %s: %v; please retry.", ac.addr.Addr, err) - if err != errConnClosing { - // Keep this ac in cc.conns, to get the reason it's torn down. - ac.tearDown(err) - } - return - } - ac.transportMonitor() - }() - } - return nil -} - -func (cc *ClientConn) getTransport(ctx context.Context, opts BalancerGetOptions) (transport.ClientTransport, func(), error) { - var ( - ac *addrConn - ok bool - put func() - ) - if cc.dopts.balancer == nil { - // If balancer is nil, there should be only one addrConn available. 
- cc.mu.RLock() - if cc.conns == nil { - cc.mu.RUnlock() - return nil, nil, toRPCErr(ErrClientConnClosing) - } - for _, ac = range cc.conns { - // Break after the first iteration to get the first addrConn. - ok = true - break - } - cc.mu.RUnlock() - } else { - var ( - addr Address - err error - ) - addr, put, err = cc.dopts.balancer.Get(ctx, opts) - if err != nil { - return nil, nil, toRPCErr(err) - } - cc.mu.RLock() - if cc.conns == nil { - cc.mu.RUnlock() - return nil, nil, toRPCErr(ErrClientConnClosing) - } - ac, ok = cc.conns[addr] - cc.mu.RUnlock() - } - if !ok { - if put != nil { - put() - } - return nil, nil, errConnClosing - } - t, err := ac.wait(ctx, cc.dopts.balancer != nil, !opts.BlockingWait) - if err != nil { - if put != nil { - put() - } - return nil, nil, err - } - return t, put, nil -} - -// Close tears down the ClientConn and all underlying connections. -func (cc *ClientConn) Close() error { - cc.cancel() - - cc.mu.Lock() - if cc.conns == nil { - cc.mu.Unlock() - return ErrClientConnClosing - } - conns := cc.conns - cc.conns = nil - cc.mu.Unlock() - if cc.dopts.balancer != nil { - cc.dopts.balancer.Close() - } - for _, ac := range conns { - ac.tearDown(ErrClientConnClosing) - } - return nil -} - -// addrConn is a network connection to a given address. -type addrConn struct { - ctx context.Context - cancel context.CancelFunc - - cc *ClientConn - addr Address - dopts dialOptions - events trace.EventLog - - mu sync.Mutex - state ConnectivityState - stateCV *sync.Cond - down func(error) // the handler called when a connection is down. - // ready is closed and becomes nil when a new transport is up or failed - // due to timeout. - ready chan struct{} - transport transport.ClientTransport - - // The reason this addrConn is torn down. - tearDownErr error -} - -// printf records an event in ac's event log, unless ac has been closed. -// REQUIRES ac.mu is held. -func (ac *addrConn) printf(format string, a ...interface{}) { - if ac.events != nil { - ac.events.Printf(format, a...) - } -} - -// errorf records an error in ac's event log, unless ac has been closed. -// REQUIRES ac.mu is held. -func (ac *addrConn) errorf(format string, a ...interface{}) { - if ac.events != nil { - ac.events.Errorf(format, a...) - } -} - -// getState returns the connectivity state of the Conn -func (ac *addrConn) getState() ConnectivityState { - ac.mu.Lock() - defer ac.mu.Unlock() - return ac.state -} - -// waitForStateChange blocks until the state changes to something other than the sourceState. -func (ac *addrConn) waitForStateChange(ctx context.Context, sourceState ConnectivityState) (ConnectivityState, error) { - ac.mu.Lock() - defer ac.mu.Unlock() - if sourceState != ac.state { - return ac.state, nil - } - done := make(chan struct{}) - var err error - go func() { - select { - case <-ctx.Done(): - ac.mu.Lock() - err = ctx.Err() - ac.stateCV.Broadcast() - ac.mu.Unlock() - case <-done: - } - }() - defer close(done) - for sourceState == ac.state { - ac.stateCV.Wait() - if err != nil { - return ac.state, err - } - } - return ac.state, nil -} - -func (ac *addrConn) resetTransport(closeTransport bool) error { - for retries := 0; ; retries++ { - ac.mu.Lock() - ac.printf("connecting") - if ac.state == Shutdown { - // ac.tearDown(...) has been invoked. 
- ac.mu.Unlock() - return errConnClosing - } - if ac.down != nil { - ac.down(downErrorf(false, true, "%v", errNetworkIO)) - ac.down = nil - } - ac.state = Connecting - ac.stateCV.Broadcast() - t := ac.transport - ac.mu.Unlock() - if closeTransport && t != nil { - t.Close() - } - sleepTime := ac.dopts.bs.backoff(retries) - timeout := minConnectTimeout - if timeout < sleepTime { - timeout = sleepTime - } - ctx, cancel := context.WithTimeout(ac.ctx, timeout) - connectTime := time.Now() - newTransport, err := transport.NewClientTransport(ctx, ac.addr.Addr, ac.dopts.copts) - if err != nil { - cancel() - - if e, ok := err.(transport.ConnectionError); ok && !e.Temporary() { - return err - } - grpclog.Printf("grpc: addrConn.resetTransport failed to create client transport: %v; Reconnecting to %v", err, ac.addr) - ac.mu.Lock() - if ac.state == Shutdown { - // ac.tearDown(...) has been invoked. - ac.mu.Unlock() - return errConnClosing - } - ac.errorf("transient failure: %v", err) - ac.state = TransientFailure - ac.stateCV.Broadcast() - if ac.ready != nil { - close(ac.ready) - ac.ready = nil - } - ac.mu.Unlock() - closeTransport = false - select { - case <-time.After(sleepTime - time.Since(connectTime)): - case <-ac.ctx.Done(): - return ac.ctx.Err() - } - continue - } - ac.mu.Lock() - ac.printf("ready") - if ac.state == Shutdown { - // ac.tearDown(...) has been invoked. - ac.mu.Unlock() - newTransport.Close() - return errConnClosing - } - ac.state = Ready - ac.stateCV.Broadcast() - ac.transport = newTransport - if ac.ready != nil { - close(ac.ready) - ac.ready = nil - } - if ac.cc.dopts.balancer != nil { - ac.down = ac.cc.dopts.balancer.Up(ac.addr) - } - ac.mu.Unlock() - return nil - } -} - -// Run in a goroutine to track the error in transport and create the -// new transport if an error happens. It returns when the channel is closing. -func (ac *addrConn) transportMonitor() { - for { - ac.mu.Lock() - t := ac.transport - ac.mu.Unlock() - select { - // This is needed to detect the teardown when - // the addrConn is idle (i.e., no RPC in flight). - case <-ac.ctx.Done(): - select { - case <-t.Error(): - t.Close() - default: - } - return - case <-t.GoAway(): - // If GoAway happens without any network I/O error, ac is closed without shutting down the - // underlying transport (the transport will be closed when all the pending RPCs finished or - // failed.). - // If GoAway and some network I/O error happen concurrently, ac and its underlying transport - // are closed. - // In both cases, a new ac is created. - select { - case <-t.Error(): - ac.cc.resetAddrConn(ac.addr, true, errNetworkIO) - default: - ac.cc.resetAddrConn(ac.addr, true, errConnDrain) - } - return - case <-t.Error(): - select { - case <-ac.ctx.Done(): - t.Close() - return - case <-t.GoAway(): - ac.cc.resetAddrConn(ac.addr, true, errNetworkIO) - return - default: - } - ac.mu.Lock() - if ac.state == Shutdown { - // ac has been shutdown. - ac.mu.Unlock() - return - } - ac.state = TransientFailure - ac.stateCV.Broadcast() - ac.mu.Unlock() - if err := ac.resetTransport(true); err != nil { - ac.mu.Lock() - ac.printf("transport exiting: %v", err) - ac.mu.Unlock() - grpclog.Printf("grpc: addrConn.transportMonitor exits due to: %v", err) - if err != errConnClosing { - // Keep this ac in cc.conns, to get the reason it's torn down. - ac.tearDown(err) - } - return - } - } - } -} - -// wait blocks until i) the new transport is up or ii) ctx is done or iii) ac is closed or -// iv) transport is in TransientFailure and there's no balancer/failfast is true. 
-func (ac *addrConn) wait(ctx context.Context, hasBalancer, failfast bool) (transport.ClientTransport, error) { - for { - ac.mu.Lock() - switch { - case ac.state == Shutdown: - if failfast || !hasBalancer { - // RPC is failfast or balancer is nil. This RPC should fail with ac.tearDownErr. - err := ac.tearDownErr - ac.mu.Unlock() - return nil, err - } - ac.mu.Unlock() - return nil, errConnClosing - case ac.state == Ready: - ct := ac.transport - ac.mu.Unlock() - return ct, nil - case ac.state == TransientFailure: - if failfast || hasBalancer { - ac.mu.Unlock() - return nil, errConnUnavailable - } - } - ready := ac.ready - if ready == nil { - ready = make(chan struct{}) - ac.ready = ready - } - ac.mu.Unlock() - select { - case <-ctx.Done(): - return nil, toRPCErr(ctx.Err()) - // Wait until the new transport is ready or failed. - case <-ready: - } - } -} - -// tearDown starts to tear down the addrConn. -// TODO(zhaoq): Make this synchronous to avoid unbounded memory consumption in -// some edge cases (e.g., the caller opens and closes many addrConn's in a -// tight loop. -// tearDown doesn't remove ac from ac.cc.conns. -func (ac *addrConn) tearDown(err error) { - ac.cancel() - - ac.mu.Lock() - defer ac.mu.Unlock() - if ac.down != nil { - ac.down(downErrorf(false, false, "%v", err)) - ac.down = nil - } - if err == errConnDrain && ac.transport != nil { - // GracefulClose(...) may be executed multiple times when - // i) receiving multiple GoAway frames from the server; or - // ii) there are concurrent name resolver/Balancer triggered - // address removal and GoAway. - ac.transport.GracefulClose() - } - if ac.state == Shutdown { - return - } - ac.state = Shutdown - ac.tearDownErr = err - ac.stateCV.Broadcast() - if ac.events != nil { - ac.events.Finish() - ac.events = nil - } - if ac.ready != nil { - close(ac.ready) - ac.ready = nil - } - if ac.transport != nil && err != errConnDrain { - ac.transport.Close() - } - return -} diff --git a/vendor/google.golang.org/grpc/codegen.sh b/vendor/google.golang.org/grpc/codegen.sh deleted file mode 100755 index b00948884295..000000000000 --- a/vendor/google.golang.org/grpc/codegen.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash - -# This script serves as an example to demonstrate how to generate the gRPC-Go -# interface and the related messages from .proto file. -# -# It assumes the installation of i) Google proto buffer compiler at -# https://github.com/google/protobuf (after v2.6.1) and ii) the Go codegen -# plugin at https://github.com/golang/protobuf (after 2015-02-20). If you have -# not, please install them first. -# -# We recommend running this script at $GOPATH/src. -# -# If this is not what you need, feel free to make your own scripts. Again, this -# script is for demonstration purpose. -# -proto=$1 -protoc --go_out=plugins=grpc:. 
$proto diff --git a/vendor/google.golang.org/grpc/codes/BUILD b/vendor/google.golang.org/grpc/codes/BUILD deleted file mode 100644 index a0c9bc3dbc03..000000000000 --- a/vendor/google.golang.org/grpc/codes/BUILD +++ /dev/null @@ -1,28 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "code_string.go", - "codes.go", - ], - importpath = "google.golang.org/grpc/codes", -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], -) diff --git a/vendor/google.golang.org/grpc/codes/code_string.go b/vendor/google.golang.org/grpc/codes/code_string.go deleted file mode 100644 index e6762d084558..000000000000 --- a/vendor/google.golang.org/grpc/codes/code_string.go +++ /dev/null @@ -1,16 +0,0 @@ -// generated by stringer -type=Code; DO NOT EDIT - -package codes - -import "fmt" - -const _Code_name = "OKCanceledUnknownInvalidArgumentDeadlineExceededNotFoundAlreadyExistsPermissionDeniedResourceExhaustedFailedPreconditionAbortedOutOfRangeUnimplementedInternalUnavailableDataLossUnauthenticated" - -var _Code_index = [...]uint8{0, 2, 10, 17, 32, 48, 56, 69, 85, 102, 120, 127, 137, 150, 158, 169, 177, 192} - -func (i Code) String() string { - if i+1 >= Code(len(_Code_index)) { - return fmt.Sprintf("Code(%d)", i) - } - return _Code_name[_Code_index[i]:_Code_index[i+1]] -} diff --git a/vendor/google.golang.org/grpc/codes/codes.go b/vendor/google.golang.org/grpc/codes/codes.go deleted file mode 100644 index 37c5b860bd6d..000000000000 --- a/vendor/google.golang.org/grpc/codes/codes.go +++ /dev/null @@ -1,159 +0,0 @@ -/* - * - * Copyright 2014, Google Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -// Package codes defines the canonical error codes used by gRPC. It is -// consistent across various languages. 
-package codes - -// A Code is an unsigned 32-bit error code as defined in the gRPC spec. -type Code uint32 - -//go:generate stringer -type=Code - -const ( - // OK is returned on success. - OK Code = 0 - - // Canceled indicates the operation was cancelled (typically by the caller). - Canceled Code = 1 - - // Unknown error. An example of where this error may be returned is - // if a Status value received from another address space belongs to - // an error-space that is not known in this address space. Also - // errors raised by APIs that do not return enough error information - // may be converted to this error. - Unknown Code = 2 - - // InvalidArgument indicates client specified an invalid argument. - // Note that this differs from FailedPrecondition. It indicates arguments - // that are problematic regardless of the state of the system - // (e.g., a malformed file name). - InvalidArgument Code = 3 - - // DeadlineExceeded means operation expired before completion. - // For operations that change the state of the system, this error may be - // returned even if the operation has completed successfully. For - // example, a successful response from a server could have been delayed - // long enough for the deadline to expire. - DeadlineExceeded Code = 4 - - // NotFound means some requested entity (e.g., file or directory) was - // not found. - NotFound Code = 5 - - // AlreadyExists means an attempt to create an entity failed because one - // already exists. - AlreadyExists Code = 6 - - // PermissionDenied indicates the caller does not have permission to - // execute the specified operation. It must not be used for rejections - // caused by exhausting some resource (use ResourceExhausted - // instead for those errors). It must not be - // used if the caller cannot be identified (use Unauthenticated - // instead for those errors). - PermissionDenied Code = 7 - - // Unauthenticated indicates the request does not have valid - // authentication credentials for the operation. - Unauthenticated Code = 16 - - // ResourceExhausted indicates some resource has been exhausted, perhaps - // a per-user quota, or perhaps the entire file system is out of space. - ResourceExhausted Code = 8 - - // FailedPrecondition indicates operation was rejected because the - // system is not in a state required for the operation's execution. - // For example, directory to be deleted may be non-empty, an rmdir - // operation is applied to a non-directory, etc. - // - // A litmus test that may help a service implementor in deciding - // between FailedPrecondition, Aborted, and Unavailable: - // (a) Use Unavailable if the client can retry just the failing call. - // (b) Use Aborted if the client should retry at a higher-level - // (e.g., restarting a read-modify-write sequence). - // (c) Use FailedPrecondition if the client should not retry until - // the system state has been explicitly fixed. E.g., if an "rmdir" - // fails because the directory is non-empty, FailedPrecondition - // should be returned since the client should not retry unless - // they have first fixed up the directory by deleting files from it. - // (d) Use FailedPrecondition if the client performs conditional - // REST Get/Update/Delete on a resource and the resource on the - // server does not match the condition. E.g., conflicting - // read-modify-write on the same resource. - FailedPrecondition Code = 9 - - // Aborted indicates the operation was aborted, typically due to a - // concurrency issue like sequencer check failures, transaction aborts, - // etc. 
- // - // See litmus test above for deciding between FailedPrecondition, - // Aborted, and Unavailable. - Aborted Code = 10 - - // OutOfRange means operation was attempted past the valid range. - // E.g., seeking or reading past end of file. - // - // Unlike InvalidArgument, this error indicates a problem that may - // be fixed if the system state changes. For example, a 32-bit file - // system will generate InvalidArgument if asked to read at an - // offset that is not in the range [0,2^32-1], but it will generate - // OutOfRange if asked to read from an offset past the current - // file size. - // - // There is a fair bit of overlap between FailedPrecondition and - // OutOfRange. We recommend using OutOfRange (the more specific - // error) when it applies so that callers who are iterating through - // a space can easily look for an OutOfRange error to detect when - // they are done. - OutOfRange Code = 11 - - // Unimplemented indicates operation is not implemented or not - // supported/enabled in this service. - Unimplemented Code = 12 - - // Internal errors. Means some invariants expected by underlying - // system has been broken. If you see one of these errors, - // something is very broken. - Internal Code = 13 - - // Unavailable indicates the service is currently unavailable. - // This is a most likely a transient condition and may be corrected - // by retrying with a backoff. - // - // See litmus test above for deciding between FailedPrecondition, - // Aborted, and Unavailable. - Unavailable Code = 14 - - // DataLoss indicates unrecoverable data loss or corruption. - DataLoss Code = 15 -) diff --git a/vendor/google.golang.org/grpc/coverage.sh b/vendor/google.golang.org/grpc/coverage.sh deleted file mode 100755 index 120235374a42..000000000000 --- a/vendor/google.golang.org/grpc/coverage.sh +++ /dev/null @@ -1,47 +0,0 @@ -#!/bin/bash - -set -e - -workdir=.cover -profile="$workdir/cover.out" -mode=set -end2endtest="google.golang.org/grpc/test" - -generate_cover_data() { - rm -rf "$workdir" - mkdir "$workdir" - - for pkg in "$@"; do - if [ $pkg == "google.golang.org/grpc" -o $pkg == "google.golang.org/grpc/transport" -o $pkg == "google.golang.org/grpc/metadata" -o $pkg == "google.golang.org/grpc/credentials" ] - then - f="$workdir/$(echo $pkg | tr / -)" - go test -covermode="$mode" -coverprofile="$f.cover" "$pkg" - go test -covermode="$mode" -coverpkg "$pkg" -coverprofile="$f.e2e.cover" "$end2endtest" - fi - done - - echo "mode: $mode" >"$profile" - grep -h -v "^mode:" "$workdir"/*.cover >>"$profile" -} - -show_cover_report() { - go tool cover -${1}="$profile" -} - -push_to_coveralls() { - goveralls -coverprofile="$profile" -} - -generate_cover_data $(go list ./...) 
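The litmus tests spelled out in the codes package above translate directly into client-side retry decisions. A sketch, assuming grpc.Code, the helper of this vintage that extracts the codes.Code from an RPC error:

```go
package rpcutil

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
)

// retryable classifies an RPC error following the guidance in package codes.
func retryable(err error) bool {
	switch grpc.Code(err) {
	case codes.Unavailable:
		return true // retry just the failing call (with backoff)
	case codes.Aborted:
		return true // retry, but at a higher level (e.g., the whole transaction)
	case codes.FailedPrecondition:
		return false // do not retry until system state is explicitly fixed
	default:
		return false
	}
}
```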
-show_cover_report func -case "$1" in -"") - ;; ---html) - show_cover_report html ;; ---coveralls) - push_to_coveralls ;; -*) - echo >&2 "error: invalid option: $1" ;; -esac -rm -rf "$workdir" diff --git a/vendor/google.golang.org/grpc/credentials/BUILD b/vendor/google.golang.org/grpc/credentials/BUILD deleted file mode 100644 index 17bfe6f62cb8..000000000000 --- a/vendor/google.golang.org/grpc/credentials/BUILD +++ /dev/null @@ -1,33 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "credentials.go", - "credentials_util_go17.go", - "credentials_util_pre_go17.go", - ], - importpath = "google.golang.org/grpc/credentials", - deps = ["//vendor/golang.org/x/net/context:go_default_library"], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [ - ":package-srcs", - "//vendor/google.golang.org/grpc/credentials/oauth:all-srcs", - ], - tags = ["automanaged"], -) diff --git a/vendor/google.golang.org/grpc/credentials/credentials.go b/vendor/google.golang.org/grpc/credentials/credentials.go deleted file mode 100644 index affe23609b46..000000000000 --- a/vendor/google.golang.org/grpc/credentials/credentials.go +++ /dev/null @@ -1,227 +0,0 @@ -/* - * - * Copyright 2014, Google Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -// Package credentials implements various credentials supported by gRPC library, -// which encapsulate all the state needed by a client to authenticate with a -// server and make various assertions, e.g., about the client's identity, role, -// or whether it is authorized to make a particular call. -package credentials - -import ( - "crypto/tls" - "crypto/x509" - "errors" - "fmt" - "io/ioutil" - "net" - "strings" - - "golang.org/x/net/context" -) - -var ( - // alpnProtoStr are the specified application level protocols for gRPC. 
- alpnProtoStr = []string{"h2"} -) - -// PerRPCCredentials defines the common interface for the credentials which need to -// attach security information to every RPC (e.g., oauth2). -type PerRPCCredentials interface { - // GetRequestMetadata gets the current request metadata, refreshing - // tokens if required. This should be called by the transport layer on - // each request, and the data should be populated in headers or other - // context. uri is the URI of the entry point for the request. When - // supported by the underlying implementation, ctx can be used for - // timeout and cancellation. - // TODO(zhaoq): Define the set of the qualified keys instead of leaving - // it as an arbitrary string. - GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) - // RequireTransportSecurity indicates whether the credentials requires - // transport security. - RequireTransportSecurity() bool -} - -// ProtocolInfo provides information regarding the gRPC wire protocol version, -// security protocol, security protocol version in use, server name, etc. -type ProtocolInfo struct { - // ProtocolVersion is the gRPC wire protocol version. - ProtocolVersion string - // SecurityProtocol is the security protocol in use. - SecurityProtocol string - // SecurityVersion is the security protocol version. - SecurityVersion string - // ServerName is the user-configured server name. - ServerName string -} - -// AuthInfo defines the common interface for the auth information the users are interested in. -type AuthInfo interface { - AuthType() string -} - -var ( - // ErrConnDispatched indicates that rawConn has been dispatched out of gRPC - // and the caller should not close rawConn. - ErrConnDispatched = errors.New("credentials: rawConn is dispatched out of gRPC") -) - -// TransportCredentials defines the common interface for all the live gRPC wire -// protocols and supported transport security protocols (e.g., TLS, SSL). -type TransportCredentials interface { - // ClientHandshake does the authentication handshake specified by the corresponding - // authentication protocol on rawConn for clients. It returns the authenticated - // connection and the corresponding auth information about the connection. - // Implementations must use the provided context to implement timely cancellation. - ClientHandshake(context.Context, string, net.Conn) (net.Conn, AuthInfo, error) - // ServerHandshake does the authentication handshake for servers. It returns - // the authenticated connection and the corresponding auth information about - // the connection. - ServerHandshake(net.Conn) (net.Conn, AuthInfo, error) - // Info provides the ProtocolInfo of this TransportCredentials. - Info() ProtocolInfo -} - -// TLSInfo contains the auth information for a TLS authenticated connection. -// It implements the AuthInfo interface. -type TLSInfo struct { - State tls.ConnectionState -} - -// AuthType returns the type of TLSInfo as a string. -func (t TLSInfo) AuthType() string { - return "tls" -} - -// tlsCreds is the credentials required for authenticating a connection using TLS. -type tlsCreds struct { - // TLS configuration - config *tls.Config -} - -func (c tlsCreds) Info() ProtocolInfo { - return ProtocolInfo{ - SecurityProtocol: "tls", - SecurityVersion: "1.2", - ServerName: c.config.ServerName, - } -} - -// GetRequestMetadata returns nil, nil since TLS credentials does not have -// metadata. 
-func (c *tlsCreds) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { - return nil, nil -} - -func (c *tlsCreds) RequireTransportSecurity() bool { - return true -} - -func (c *tlsCreds) ClientHandshake(ctx context.Context, addr string, rawConn net.Conn) (_ net.Conn, _ AuthInfo, err error) { - // use local cfg to avoid clobbering ServerName if using multiple endpoints - cfg := cloneTLSConfig(c.config) - if cfg.ServerName == "" { - colonPos := strings.LastIndex(addr, ":") - if colonPos == -1 { - colonPos = len(addr) - } - cfg.ServerName = addr[:colonPos] - } - conn := tls.Client(rawConn, cfg) - errChannel := make(chan error, 1) - go func() { - errChannel <- conn.Handshake() - }() - select { - case err := <-errChannel: - if err != nil { - return nil, nil, err - } - case <-ctx.Done(): - return nil, nil, ctx.Err() - } - // TODO(zhaoq): Omit the auth info for client now. It is more for - // information than anything else. - return conn, nil, nil -} - -func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) { - conn := tls.Server(rawConn, c.config) - if err := conn.Handshake(); err != nil { - return nil, nil, err - } - return conn, TLSInfo{conn.ConnectionState()}, nil -} - -// NewTLS uses c to construct a TransportCredentials based on TLS. -func NewTLS(c *tls.Config) TransportCredentials { - tc := &tlsCreds{cloneTLSConfig(c)} - tc.config.NextProtos = alpnProtoStr - return tc -} - -// NewClientTLSFromCert constructs a TLS from the input certificate for client. -// serverNameOverwrite is for testing only. If set to a non empty string, -// it will overwrite the virtual host name of authority (e.g. :authority header field) in requests. -func NewClientTLSFromCert(cp *x509.CertPool, serverNameOverwrite string) TransportCredentials { - return NewTLS(&tls.Config{ServerName: serverNameOverwrite, RootCAs: cp}) -} - -// NewClientTLSFromFile constructs a TLS from the input certificate file for client. -// serverNameOverwrite is for testing only. If set to a non empty string, -// it will overwrite the virtual host name of authority (e.g. :authority header field) in requests. -func NewClientTLSFromFile(certFile, serverNameOverwrite string) (TransportCredentials, error) { - b, err := ioutil.ReadFile(certFile) - if err != nil { - return nil, err - } - cp := x509.NewCertPool() - if !cp.AppendCertsFromPEM(b) { - return nil, fmt.Errorf("credentials: failed to append certificates") - } - return NewTLS(&tls.Config{ServerName: serverNameOverwrite, RootCAs: cp}), nil -} - -// NewServerTLSFromCert constructs a TLS from the input certificate for server. -func NewServerTLSFromCert(cert *tls.Certificate) TransportCredentials { - return NewTLS(&tls.Config{Certificates: []tls.Certificate{*cert}}) -} - -// NewServerTLSFromFile constructs a TLS from the input certificate file and key -// file for server. -func NewServerTLSFromFile(certFile, keyFile string) (TransportCredentials, error) { - cert, err := tls.LoadX509KeyPair(certFile, keyFile) - if err != nil { - return nil, err - } - return NewTLS(&tls.Config{Certificates: []tls.Certificate{cert}}), nil -} diff --git a/vendor/google.golang.org/grpc/credentials/credentials_util_go17.go b/vendor/google.golang.org/grpc/credentials/credentials_util_go17.go deleted file mode 100644 index 9647b9ec83ae..000000000000 --- a/vendor/google.golang.org/grpc/credentials/credentials_util_go17.go +++ /dev/null @@ -1,76 +0,0 @@ -// +build go1.7 - -/* - * - * Copyright 2016, Google Inc. - * All rights reserved. 
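The PerRPCCredentials interface above is small enough to implement by hand. A toy sketch attaching a static bearer token to every RPC; the type and token are hypothetical and not a production credential:

```go
package main

import "golang.org/x/net/context"

// staticToken implements credentials.PerRPCCredentials (hypothetical example).
type staticToken struct{ token string }

// GetRequestMetadata returns the token as an authorization metadata entry.
func (s staticToken) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
	return map[string]string{"authorization": "Bearer " + s.token}, nil
}

// RequireTransportSecurity returns true so the token is never sent in the
// clear; this in turn demands transport credentials on the connection.
func (s staticToken) RequireTransportSecurity() bool { return true }
```

It would be installed with grpc.WithPerRPCCredentials(staticToken{token: "..."}); because RequireTransportSecurity reports true, dialing insecurely fails with errTransportCredentialsMissing, as resetAddrConn above enforces.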
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -package credentials - -import ( - "crypto/tls" -) - -// cloneTLSConfig returns a shallow clone of the exported -// fields of cfg, ignoring the unexported sync.Once, which -// contains a mutex and must not be copied. -// -// If cfg is nil, a new zero tls.Config is returned. -// -// TODO replace this function with official clone function. -func cloneTLSConfig(cfg *tls.Config) *tls.Config { - if cfg == nil { - return &tls.Config{} - } - return &tls.Config{ - Rand: cfg.Rand, - Time: cfg.Time, - Certificates: cfg.Certificates, - NameToCertificate: cfg.NameToCertificate, - GetCertificate: cfg.GetCertificate, - RootCAs: cfg.RootCAs, - NextProtos: cfg.NextProtos, - ServerName: cfg.ServerName, - ClientAuth: cfg.ClientAuth, - ClientCAs: cfg.ClientCAs, - InsecureSkipVerify: cfg.InsecureSkipVerify, - CipherSuites: cfg.CipherSuites, - PreferServerCipherSuites: cfg.PreferServerCipherSuites, - SessionTicketsDisabled: cfg.SessionTicketsDisabled, - SessionTicketKey: cfg.SessionTicketKey, - ClientSessionCache: cfg.ClientSessionCache, - MinVersion: cfg.MinVersion, - MaxVersion: cfg.MaxVersion, - CurvePreferences: cfg.CurvePreferences, - DynamicRecordSizingDisabled: cfg.DynamicRecordSizingDisabled, - Renegotiation: cfg.Renegotiation, - } -} diff --git a/vendor/google.golang.org/grpc/credentials/credentials_util_pre_go17.go b/vendor/google.golang.org/grpc/credentials/credentials_util_pre_go17.go deleted file mode 100644 index 09b8d12c79b7..000000000000 --- a/vendor/google.golang.org/grpc/credentials/credentials_util_pre_go17.go +++ /dev/null @@ -1,74 +0,0 @@ -// +build !go1.7 - -/* - * - * Copyright 2016, Google Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -package credentials - -import ( - "crypto/tls" -) - -// cloneTLSConfig returns a shallow clone of the exported -// fields of cfg, ignoring the unexported sync.Once, which -// contains a mutex and must not be copied. -// -// If cfg is nil, a new zero tls.Config is returned. -// -// TODO replace this function with official clone function. -func cloneTLSConfig(cfg *tls.Config) *tls.Config { - if cfg == nil { - return &tls.Config{} - } - return &tls.Config{ - Rand: cfg.Rand, - Time: cfg.Time, - Certificates: cfg.Certificates, - NameToCertificate: cfg.NameToCertificate, - GetCertificate: cfg.GetCertificate, - RootCAs: cfg.RootCAs, - NextProtos: cfg.NextProtos, - ServerName: cfg.ServerName, - ClientAuth: cfg.ClientAuth, - ClientCAs: cfg.ClientCAs, - InsecureSkipVerify: cfg.InsecureSkipVerify, - CipherSuites: cfg.CipherSuites, - PreferServerCipherSuites: cfg.PreferServerCipherSuites, - SessionTicketsDisabled: cfg.SessionTicketsDisabled, - SessionTicketKey: cfg.SessionTicketKey, - ClientSessionCache: cfg.ClientSessionCache, - MinVersion: cfg.MinVersion, - MaxVersion: cfg.MaxVersion, - CurvePreferences: cfg.CurvePreferences, - } -} diff --git a/vendor/google.golang.org/grpc/credentials/oauth/BUILD b/vendor/google.golang.org/grpc/credentials/oauth/BUILD deleted file mode 100644 index 6bc5f99e598b..000000000000 --- a/vendor/google.golang.org/grpc/credentials/oauth/BUILD +++ /dev/null @@ -1,32 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) - -go_library( - name = "go_default_library", - srcs = ["oauth.go"], - importpath = "google.golang.org/grpc/credentials/oauth", - deps = [ - "//vendor/golang.org/x/net/context:go_default_library", - "//vendor/golang.org/x/oauth2:go_default_library", - "//vendor/golang.org/x/oauth2/google:go_default_library", - "//vendor/golang.org/x/oauth2/jwt:go_default_library", - "//vendor/google.golang.org/grpc/credentials:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], -) diff --git a/vendor/google.golang.org/grpc/credentials/oauth/oauth.go 
b/vendor/google.golang.org/grpc/credentials/oauth/oauth.go deleted file mode 100644 index 8e68c4d73b35..000000000000 --- a/vendor/google.golang.org/grpc/credentials/oauth/oauth.go +++ /dev/null @@ -1,180 +0,0 @@ -/* - * - * Copyright 2015, Google Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -// Package oauth implements gRPC credentials using OAuth. -package oauth - -import ( - "fmt" - "io/ioutil" - - "golang.org/x/net/context" - "golang.org/x/oauth2" - "golang.org/x/oauth2/google" - "golang.org/x/oauth2/jwt" - "google.golang.org/grpc/credentials" -) - -// TokenSource supplies PerRPCCredentials from an oauth2.TokenSource. -type TokenSource struct { - oauth2.TokenSource -} - -// GetRequestMetadata gets the request metadata as a map from a TokenSource. -func (ts TokenSource) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { - token, err := ts.Token() - if err != nil { - return nil, err - } - return map[string]string{ - "authorization": token.Type() + " " + token.AccessToken, - }, nil -} - -// RequireTransportSecurity indicates whether the credentails requires transport security. -func (ts TokenSource) RequireTransportSecurity() bool { - return true -} - -type jwtAccess struct { - jsonKey []byte -} - -// NewJWTAccessFromFile creates PerRPCCredentials from the given keyFile. -func NewJWTAccessFromFile(keyFile string) (credentials.PerRPCCredentials, error) { - jsonKey, err := ioutil.ReadFile(keyFile) - if err != nil { - return nil, fmt.Errorf("credentials: failed to read the service account key file: %v", err) - } - return NewJWTAccessFromKey(jsonKey) -} - -// NewJWTAccessFromKey creates PerRPCCredentials from the given jsonKey. 
-func NewJWTAccessFromKey(jsonKey []byte) (credentials.PerRPCCredentials, error) { - return jwtAccess{jsonKey}, nil -} - -func (j jwtAccess) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { - ts, err := google.JWTAccessTokenSourceFromJSON(j.jsonKey, uri[0]) - if err != nil { - return nil, err - } - token, err := ts.Token() - if err != nil { - return nil, err - } - return map[string]string{ - "authorization": token.TokenType + " " + token.AccessToken, - }, nil -} - -func (j jwtAccess) RequireTransportSecurity() bool { - return true -} - -// oauthAccess supplies PerRPCCredentials from a given token. -type oauthAccess struct { - token oauth2.Token -} - -// NewOauthAccess constructs the PerRPCCredentials using a given token. -func NewOauthAccess(token *oauth2.Token) credentials.PerRPCCredentials { - return oauthAccess{token: *token} -} - -func (oa oauthAccess) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { - return map[string]string{ - "authorization": oa.token.TokenType + " " + oa.token.AccessToken, - }, nil -} - -func (oa oauthAccess) RequireTransportSecurity() bool { - return true -} - -// NewComputeEngine constructs the PerRPCCredentials that fetches access tokens from -// Google Compute Engine (GCE)'s metadata server. It is only valid to use this -// if your program is running on a GCE instance. -// TODO(dsymonds): Deprecate and remove this. -func NewComputeEngine() credentials.PerRPCCredentials { - return TokenSource{google.ComputeTokenSource("")} -} - -// serviceAccount represents PerRPCCredentials via JWT signing key. -type serviceAccount struct { - config *jwt.Config -} - -func (s serviceAccount) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { - token, err := s.config.TokenSource(ctx).Token() - if err != nil { - return nil, err - } - return map[string]string{ - "authorization": token.TokenType + " " + token.AccessToken, - }, nil -} - -func (s serviceAccount) RequireTransportSecurity() bool { - return true -} - -// NewServiceAccountFromKey constructs the PerRPCCredentials using the JSON key slice -// from a Google Developers service account. -func NewServiceAccountFromKey(jsonKey []byte, scope ...string) (credentials.PerRPCCredentials, error) { - config, err := google.JWTConfigFromJSON(jsonKey, scope...) - if err != nil { - return nil, err - } - return serviceAccount{config: config}, nil -} - -// NewServiceAccountFromFile constructs the PerRPCCredentials using the JSON key file -// of a Google Developers service account. -func NewServiceAccountFromFile(keyFile string, scope ...string) (credentials.PerRPCCredentials, error) { - jsonKey, err := ioutil.ReadFile(keyFile) - if err != nil { - return nil, fmt.Errorf("credentials: failed to read the service account key file: %v", err) - } - return NewServiceAccountFromKey(jsonKey, scope...) -} - -// NewApplicationDefault returns "Application Default Credentials". For more -// detail, see https://developers.google.com/accounts/docs/application-default-credentials. -func NewApplicationDefault(ctx context.Context, scope ...string) (credentials.PerRPCCredentials, error) { - t, err := google.DefaultTokenSource(ctx, scope...) 
- if err != nil { - return nil, err - } - return TokenSource{t}, nil -} diff --git a/vendor/google.golang.org/grpc/doc.go b/vendor/google.golang.org/grpc/doc.go deleted file mode 100644 index b4c0e740e9c7..000000000000 --- a/vendor/google.golang.org/grpc/doc.go +++ /dev/null @@ -1,6 +0,0 @@ -/* -Package grpc implements an RPC system called gRPC. - -See www.grpc.io for more information about gRPC. -*/ -package grpc diff --git a/vendor/google.golang.org/grpc/grpclog/BUILD b/vendor/google.golang.org/grpc/grpclog/BUILD deleted file mode 100644 index 861651a9de87..000000000000 --- a/vendor/google.golang.org/grpc/grpclog/BUILD +++ /dev/null @@ -1,25 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) - -go_library( - name = "go_default_library", - srcs = ["logger.go"], - importpath = "google.golang.org/grpc/grpclog", -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], -) diff --git a/vendor/google.golang.org/grpc/grpclog/logger.go b/vendor/google.golang.org/grpc/grpclog/logger.go deleted file mode 100644 index 2cc09be48945..000000000000 --- a/vendor/google.golang.org/grpc/grpclog/logger.go +++ /dev/null @@ -1,93 +0,0 @@ -/* - * - * Copyright 2015, Google Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -/* -Package grpclog defines logging for grpc. -*/ -package grpclog - -import ( - "log" - "os" -) - -// Use golang's standard logger by default. -// Access is not mutex-protected: do not modify except in init() -// functions. -var logger Logger = log.New(os.Stderr, "", log.LstdFlags) - -// Logger mimics golang's standard Logger as an interface. 
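For illustration, a minimal sketch of wiring the oauth credentials deleted above into a client. The target address, token value, and CA file are placeholders, and the dial options (WithTransportCredentials, WithPerRPCCredentials) are assumed from the grpc package of the same vintage; this is not part of the original sources.

package main

import (
	"log"

	"golang.org/x/oauth2"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
	"google.golang.org/grpc/credentials/oauth"
)

func main() {
	// Transport security is required because these per-RPC credentials
	// return true from RequireTransportSecurity(). The CA file here is a
	// placeholder.
	tc, err := credentials.NewClientTLSFromFile("ca.pem", "")
	if err != nil {
		log.Fatalf("tls: %v", err)
	}
	// Every RPC on the connection carries "authorization: Bearer my-token"
	// in its metadata, via GetRequestMetadata above.
	perRPC := oauth.NewOauthAccess(&oauth2.Token{AccessToken: "my-token"})
	conn, err := grpc.Dial("localhost:50051",
		grpc.WithTransportCredentials(tc),
		grpc.WithPerRPCCredentials(perRPC))
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}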
-type Logger interface { - Fatal(args ...interface{}) - Fatalf(format string, args ...interface{}) - Fatalln(args ...interface{}) - Print(args ...interface{}) - Printf(format string, args ...interface{}) - Println(args ...interface{}) -} - -// SetLogger sets the logger that is used in grpc. Call only from -// init() functions. -func SetLogger(l Logger) { - logger = l -} - -// Fatal is equivalent to Print() followed by a call to os.Exit() with a non-zero exit code. -func Fatal(args ...interface{}) { - logger.Fatal(args...) -} - -// Fatalf is equivalent to Printf() followed by a call to os.Exit() with a non-zero exit code. -func Fatalf(format string, args ...interface{}) { - logger.Fatalf(format, args...) -} - -// Fatalln is equivalent to Println() followed by a call to os.Exit() with a non-zero exit code. -func Fatalln(args ...interface{}) { - logger.Fatalln(args...) -} - -// Print prints to the logger. Arguments are handled in the manner of fmt.Print. -func Print(args ...interface{}) { - logger.Print(args...) -} - -// Printf prints to the logger. Arguments are handled in the manner of fmt.Printf. -func Printf(format string, args ...interface{}) { - logger.Printf(format, args...) -} - -// Println prints to the logger. Arguments are handled in the manner of fmt.Println. -func Println(args ...interface{}) { - logger.Println(args...) -} diff --git a/vendor/google.golang.org/grpc/interceptor.go b/vendor/google.golang.org/grpc/interceptor.go deleted file mode 100644 index 8d932efed7e3..000000000000 --- a/vendor/google.golang.org/grpc/interceptor.go +++ /dev/null @@ -1,90 +0,0 @@ -/* - * - * Copyright 2016, Google Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -package grpc - -import ( - "golang.org/x/net/context" -) - -// UnaryInvoker is called by UnaryClientInterceptor to complete RPCs. -type UnaryInvoker func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error - -// UnaryClientInterceptor intercepts the execution of a unary RPC on the client.
invoker is the handler to complete the RPC -// and it is the responsibility of the interceptor to call it. -// This is the EXPERIMENTAL API. -type UnaryClientInterceptor func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error - -// Streamer is called by StreamClientInterceptor to create a ClientStream. -type Streamer func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) - -// StreamClientInterceptor intercepts the creation of ClientStream. It may return a custom ClientStream to intercept all I/O -// operations. streamer is the handler to create a ClientStream and it is the responsibility of the interceptor to call it. -// This is the EXPERIMENTAL API. -type StreamClientInterceptor func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, streamer Streamer, opts ...CallOption) (ClientStream, error) - -// UnaryServerInfo consists of various information about a unary RPC on -// server side. All per-rpc information may be mutated by the interceptor. -type UnaryServerInfo struct { - // Server is the service implementation the user provides. This is read-only. - Server interface{} - // FullMethod is the full RPC method string, i.e., /package.service/method. - FullMethod string -} - -// UnaryHandler defines the handler invoked by UnaryServerInterceptor to complete the normal -// execution of a unary RPC. -type UnaryHandler func(ctx context.Context, req interface{}) (interface{}, error) - -// UnaryServerInterceptor provides a hook to intercept the execution of a unary RPC on the server. info -// contains all the information of this RPC the interceptor can operate on. And handler is the wrapper -// of the service method implementation. It is the responsibility of the interceptor to invoke handler -// to complete the RPC. -type UnaryServerInterceptor func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (resp interface{}, err error) - -// StreamServerInfo consists of various information about a streaming RPC on -// server side. All per-rpc information may be mutated by the interceptor. -type StreamServerInfo struct { - // FullMethod is the full RPC method string, i.e., /package.service/method. - FullMethod string - // IsClientStream indicates whether the RPC is a client streaming RPC. - IsClientStream bool - // IsServerStream indicates whether the RPC is a server streaming RPC. - IsServerStream bool -} - -// StreamServerInterceptor provides a hook to intercept the execution of a streaming RPC on the server. -// info contains all the information of this RPC the interceptor can operate on. And handler is the -// service method implementation. It is the responsibility of the interceptor to invoke handler to -// complete the RPC.
-type StreamServerInterceptor func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error diff --git a/vendor/google.golang.org/grpc/internal/BUILD b/vendor/google.golang.org/grpc/internal/BUILD deleted file mode 100644 index ae8b2507d9c5..000000000000 --- a/vendor/google.golang.org/grpc/internal/BUILD +++ /dev/null @@ -1,25 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) - -go_library( - name = "go_default_library", - srcs = ["internal.go"], - importpath = "google.golang.org/grpc/internal", -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], -) diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go deleted file mode 100644 index 5489143a85c1..000000000000 --- a/vendor/google.golang.org/grpc/internal/internal.go +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Copyright 2016, Google Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -// Package internal contains gRPC-internal code for testing, to avoid polluting -// the godoc of the top-level grpc package. -package internal - -// TestingCloseConns closes all existing transports but keeps -// grpcServer.lis accepting new connections. -// -// The provided grpcServer must be of type *grpc.Server. It is untyped -// for circular dependency reasons. -var TestingCloseConns func(grpcServer interface{}) - -// TestingUseHandlerImpl enables the http.Handler-based server implementation. -// It must be called before Serve and requires TLS credentials. -// -// The provided grpcServer must be of type *grpc.Server. It is untyped -// for circular dependency reasons. 
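A short sketch of a UnaryClientInterceptor satisfying the contract defined above: it must delegate to the supplied invoker to actually complete the RPC. The timing/logging behavior, the target address, and the grpc.WithUnaryInterceptor dial option are assumptions for illustration, not taken from this diff.

package main

import (
	"log"
	"time"

	"golang.org/x/net/context"
	"google.golang.org/grpc"
)

// timingInterceptor logs the latency of every unary RPC and delegates the
// actual work to invoker, as the UnaryClientInterceptor contract requires.
func timingInterceptor(ctx context.Context, method string, req, reply interface{},
	cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
	start := time.Now()
	err := invoker(ctx, method, req, reply, cc, opts...)
	log.Printf("%s took %v (err=%v)", method, time.Since(start), err)
	return err
}

func main() {
	conn, err := grpc.Dial("localhost:50051",
		grpc.WithInsecure(),
		grpc.WithUnaryInterceptor(timingInterceptor))
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}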
-var TestingUseHandlerImpl func(grpcServer interface{}) diff --git a/vendor/google.golang.org/grpc/metadata/BUILD b/vendor/google.golang.org/grpc/metadata/BUILD deleted file mode 100644 index 8a1739e96c26..000000000000 --- a/vendor/google.golang.org/grpc/metadata/BUILD +++ /dev/null @@ -1,26 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) - -go_library( - name = "go_default_library", - srcs = ["metadata.go"], - importpath = "google.golang.org/grpc/metadata", - deps = ["//vendor/golang.org/x/net/context:go_default_library"], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], -) diff --git a/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/google.golang.org/grpc/metadata/metadata.go deleted file mode 100644 index b3200156826c..000000000000 --- a/vendor/google.golang.org/grpc/metadata/metadata.go +++ /dev/null @@ -1,140 +0,0 @@ -/* - * - * Copyright 2014, Google Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -// Package metadata defines the structure of the metadata supported by the gRPC library. -package metadata - -import ( - "encoding/base64" - "fmt" - "strings" - - "golang.org/x/net/context" -) - -const ( - binHdrSuffix = "-bin" -) - -// encodeKeyValue encodes key and value qualified for transmission via gRPC. -// Transmitting binary headers violates HTTP/2 spec. -// TODO(zhaoq): Maybe check if k is ASCII also. -func encodeKeyValue(k, v string) (string, string) { - k = strings.ToLower(k) - if strings.HasSuffix(k, binHdrSuffix) { - val := base64.StdEncoding.EncodeToString([]byte(v)) - v = string(val) - } - return k, v -} - -// DecodeKeyValue returns the original key and value corresponding to the -// encoded data in k, v.
-// If k is a binary header and v contains a comma, v is split on commas before being decoded, -// and the decoded values are joined with commas before being returned. -func DecodeKeyValue(k, v string) (string, string, error) { - if !strings.HasSuffix(k, binHdrSuffix) { - return k, v, nil - } - vvs := strings.Split(v, ",") - for i, vv := range vvs { - val, err := base64.StdEncoding.DecodeString(vv) - if err != nil { - return "", "", err - } - vvs[i] = string(val) - } - return k, strings.Join(vvs, ","), nil - } - -// MD is a mapping from metadata keys to values. Users should use the following -// two convenience functions New and Pairs to generate MD. -type MD map[string][]string - -// New creates an MD from the given key-value map. -func New(m map[string]string) MD { - md := MD{} - for k, v := range m { - key, val := encodeKeyValue(k, v) - md[key] = append(md[key], val) - } - return md -} - -// Pairs returns an MD formed by the mapping of key, value ... -// Pairs panics if len(kv) is odd. -func Pairs(kv ...string) MD { - if len(kv)%2 == 1 { - panic(fmt.Sprintf("metadata: Pairs got the odd number of input pairs for metadata: %d", len(kv))) - } - md := MD{} - var k string - for i, s := range kv { - if i%2 == 0 { - k = s - continue - } - key, val := encodeKeyValue(k, s) - md[key] = append(md[key], val) - } - return md -} - -// Len returns the number of items in md. -func (md MD) Len() int { - return len(md) -} - -// Copy returns a copy of md. -func (md MD) Copy() MD { - out := MD{} - for k, v := range md { - for _, i := range v { - out[k] = append(out[k], i) - } - } - return out -} - -type mdKey struct{} - -// NewContext creates a new context with md attached. -func NewContext(ctx context.Context, md MD) context.Context { - return context.WithValue(ctx, mdKey{}, md) -} - -// FromContext returns the MD in ctx if it exists. -func FromContext(ctx context.Context) (md MD, ok bool) { - md, ok = ctx.Value(mdKey{}).(MD) - return -} diff --git a/vendor/google.golang.org/grpc/naming/BUILD b/vendor/google.golang.org/grpc/naming/BUILD deleted file mode 100644 index 9d3c305415fa..000000000000 --- a/vendor/google.golang.org/grpc/naming/BUILD +++ /dev/null @@ -1,25 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) - -go_library( - name = "go_default_library", - srcs = ["naming.go"], - importpath = "google.golang.org/grpc/naming", -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], -) diff --git a/vendor/google.golang.org/grpc/naming/naming.go b/vendor/google.golang.org/grpc/naming/naming.go deleted file mode 100644 index c2e0871e6f8b..000000000000 --- a/vendor/google.golang.org/grpc/naming/naming.go +++ /dev/null @@ -1,74 +0,0 @@ -/* - * - * Copyright 2014, Google Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc.
nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -// Package naming defines the naming API and related data structures for gRPC. -// The interface is EXPERIMENTAL and may be subject to change. -package naming - -// Operation defines the corresponding operations for a name resolution change. -type Operation uint8 - -const ( - // Add indicates a new address is added. - Add Operation = iota - // Delete indicates an existing address is deleted. - Delete -) - -// Update defines a name resolution update. Note that it is not valid to have both -// an empty string Addr and nil Metadata in an Update. -type Update struct { - // Op indicates the operation of the update. - Op Operation - // Addr is the updated address. It is empty string if there is no address update. - Addr string - // Metadata is the updated metadata. It is nil if there is no metadata update. - // Metadata is not required for a custom naming implementation. - Metadata interface{} -} - -// Resolver creates a Watcher for a target to track its resolution changes. -type Resolver interface { - // Resolve creates a Watcher for target. - Resolve(target string) (Watcher, error) -} - -// Watcher watches for the updates on the specified target. -type Watcher interface { - // Next blocks until an update or error happens. It may return one or more - // updates. The first call should get the full set of the results. It should - // return an error if and only if Watcher cannot recover. - Next() ([]*Update, error) - // Close closes the Watcher.
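Looking back at the metadata package above, a minimal sketch of building an MD and carrying it through a context; the keys and values are placeholders.

package main

import (
	"fmt"

	"golang.org/x/net/context"
	"google.golang.org/grpc/metadata"
)

func main() {
	// Pairs panics on an odd number of arguments; keys are lowercased by
	// encodeKeyValue before being stored.
	md := metadata.Pairs("user-agent", "example", "request-id", "42")

	// Attach the MD to a context for an outgoing RPC...
	ctx := metadata.NewContext(context.Background(), md)

	// ...and recover it wherever the context flows.
	if got, ok := metadata.FromContext(ctx); ok {
		fmt.Println(got["request-id"]) // [42]
	}
}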
- Close() -} diff --git a/vendor/google.golang.org/grpc/peer/BUILD b/vendor/google.golang.org/grpc/peer/BUILD deleted file mode 100644 index 20fbd259f6ce..000000000000 --- a/vendor/google.golang.org/grpc/peer/BUILD +++ /dev/null @@ -1,29 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) - -go_library( - name = "go_default_library", - srcs = ["peer.go"], - importpath = "google.golang.org/grpc/peer", - deps = [ - "//vendor/golang.org/x/net/context:go_default_library", - "//vendor/google.golang.org/grpc/credentials:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], -) diff --git a/vendor/google.golang.org/grpc/peer/peer.go b/vendor/google.golang.org/grpc/peer/peer.go deleted file mode 100644 index bfa6205ba9ea..000000000000 --- a/vendor/google.golang.org/grpc/peer/peer.go +++ /dev/null @@ -1,65 +0,0 @@ -/* - * - * Copyright 2014, Google Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -// Package peer defines various peer information associated with RPCs and -// corresponding utils. -package peer - -import ( - "net" - - "golang.org/x/net/context" - "google.golang.org/grpc/credentials" -) - -// Peer contains the information of the peer for an RPC. -type Peer struct { - // Addr is the peer address. - Addr net.Addr - // AuthInfo is the authentication information of the transport. - // It is nil if there is no transport security being used. - AuthInfo credentials.AuthInfo -} - -type peerKey struct{} - -// NewContext creates a new context with peer information attached. -func NewContext(ctx context.Context, p *Peer) context.Context { - return context.WithValue(ctx, peerKey{}, p) -} - -// FromContext returns the peer information in ctx if it exists. 
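A minimal sketch of implementing the naming API above: a resolver that reports a single fixed address once and then blocks in Next until closed. The type names here are illustrative, not part of the package.

package staticnaming

import (
	"errors"

	"google.golang.org/grpc/naming"
)

// staticResolver resolves every target to one fixed address.
type staticResolver struct{ addr string }

func (r staticResolver) Resolve(target string) (naming.Watcher, error) {
	return &staticWatcher{addr: r.addr, done: make(chan struct{})}, nil
}

type staticWatcher struct {
	addr string
	sent bool
	done chan struct{}
}

// Next returns the full set exactly once, then blocks until Close is called,
// matching the Watcher contract above.
func (w *staticWatcher) Next() ([]*naming.Update, error) {
	if !w.sent {
		w.sent = true
		return []*naming.Update{{Op: naming.Add, Addr: w.addr}}, nil
	}
	<-w.done
	return nil, errors.New("staticnaming: watcher closed")
}

func (w *staticWatcher) Close() { close(w.done) }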
-func FromContext(ctx context.Context) (p *Peer, ok bool) { - p, ok = ctx.Value(peerKey{}).(*Peer) - return -} diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go deleted file mode 100644 index 6b60095d564a..000000000000 --- a/vendor/google.golang.org/grpc/rpc_util.go +++ /dev/null @@ -1,457 +0,0 @@ -/* - * - * Copyright 2014, Google Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -package grpc - -import ( - "bytes" - "compress/gzip" - "encoding/binary" - "fmt" - "io" - "io/ioutil" - "math" - "os" - - "github.com/golang/protobuf/proto" - "golang.org/x/net/context" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/transport" -) - -// Codec defines the interface gRPC uses to encode and decode messages. -type Codec interface { - // Marshal returns the wire format of v. - Marshal(v interface{}) ([]byte, error) - // Unmarshal parses the wire format into v. - Unmarshal(data []byte, v interface{}) error - // String returns the name of the Codec implementation. The returned - // string will be used as part of content type in transmission. - String() string -} - -// protoCodec is a Codec implementation with protobuf. It is the default codec for gRPC. -type protoCodec struct{} - -func (protoCodec) Marshal(v interface{}) ([]byte, error) { - return proto.Marshal(v.(proto.Message)) -} - -func (protoCodec) Unmarshal(data []byte, v interface{}) error { - return proto.Unmarshal(data, v.(proto.Message)) -} - -func (protoCodec) String() string { - return "proto" -} - -// Compressor defines the interface gRPC uses to compress a message. -type Compressor interface { - // Do compresses p into w. - Do(w io.Writer, p []byte) error - // Type returns the compression algorithm the Compressor uses. - Type() string -} - -// NewGZIPCompressor creates a Compressor based on GZIP. 
-func NewGZIPCompressor() Compressor { - return &gzipCompressor{} -} - -type gzipCompressor struct { -} - -func (c *gzipCompressor) Do(w io.Writer, p []byte) error { - z := gzip.NewWriter(w) - if _, err := z.Write(p); err != nil { - return err - } - return z.Close() -} - -func (c *gzipCompressor) Type() string { - return "gzip" -} - -// Decompressor defines the interface gRPC uses to decompress a message. -type Decompressor interface { - // Do reads the data from r and uncompresses it. - Do(r io.Reader) ([]byte, error) - // Type returns the compression algorithm the Decompressor uses. - Type() string -} - -type gzipDecompressor struct { -} - -// NewGZIPDecompressor creates a Decompressor based on GZIP. -func NewGZIPDecompressor() Decompressor { - return &gzipDecompressor{} -} - -func (d *gzipDecompressor) Do(r io.Reader) ([]byte, error) { - z, err := gzip.NewReader(r) - if err != nil { - return nil, err - } - defer z.Close() - return ioutil.ReadAll(z) -} - -func (d *gzipDecompressor) Type() string { - return "gzip" -} - -// callInfo contains all related configuration and information about an RPC. -type callInfo struct { - failFast bool - headerMD metadata.MD - trailerMD metadata.MD - traceInfo traceInfo // in trace.go -} - -var defaultCallInfo = callInfo{failFast: true} - -// CallOption configures a Call before it starts or extracts information from -// a Call after it completes. -type CallOption interface { - // before is called before the call is sent to any server. If before - // returns a non-nil error, the RPC fails with that error. - before(*callInfo) error - - // after is called after the call has completed. after cannot return an - // error, so any failures should be reported via output parameters. - after(*callInfo) -} - -type beforeCall func(c *callInfo) error - -func (o beforeCall) before(c *callInfo) error { return o(c) } -func (o beforeCall) after(c *callInfo) {} - -type afterCall func(c *callInfo) - -func (o afterCall) before(c *callInfo) error { return nil } -func (o afterCall) after(c *callInfo) { o(c) } - -// Header returns a CallOption that retrieves the header metadata -// for a unary RPC. -func Header(md *metadata.MD) CallOption { - return afterCall(func(c *callInfo) { - *md = c.headerMD - }) -} - -// Trailer returns a CallOption that retrieves the trailer metadata -// for a unary RPC. -func Trailer(md *metadata.MD) CallOption { - return afterCall(func(c *callInfo) { - *md = c.trailerMD - }) -} - -// FailFast configures the action to take when an RPC is attempted on broken -// connections or unreachable servers. If failfast is true, the RPC will fail -// immediately. Otherwise, the RPC client will block the call until a -// connection is available (or the call is canceled or times out) and will retry -// the call if it fails due to a transient error. Please refer to -// https://github.com/grpc/grpc/blob/master/doc/fail_fast.md -func FailFast(failFast bool) CallOption { - return beforeCall(func(c *callInfo) error { - c.failFast = failFast - return nil - }) -} - -// The format of the payload: compressed or not? -type payloadFormat uint8 - -const ( - compressionNone payloadFormat = iota // no compression - compressionMade -) - -// parser reads complete gRPC messages from the underlying reader. -type parser struct { - // r is the underlying reader. - // See the comment on recvMsg for the permissible - // error types. - r io.Reader - - // The header of a gRPC message. Find more detail - // at http://www.grpc.io/docs/guides/wire.html.
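A sketch of the Header, Trailer, and FailFast options above composing on a raw grpc.Invoke call; the method name and message types are placeholders for whatever generated code would supply.

package example

import (
	"log"

	"golang.org/x/net/context"
	"google.golang.org/grpc"
	"google.golang.org/grpc/metadata"
)

// call issues one unary RPC and collects its header and trailer metadata.
func call(cc *grpc.ClientConn, req, reply interface{}) error {
	var hdr, trl metadata.MD
	// FailFast(false) makes the call wait for a usable connection instead of
	// failing immediately; Header/Trailer fill their MD after completion.
	err := grpc.Invoke(context.Background(), "/pkg.Service/Method", req, reply, cc,
		grpc.Header(&hdr), grpc.Trailer(&trl), grpc.FailFast(false))
	log.Printf("header=%v trailer=%v err=%v", hdr, trl, err)
	return err
}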
- header [5]byte -} - -// recvMsg reads a complete gRPC message from the stream. -// -// It returns the message and its payload (compression/encoding) -// format. The caller owns the returned msg memory. -// -// If there is an error, possible values are: -// * io.EOF, when no messages remain -// * io.ErrUnexpectedEOF -// * of type transport.ConnectionError -// * of type transport.StreamError -// No other error values or types must be returned, which also means -// that the underlying io.Reader must not return an incompatible -// error. -func (p *parser) recvMsg(maxMsgSize int) (pf payloadFormat, msg []byte, err error) { - if _, err := io.ReadFull(p.r, p.header[:]); err != nil { - return 0, nil, err - } - - pf = payloadFormat(p.header[0]) - length := binary.BigEndian.Uint32(p.header[1:]) - - if length == 0 { - return pf, nil, nil - } - if length > uint32(maxMsgSize) { - return 0, nil, Errorf(codes.Internal, "grpc: received message length %d exceeding the max size %d", length, maxMsgSize) - } - // TODO(bradfitz,zhaoq): garbage. reuse buffer after proto decoding instead - // of making it for each message: - msg = make([]byte, int(length)) - if _, err := io.ReadFull(p.r, msg); err != nil { - if err == io.EOF { - err = io.ErrUnexpectedEOF - } - return 0, nil, err - } - return pf, msg, nil -} - -// encode serializes msg and prepends the message header. If msg is nil, it -// generates the message header of 0 message length. -func encode(c Codec, msg interface{}, cp Compressor, cbuf *bytes.Buffer) ([]byte, error) { - var b []byte - var length uint - if msg != nil { - var err error - // TODO(zhaoq): optimize to reduce memory alloc and copying. - b, err = c.Marshal(msg) - if err != nil { - return nil, err - } - if cp != nil { - if err := cp.Do(cbuf, b); err != nil { - return nil, err - } - b = cbuf.Bytes() - } - length = uint(len(b)) - } - if length > math.MaxUint32 { - return nil, Errorf(codes.InvalidArgument, "grpc: message too large (%d bytes)", length) - } - - const ( - payloadLen = 1 - sizeLen = 4 - ) - - var buf = make([]byte, payloadLen+sizeLen+len(b)) - - // Write payload format - if cp == nil { - buf[0] = byte(compressionNone) - } else { - buf[0] = byte(compressionMade) - } - // Write length of b into buf - binary.BigEndian.PutUint32(buf[1:], uint32(length)) - // Copy encoded msg to buf - copy(buf[5:], b) - - return buf, nil -} - -func checkRecvPayload(pf payloadFormat, recvCompress string, dc Decompressor) error { - switch pf { - case compressionNone: - case compressionMade: - if dc == nil || recvCompress != dc.Type() { - return Errorf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress) - } - default: - return Errorf(codes.Internal, "grpc: received unexpected payload format %d", pf) - } - return nil -} - -func recv(p *parser, c Codec, s *transport.Stream, dc Decompressor, m interface{}, maxMsgSize int) error { - pf, d, err := p.recvMsg(maxMsgSize) - if err != nil { - return err - } - if err := checkRecvPayload(pf, s.RecvCompress(), dc); err != nil { - return err - } - if pf == compressionMade { - d, err = dc.Do(bytes.NewReader(d)) - if err != nil { - return Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err) - } - } - if len(d) > maxMsgSize { - // TODO: Revisit the error code. Currently keep it consistent with java - // implementation. 
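The parser above consumes, and encode produces, the gRPC length-prefixed message framing: one byte for the compression flag followed by a 4-byte big-endian payload length. A standalone sketch of that framing, using only the standard library:

package example

import "encoding/binary"

// frame prepends the gRPC length-prefixed message header to a serialized
// payload, mirroring encode() above: byte 0 is the compression flag and
// bytes 1-4 are the payload length in big-endian order.
func frame(payload []byte, compressed bool) []byte {
	buf := make([]byte, 5+len(payload))
	if compressed {
		buf[0] = 1 // compressionMade
	}
	binary.BigEndian.PutUint32(buf[1:5], uint32(len(payload)))
	copy(buf[5:], payload)
	return buf
}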
- return Errorf(codes.Internal, "grpc: received a message of %d bytes exceeding %d limit", len(d), maxMsgSize) - } - if err := c.Unmarshal(d, m); err != nil { - return Errorf(codes.Internal, "grpc: failed to unmarshal the received message %v", err) - } - return nil -} - -// rpcError defines the status from an RPC. -type rpcError struct { - code codes.Code - desc string -} - -func (e *rpcError) Error() string { - return fmt.Sprintf("rpc error: code = %d desc = %s", e.code, e.desc) -} - -// Code returns the error code for err if it was produced by the rpc system. -// Otherwise, it returns codes.Unknown. -func Code(err error) codes.Code { - if err == nil { - return codes.OK - } - if e, ok := err.(*rpcError); ok { - return e.code - } - return codes.Unknown -} - -// ErrorDesc returns the error description of err if it was produced by the rpc system. -// Otherwise, it returns err.Error() or empty string when err is nil. -func ErrorDesc(err error) string { - if err == nil { - return "" - } - if e, ok := err.(*rpcError); ok { - return e.desc - } - return err.Error() -} - -// Errorf returns an error containing an error code and a description; -// Errorf returns nil if c is OK. -func Errorf(c codes.Code, format string, a ...interface{}) error { - if c == codes.OK { - return nil - } - return &rpcError{ - code: c, - desc: fmt.Sprintf(format, a...), - } -} - -// toRPCErr converts an error into a rpcError. -func toRPCErr(err error) error { - switch e := err.(type) { - case *rpcError: - return err - case transport.StreamError: - return &rpcError{ - code: e.Code, - desc: e.Desc, - } - case transport.ConnectionError: - return &rpcError{ - code: codes.Internal, - desc: e.Desc, - } - default: - switch err { - case context.DeadlineExceeded: - return &rpcError{ - code: codes.DeadlineExceeded, - desc: err.Error(), - } - case context.Canceled: - return &rpcError{ - code: codes.Canceled, - desc: err.Error(), - } - case ErrClientConnClosing: - return &rpcError{ - code: codes.FailedPrecondition, - desc: err.Error(), - } - } - - } - return Errorf(codes.Unknown, "%v", err) -} - -// convertCode converts a standard Go error into its canonical code. Note that -// this is only used to translate the error returned by the server applications. -func convertCode(err error) codes.Code { - switch err { - case nil: - return codes.OK - case io.EOF: - return codes.OutOfRange - case io.ErrClosedPipe, io.ErrNoProgress, io.ErrShortBuffer, io.ErrShortWrite, io.ErrUnexpectedEOF: - return codes.FailedPrecondition - case os.ErrInvalid: - return codes.InvalidArgument - case context.Canceled: - return codes.Canceled - case context.DeadlineExceeded: - return codes.DeadlineExceeded - } - switch { - case os.IsExist(err): - return codes.AlreadyExists - case os.IsNotExist(err): - return codes.NotFound - case os.IsPermission(err): - return codes.PermissionDenied - } - return codes.Unknown -} - -// SupportPackageIsVersion3 is referenced from generated protocol buffer files -// to assert that that code is compatible with this version of the grpc package. -// -// This constant may be renamed in the future if a change in the generated code -// requires a synchronised update of grpc-go and protoc-gen-go. This constant -// should not be referenced from any other code. 
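A short sketch of consuming the rpcError helpers above from client code:

package example

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
)

// classify demonstrates Code and ErrorDesc on an error returned by an RPC.
func classify(err error) {
	switch grpc.Code(err) {
	case codes.OK:
		log.Print("success")
	case codes.DeadlineExceeded:
		log.Printf("timed out: %s", grpc.ErrorDesc(err))
	default:
		// Errors not produced by the rpc system map to codes.Unknown.
		log.Printf("code=%d desc=%s", grpc.Code(err), grpc.ErrorDesc(err))
	}
}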
-const SupportPackageIsVersion3 = true diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go deleted file mode 100644 index 2524dd229744..000000000000 --- a/vendor/google.golang.org/grpc/server.go +++ /dev/null @@ -1,897 +0,0 @@ -/* - * - * Copyright 2014, Google Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -package grpc - -import ( - "bytes" - "errors" - "fmt" - "io" - "net" - "net/http" - "reflect" - "runtime" - "strings" - "sync" - "time" - - "golang.org/x/net/context" - "golang.org/x/net/http2" - "golang.org/x/net/trace" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/internal" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/transport" -) - -type methodHandler func(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor UnaryServerInterceptor) (interface{}, error) - -// MethodDesc represents an RPC service's method specification. -type MethodDesc struct { - MethodName string - Handler methodHandler -} - -// ServiceDesc represents an RPC service's specification. -type ServiceDesc struct { - ServiceName string - // The pointer to the service interface. Used to check whether the user - // provided implementation satisfies the interface requirements. - HandlerType interface{} - Methods []MethodDesc - Streams []StreamDesc - Metadata interface{} -} - -// service consists of the information of the server serving this service and -// the methods in this service. -type service struct { - server interface{} // the server for service methods - md map[string]*MethodDesc - sd map[string]*StreamDesc - mdata interface{} -} - -// Server is a gRPC server to serve RPC requests. -type Server struct { - opts options - - mu sync.Mutex // guards following - lis map[net.Listener]bool - conns map[io.Closer]bool - drain bool - // A CondVar to let GracefulStop() block until all the pending RPCs are finished - // and all the transports go away.
- cv *sync.Cond - m map[string]*service // service name -> service info - events trace.EventLog -} - -type options struct { - creds credentials.TransportCredentials - codec Codec - cp Compressor - dc Decompressor - maxMsgSize int - unaryInt UnaryServerInterceptor - streamInt StreamServerInterceptor - maxConcurrentStreams uint32 - useHandlerImpl bool // use http.Handler-based server -} - -var defaultMaxMsgSize = 1024 * 1024 * 4 // use 4MB as the default message size limit - -// A ServerOption sets options. -type ServerOption func(*options) - -// CustomCodec returns a ServerOption that sets a codec for message marshaling and unmarshaling. -func CustomCodec(codec Codec) ServerOption { - return func(o *options) { - o.codec = codec - } -} - -// RPCCompressor returns a ServerOption that sets a compressor for outbound messages. -func RPCCompressor(cp Compressor) ServerOption { - return func(o *options) { - o.cp = cp - } -} - -// RPCDecompressor returns a ServerOption that sets a decompressor for inbound messages. -func RPCDecompressor(dc Decompressor) ServerOption { - return func(o *options) { - o.dc = dc - } -} - -// MaxMsgSize returns a ServerOption to set the max message size in bytes for inbound messages. -// If this is not set, gRPC uses the default 4MB. -func MaxMsgSize(m int) ServerOption { - return func(o *options) { - o.maxMsgSize = m - } -} - -// MaxConcurrentStreams returns a ServerOption that will apply a limit on the number -// of concurrent streams to each ServerTransport. -func MaxConcurrentStreams(n uint32) ServerOption { - return func(o *options) { - o.maxConcurrentStreams = n - } -} - -// Creds returns a ServerOption that sets credentials for server connections. -func Creds(c credentials.TransportCredentials) ServerOption { - return func(o *options) { - o.creds = c - } -} - -// UnaryInterceptor returns a ServerOption that sets the UnaryServerInterceptor for the -// server. Only one unary interceptor can be installed. The construction of multiple -// interceptors (e.g., chaining) can be implemented at the caller. -func UnaryInterceptor(i UnaryServerInterceptor) ServerOption { - return func(o *options) { - if o.unaryInt != nil { - panic("The unary server interceptor has been set.") - } - o.unaryInt = i - } -} - -// StreamInterceptor returns a ServerOption that sets the StreamServerInterceptor for the -// server. Only one stream interceptor can be installed. -func StreamInterceptor(i StreamServerInterceptor) ServerOption { - return func(o *options) { - if o.streamInt != nil { - panic("The stream server interceptor has been set.") - } - o.streamInt = i - } -} - -// NewServer creates a gRPC server which has no service registered and has not -// started to accept requests yet. -func NewServer(opt ...ServerOption) *Server { - var opts options - opts.maxMsgSize = defaultMaxMsgSize - for _, o := range opt { - o(&opts) - } - if opts.codec == nil { - // Set the default codec. - opts.codec = protoCodec{} - } - s := &Server{ - lis: make(map[net.Listener]bool), - opts: opts, - conns: make(map[io.Closer]bool), - m: make(map[string]*service), - } - s.cv = sync.NewCond(&s.mu) - if EnableTracing { - _, file, line, _ := runtime.Caller(1) - s.events = trace.NewEventLog("grpc.Server", fmt.Sprintf("%s:%d", file, line)) - } - return s -} - -// printf records an event in s's event log, unless s has been stopped. -// REQUIRES s.mu is held. -func (s *Server) printf(format string, a ...interface{}) { - if s.events != nil { - s.events.Printf(format, a...)
- } -} - -// errorf records an error in s's event log, unless s has been stopped. -// REQUIRES s.mu is held. -func (s *Server) errorf(format string, a ...interface{}) { - if s.events != nil { - s.events.Errorf(format, a...) - } -} - -// RegisterService registers a service and its implementation to the gRPC -// server. Called from the IDL generated code. This must be called before -// invoking Serve. -func (s *Server) RegisterService(sd *ServiceDesc, ss interface{}) { - ht := reflect.TypeOf(sd.HandlerType).Elem() - st := reflect.TypeOf(ss) - if !st.Implements(ht) { - grpclog.Fatalf("grpc: Server.RegisterService found the handler of type %v that does not satisfy %v", st, ht) - } - s.register(sd, ss) -} - -func (s *Server) register(sd *ServiceDesc, ss interface{}) { - s.mu.Lock() - defer s.mu.Unlock() - s.printf("RegisterService(%q)", sd.ServiceName) - if _, ok := s.m[sd.ServiceName]; ok { - grpclog.Fatalf("grpc: Server.RegisterService found duplicate service registration for %q", sd.ServiceName) - } - srv := &service{ - server: ss, - md: make(map[string]*MethodDesc), - sd: make(map[string]*StreamDesc), - mdata: sd.Metadata, - } - for i := range sd.Methods { - d := &sd.Methods[i] - srv.md[d.MethodName] = d - } - for i := range sd.Streams { - d := &sd.Streams[i] - srv.sd[d.StreamName] = d - } - s.m[sd.ServiceName] = srv -} - -// MethodInfo contains the information of an RPC including its method name and type. -type MethodInfo struct { - // Name is the method name only, without the service name or package name. - Name string - // IsClientStream indicates whether the RPC is a client streaming RPC. - IsClientStream bool - // IsServerStream indicates whether the RPC is a server streaming RPC. - IsServerStream bool -} - -// ServiceInfo contains unary RPC method info, streaming RPC method info and metadata for a service. -type ServiceInfo struct { - Methods []MethodInfo - // Metadata is the metadata specified in ServiceDesc when registering service. - Metadata interface{} -} - -// GetServiceInfo returns a map from service names to ServiceInfo. -// Service names include the package names, in the form of <package>.<service>. -func (s *Server) GetServiceInfo() map[string]ServiceInfo { - ret := make(map[string]ServiceInfo) - for n, srv := range s.m { - methods := make([]MethodInfo, 0, len(srv.md)+len(srv.sd)) - for m := range srv.md { - methods = append(methods, MethodInfo{ - Name: m, - IsClientStream: false, - IsServerStream: false, - }) - } - for m, d := range srv.sd { - methods = append(methods, MethodInfo{ - Name: m, - IsClientStream: d.ClientStreams, - IsServerStream: d.ServerStreams, - }) - } - - ret[n] = ServiceInfo{ - Methods: methods, - Metadata: srv.mdata, - } - } - return ret -} - -var ( - // ErrServerStopped indicates that the operation is now illegal because of - // the server being stopped. - ErrServerStopped = errors.New("grpc: the server has been stopped") -) - -func (s *Server) useTransportAuthenticator(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { - if s.opts.creds == nil { - return rawConn, nil, nil - } - return s.opts.creds.ServerHandshake(rawConn) -} - -// Serve accepts incoming connections on the listener lis, creating a new -// ServerTransport and service goroutine for each. The service goroutines -// read gRPC requests and then call the registered handlers to reply to them. -// Serve returns when lis.Accept fails. lis will be closed when -// this method returns.
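Tying the ServerOptions above together, a minimal sketch of constructing and serving a server; the port is a placeholder, and service registration would normally be done by IDL-generated code calling RegisterService before Serve.

package main

import (
	"log"
	"net"

	"google.golang.org/grpc"
)

func main() {
	lis, err := net.Listen("tcp", ":50051")
	if err != nil {
		log.Fatalf("listen: %v", err)
	}
	s := grpc.NewServer(
		grpc.MaxMsgSize(8*1024*1024), // raise the 4MB default for inbound messages
		grpc.MaxConcurrentStreams(128),
	)
	// Generated code would call s.RegisterService(&serviceDesc, impl) here.
	if err := s.Serve(lis); err != nil { // blocks until lis.Accept fails
		log.Fatalf("serve: %v", err)
	}
}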
-func (s *Server) Serve(lis net.Listener) error { - s.mu.Lock() - s.printf("serving") - if s.lis == nil { - s.mu.Unlock() - lis.Close() - return ErrServerStopped - } - s.lis[lis] = true - s.mu.Unlock() - defer func() { - s.mu.Lock() - if s.lis != nil && s.lis[lis] { - lis.Close() - delete(s.lis, lis) - } - s.mu.Unlock() - }() - for { - rawConn, err := lis.Accept() - if err != nil { - s.mu.Lock() - s.printf("done serving; Accept = %v", err) - s.mu.Unlock() - return err - } - // Start a new goroutine to deal with rawConn - // so we don't stall this Accept loop goroutine. - go s.handleRawConn(rawConn) - } -} - -// handleRawConn is run in its own goroutine and handles a just-accepted -// connection that has not had any I/O performed on it yet. -func (s *Server) handleRawConn(rawConn net.Conn) { - conn, authInfo, err := s.useTransportAuthenticator(rawConn) - if err != nil { - s.mu.Lock() - s.errorf("ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err) - s.mu.Unlock() - grpclog.Printf("grpc: Server.Serve failed to complete security handshake from %q: %v", rawConn.RemoteAddr(), err) - // If serverHandShake returns ErrConnDispatched, keep rawConn open. - if err != credentials.ErrConnDispatched { - rawConn.Close() - } - return - } - - s.mu.Lock() - if s.conns == nil { - s.mu.Unlock() - conn.Close() - return - } - s.mu.Unlock() - - if s.opts.useHandlerImpl { - s.serveUsingHandler(conn) - } else { - s.serveNewHTTP2Transport(conn, authInfo) - } -} - -// serveNewHTTP2Transport sets up a new http/2 transport (using the -// gRPC http2 server transport in transport/http2_server.go) and -// serves streams on it. -// This is run in its own goroutine (it does network I/O in -// transport.NewServerTransport). -func (s *Server) serveNewHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) { - st, err := transport.NewServerTransport("http2", c, s.opts.maxConcurrentStreams, authInfo) - if err != nil { - s.mu.Lock() - s.errorf("NewServerTransport(%q) failed: %v", c.RemoteAddr(), err) - s.mu.Unlock() - c.Close() - grpclog.Println("grpc: Server.Serve failed to create ServerTransport: ", err) - return - } - if !s.addConn(st) { - st.Close() - return - } - s.serveStreams(st) -} - -func (s *Server) serveStreams(st transport.ServerTransport) { - defer s.removeConn(st) - defer st.Close() - var wg sync.WaitGroup - st.HandleStreams(func(stream *transport.Stream) { - wg.Add(1) - go func() { - defer wg.Done() - s.handleStream(st, stream, s.traceInfo(st, stream)) - }() - }) - wg.Wait() -} - -var _ http.Handler = (*Server)(nil) - -// serveUsingHandler is called from handleRawConn when s is configured -// to handle requests via the http.Handler interface. It sets up a -// net/http.Server to handle the just-accepted conn. The http.Server -// is configured to route all incoming requests (all HTTP/2 streams) -// to ServeHTTP, which creates a new ServerTransport for each stream. -// serveUsingHandler blocks until conn closes. -// -// This codepath is only used when Server.TestingUseHandlerImpl has -// been configured. This lets the end2end tests exercise the ServeHTTP -// method as one of the environment types. -// -// conn is the *tls.Conn that's already been authenticated. 
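Relating the serving path above back to the UnaryInterceptor option: a sketch of a server-side interceptor that logs the full method name before delegating to the handler, as the UnaryServerInterceptor contract requires. The logging behavior is illustrative only.

package example

import (
	"log"

	"golang.org/x/net/context"
	"google.golang.org/grpc"
)

// loggingInterceptor records each unary call, then invokes the wrapped handler.
func loggingInterceptor(ctx context.Context, req interface{},
	info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
	log.Printf("unary call: %s", info.FullMethod)
	return handler(ctx, req)
}

// newServer installs the interceptor; only one may be set, so any chaining
// has to be composed by the caller.
func newServer() *grpc.Server {
	return grpc.NewServer(grpc.UnaryInterceptor(loggingInterceptor))
}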
-func (s *Server) serveUsingHandler(conn net.Conn) { - if !s.addConn(conn) { - conn.Close() - return - } - defer s.removeConn(conn) - h2s := &http2.Server{ - MaxConcurrentStreams: s.opts.maxConcurrentStreams, - } - h2s.ServeConn(conn, &http2.ServeConnOpts{ - Handler: s, - }) -} - -func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { - st, err := transport.NewServerHandlerTransport(w, r) - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - if !s.addConn(st) { - st.Close() - return - } - defer s.removeConn(st) - s.serveStreams(st) -} - -// traceInfo returns a traceInfo and associates it with stream, if tracing is enabled. -// If tracing is not enabled, it returns nil. -func (s *Server) traceInfo(st transport.ServerTransport, stream *transport.Stream) (trInfo *traceInfo) { - if !EnableTracing { - return nil - } - trInfo = &traceInfo{ - tr: trace.New("grpc.Recv."+methodFamily(stream.Method()), stream.Method()), - } - trInfo.firstLine.client = false - trInfo.firstLine.remoteAddr = st.RemoteAddr() - stream.TraceContext(trInfo.tr) - if dl, ok := stream.Context().Deadline(); ok { - trInfo.firstLine.deadline = dl.Sub(time.Now()) - } - return trInfo -} - -func (s *Server) addConn(c io.Closer) bool { - s.mu.Lock() - defer s.mu.Unlock() - if s.conns == nil || s.drain { - return false - } - s.conns[c] = true - return true -} - -func (s *Server) removeConn(c io.Closer) { - s.mu.Lock() - defer s.mu.Unlock() - if s.conns != nil { - delete(s.conns, c) - s.cv.Signal() - } -} - -func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg interface{}, cp Compressor, opts *transport.Options) error { - var cbuf *bytes.Buffer - if cp != nil { - cbuf = new(bytes.Buffer) - } - p, err := encode(s.opts.codec, msg, cp, cbuf) - if err != nil { - // This typically indicates a fatal issue (e.g., memory - // corruption or hardware faults) the application program - // cannot handle. - // - // TODO(zhaoq): There exist other options also such as only closing the - // faulty stream locally and remotely (Other streams can keep going). Find - // the optimal option. - grpclog.Fatalf("grpc: Server failed to encode response %v", err) - } - return t.Write(stream, p, opts) -} - -func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, md *MethodDesc, trInfo *traceInfo) (err error) { - if trInfo != nil { - defer trInfo.tr.Finish() - trInfo.firstLine.client = false - trInfo.tr.LazyLog(&trInfo.firstLine, false) - defer func() { - if err != nil && err != io.EOF { - trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) - trInfo.tr.SetError() - } - }() - } - if s.opts.cp != nil { - // NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686. - stream.SetSendCompress(s.opts.cp.Type()) - } - p := &parser{r: stream} - for { - pf, req, err := p.recvMsg(s.opts.maxMsgSize) - if err == io.EOF { - // The entire stream is done (for unary RPC only). - return err - } - if err == io.ErrUnexpectedEOF { - err = Errorf(codes.Internal, io.ErrUnexpectedEOF.Error()) - } - if err != nil { - switch err := err.(type) { - case *rpcError: - if err := t.WriteStatus(stream, err.code, err.desc); err != nil { - grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", err) - } - case transport.ConnectionError: - // Nothing to do here. 
- case transport.StreamError: - if err := t.WriteStatus(stream, err.Code, err.Desc); err != nil { - grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", err) - } - default: - panic(fmt.Sprintf("grpc: Unexpected error (%T) from recvMsg: %v", err, err)) - } - return err - } - - if err := checkRecvPayload(pf, stream.RecvCompress(), s.opts.dc); err != nil { - switch err := err.(type) { - case *rpcError: - if err := t.WriteStatus(stream, err.code, err.desc); err != nil { - grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", err) - } - default: - if err := t.WriteStatus(stream, codes.Internal, err.Error()); err != nil { - grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", err) - } - - } - return err - } - statusCode := codes.OK - statusDesc := "" - df := func(v interface{}) error { - if pf == compressionMade { - var err error - req, err = s.opts.dc.Do(bytes.NewReader(req)) - if err != nil { - if err := t.WriteStatus(stream, codes.Internal, err.Error()); err != nil { - grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", err) - } - return err - } - } - if len(req) > s.opts.maxMsgSize { - // TODO: Revisit the error code. Currently keep it consistent with - // java implementation. - statusCode = codes.Internal - statusDesc = fmt.Sprintf("grpc: server received a message of %d bytes exceeding %d limit", len(req), s.opts.maxMsgSize) - } - if err := s.opts.codec.Unmarshal(req, v); err != nil { - return err - } - if trInfo != nil { - trInfo.tr.LazyLog(&payload{sent: false, msg: v}, true) - } - return nil - } - reply, appErr := md.Handler(srv.server, stream.Context(), df, s.opts.unaryInt) - if appErr != nil { - if err, ok := appErr.(*rpcError); ok { - statusCode = err.code - statusDesc = err.desc - } else { - statusCode = convertCode(appErr) - statusDesc = appErr.Error() - } - if trInfo != nil && statusCode != codes.OK { - trInfo.tr.LazyLog(stringer(statusDesc), true) - trInfo.tr.SetError() - } - if err := t.WriteStatus(stream, statusCode, statusDesc); err != nil { - grpclog.Printf("grpc: Server.processUnaryRPC failed to write status: %v", err) - return err - } - return nil - } - if trInfo != nil { - trInfo.tr.LazyLog(stringer("OK"), false) - } - opts := &transport.Options{ - Last: true, - Delay: false, - } - if err := s.sendResponse(t, stream, reply, s.opts.cp, opts); err != nil { - switch err := err.(type) { - case transport.ConnectionError: - // Nothing to do here. 
- case transport.StreamError: - statusCode = err.Code - statusDesc = err.Desc - default: - statusCode = codes.Unknown - statusDesc = err.Error() - } - return err - } - if trInfo != nil { - trInfo.tr.LazyLog(&payload{sent: true, msg: reply}, true) - } - return t.WriteStatus(stream, statusCode, statusDesc) - } -} - -func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, sd *StreamDesc, trInfo *traceInfo) (err error) { - if s.opts.cp != nil { - stream.SetSendCompress(s.opts.cp.Type()) - } - ss := &serverStream{ - t: t, - s: stream, - p: &parser{r: stream}, - codec: s.opts.codec, - cp: s.opts.cp, - dc: s.opts.dc, - maxMsgSize: s.opts.maxMsgSize, - trInfo: trInfo, - } - if ss.cp != nil { - ss.cbuf = new(bytes.Buffer) - } - if trInfo != nil { - trInfo.tr.LazyLog(&trInfo.firstLine, false) - defer func() { - ss.mu.Lock() - if err != nil && err != io.EOF { - ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) - ss.trInfo.tr.SetError() - } - ss.trInfo.tr.Finish() - ss.trInfo.tr = nil - ss.mu.Unlock() - }() - } - var appErr error - if s.opts.streamInt == nil { - appErr = sd.Handler(srv.server, ss) - } else { - info := &StreamServerInfo{ - FullMethod: stream.Method(), - IsClientStream: sd.ClientStreams, - IsServerStream: sd.ServerStreams, - } - appErr = s.opts.streamInt(srv.server, ss, info, sd.Handler) - } - if appErr != nil { - if err, ok := appErr.(*rpcError); ok { - ss.statusCode = err.code - ss.statusDesc = err.desc - } else if err, ok := appErr.(transport.StreamError); ok { - ss.statusCode = err.Code - ss.statusDesc = err.Desc - } else { - ss.statusCode = convertCode(appErr) - ss.statusDesc = appErr.Error() - } - } - if trInfo != nil { - ss.mu.Lock() - if ss.statusCode != codes.OK { - ss.trInfo.tr.LazyLog(stringer(ss.statusDesc), true) - ss.trInfo.tr.SetError() - } else { - ss.trInfo.tr.LazyLog(stringer("OK"), false) - } - ss.mu.Unlock() - } - return t.WriteStatus(ss.s, ss.statusCode, ss.statusDesc) - -} - -func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream, trInfo *traceInfo) { - sm := stream.Method() - if sm != "" && sm[0] == '/' { - sm = sm[1:] - } - pos := strings.LastIndex(sm, "/") - if pos == -1 { - if trInfo != nil { - trInfo.tr.LazyLog(&fmtStringer{"Malformed method name %q", []interface{}{sm}}, true) - trInfo.tr.SetError() - } - if err := t.WriteStatus(stream, codes.InvalidArgument, fmt.Sprintf("malformed method name: %q", stream.Method())); err != nil { - if trInfo != nil { - trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) - trInfo.tr.SetError() - } - grpclog.Printf("grpc: Server.handleStream failed to write status: %v", err) - } - if trInfo != nil { - trInfo.tr.Finish() - } - return - } - service := sm[:pos] - method := sm[pos+1:] - srv, ok := s.m[service] - if !ok { - if trInfo != nil { - trInfo.tr.LazyLog(&fmtStringer{"Unknown service %v", []interface{}{service}}, true) - trInfo.tr.SetError() - } - if err := t.WriteStatus(stream, codes.Unimplemented, fmt.Sprintf("unknown service %v", service)); err != nil { - if trInfo != nil { - trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) - trInfo.tr.SetError() - } - grpclog.Printf("grpc: Server.handleStream failed to write status: %v", err) - } - if trInfo != nil { - trInfo.tr.Finish() - } - return - } - // Unary RPC or Streaming RPC? 
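// (Dispatch recap: stream.Method() of the form "/pkg.Service/Get" was split
// above into service "pkg.Service" and method "Get"; srv.md maps method names
// to unary MethodDesc entries, srv.sd to streaming StreamDesc entries, and
// anything else falls through to codes.Unimplemented below.)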
- if md, ok := srv.md[method]; ok { - s.processUnaryRPC(t, stream, srv, md, trInfo) - return - } - if sd, ok := srv.sd[method]; ok { - s.processStreamingRPC(t, stream, srv, sd, trInfo) - return - } - if trInfo != nil { - trInfo.tr.LazyLog(&fmtStringer{"Unknown method %v", []interface{}{method}}, true) - trInfo.tr.SetError() - } - if err := t.WriteStatus(stream, codes.Unimplemented, fmt.Sprintf("unknown method %v", method)); err != nil { - if trInfo != nil { - trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) - trInfo.tr.SetError() - } - grpclog.Printf("grpc: Server.handleStream failed to write status: %v", err) - } - if trInfo != nil { - trInfo.tr.Finish() - } -} - -// Stop stops the gRPC server. It immediately closes all open -// connections and listeners. -// It cancels all active RPCs on the server side and the corresponding -// pending RPCs on the client side will get notified by connection -// errors. -func (s *Server) Stop() { - s.mu.Lock() - listeners := s.lis - s.lis = nil - st := s.conns - s.conns = nil - // interrupt GracefulStop if Stop and GracefulStop are called concurrently. - s.cv.Signal() - s.mu.Unlock() - - for lis := range listeners { - lis.Close() - } - for c := range st { - c.Close() - } - - s.mu.Lock() - if s.events != nil { - s.events.Finish() - s.events = nil - } - s.mu.Unlock() -} - -// GracefulStop stops the gRPC server gracefully. It stops the server to accept new -// connections and RPCs and blocks until all the pending RPCs are finished. -func (s *Server) GracefulStop() { - s.mu.Lock() - defer s.mu.Unlock() - if s.drain == true || s.conns == nil { - return - } - s.drain = true - for lis := range s.lis { - lis.Close() - } - s.lis = nil - for c := range s.conns { - c.(transport.ServerTransport).Drain() - } - for len(s.conns) != 0 { - s.cv.Wait() - } - s.conns = nil - if s.events != nil { - s.events.Finish() - s.events = nil - } -} - -func init() { - internal.TestingCloseConns = func(arg interface{}) { - arg.(*Server).testingCloseConns() - } - internal.TestingUseHandlerImpl = func(arg interface{}) { - arg.(*Server).opts.useHandlerImpl = true - } -} - -// testingCloseConns closes all existing transports but keeps s.lis -// accepting new connections. -func (s *Server) testingCloseConns() { - s.mu.Lock() - for c := range s.conns { - c.Close() - delete(s.conns, c) - } - s.mu.Unlock() -} - -// SendHeader sends header metadata. It may be called at most once from a unary -// RPC handler. The ctx is the RPC handler's Context or one derived from it. -func SendHeader(ctx context.Context, md metadata.MD) error { - if md.Len() == 0 { - return nil - } - stream, ok := transport.StreamFromContext(ctx) - if !ok { - return fmt.Errorf("grpc: failed to fetch the stream from the context %v", ctx) - } - t := stream.ServerTransport() - if t == nil { - grpclog.Fatalf("grpc: SendHeader: %v has no ServerTransport to send header metadata.", stream) - } - return t.WriteHeader(stream, md) -} - -// SetTrailer sets the trailer metadata that will be sent when an RPC returns. -// It may be called at most once from a unary RPC handler. The ctx is the RPC -// handler's Context or one derived from it. 
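// Illustrative sketch (not part of the deleted file; the message and service
// types are hypothetical): how a unary handler would use SendHeader and
// SetTrailer:
//
//	func (s *svc) Get(ctx context.Context, in *pb.Req) (*pb.Resp, error) {
//		_ = grpc.SendHeader(ctx, metadata.Pairs("x-phase", "early"))
//		grpc.SetTrailer(ctx, metadata.Pairs("x-cost", "1"))
//		return &pb.Resp{}, nil
//	}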
-func SetTrailer(ctx context.Context, md metadata.MD) error { - if md.Len() == 0 { - return nil - } - stream, ok := transport.StreamFromContext(ctx) - if !ok { - return fmt.Errorf("grpc: failed to fetch the stream from the context %v", ctx) - } - return stream.SetTrailer(md) -} diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go deleted file mode 100644 index e1b4759e7268..000000000000 --- a/vendor/google.golang.org/grpc/stream.go +++ /dev/null @@ -1,512 +0,0 @@ -/* - * - * Copyright 2014, Google Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -package grpc - -import ( - "bytes" - "errors" - "io" - "math" - "sync" - "time" - - "golang.org/x/net/context" - "golang.org/x/net/trace" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/transport" -) - -// StreamHandler defines the handler called by gRPC server to complete the -// execution of a streaming RPC. -type StreamHandler func(srv interface{}, stream ServerStream) error - -// StreamDesc represents a streaming RPC service's method specification. -type StreamDesc struct { - StreamName string - Handler StreamHandler - - // At least one of these is true. - ServerStreams bool - ClientStreams bool -} - -// Stream defines the common interface a client or server stream has to satisfy. -type Stream interface { - // Context returns the context for this stream. - Context() context.Context - // SendMsg blocks until it sends m, the stream is done or the stream - // breaks. - // On error, it aborts the stream and returns an RPC status on client - // side. On server side, it simply returns the error to the caller. - // SendMsg is called by generated code. Also Users can call SendMsg - // directly when it is really needed in their use cases. - SendMsg(m interface{}) error - // RecvMsg blocks until it receives a message or the stream is - // done. On client side, it returns io.EOF when the stream is done. On - // any other error, it aborts the stream and returns an RPC status. 
On - // server side, it simply returns the error to the caller. - RecvMsg(m interface{}) error -} - -// ClientStream defines the interface a client stream has to satisfy. -type ClientStream interface { - // Header returns the header metadata received from the server if there - // is any. It blocks if the metadata is not ready to read. - Header() (metadata.MD, error) - // Trailer returns the trailer metadata from the server, if there is any. - // It must only be called after stream.CloseAndRecv has returned, or - // stream.Recv has returned a non-nil error (including io.EOF). - Trailer() metadata.MD - // CloseSend closes the send direction of the stream. It closes the stream - // when non-nil error is met. - CloseSend() error - Stream -} - -// NewClientStream creates a new Stream for the client side. This is called -// by generated code. -func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) { - if cc.dopts.streamInt != nil { - return cc.dopts.streamInt(ctx, desc, cc, method, newClientStream, opts...) - } - return newClientStream(ctx, desc, cc, method, opts...) -} - -func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) { - var ( - t transport.ClientTransport - s *transport.Stream - put func() - ) - c := defaultCallInfo - for _, o := range opts { - if err := o.before(&c); err != nil { - return nil, toRPCErr(err) - } - } - callHdr := &transport.CallHdr{ - Host: cc.authority, - Method: method, - Flush: desc.ServerStreams && desc.ClientStreams, - } - if cc.dopts.cp != nil { - callHdr.SendCompress = cc.dopts.cp.Type() - } - var trInfo traceInfo - if EnableTracing { - trInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method) - trInfo.firstLine.client = true - if deadline, ok := ctx.Deadline(); ok { - trInfo.firstLine.deadline = deadline.Sub(time.Now()) - } - trInfo.tr.LazyLog(&trInfo.firstLine, false) - ctx = trace.NewContext(ctx, trInfo.tr) - defer func() { - if err != nil { - // Need to call tr.finish() if error is returned. - // Because tr will not be returned to caller. - trInfo.tr.LazyPrintf("RPC: [%v]", err) - trInfo.tr.SetError() - trInfo.tr.Finish() - } - }() - } - gopts := BalancerGetOptions{ - BlockingWait: !c.failFast, - } - for { - t, put, err = cc.getTransport(ctx, gopts) - if err != nil { - // TODO(zhaoq): Probably revisit the error handling. - if _, ok := err.(*rpcError); ok { - return nil, err - } - if err == errConnClosing || err == errConnUnavailable { - if c.failFast { - return nil, Errorf(codes.Unavailable, "%v", err) - } - continue - } - // All the other errors are treated as Internal errors. - return nil, Errorf(codes.Internal, "%v", err) - } - - s, err = t.NewStream(ctx, callHdr) - if err != nil { - if put != nil { - put() - put = nil - } - if _, ok := err.(transport.ConnectionError); ok || err == transport.ErrStreamDrain { - if c.failFast { - return nil, toRPCErr(err) - } - continue - } - return nil, toRPCErr(err) - } - break - } - cs := &clientStream{ - opts: opts, - c: c, - desc: desc, - codec: cc.dopts.codec, - cp: cc.dopts.cp, - dc: cc.dopts.dc, - - put: put, - t: t, - s: s, - p: &parser{r: s}, - - tracing: EnableTracing, - trInfo: trInfo, - } - if cc.dopts.cp != nil { - cs.cbuf = new(bytes.Buffer) - } - // Listen on ctx.Done() to detect cancellation and s.Done() to detect normal termination - // when there is no pending I/O operations on this stream. 
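// (The goroutine below watches four termination signals: t.Error() for a dead
// transport, s.Done() for normal stream completion, s.GoAway() for a server
// GOAWAY, and s.Context().Done() for caller cancellation or deadline expiry.)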
- go func() { - select { - case <-t.Error(): - // Incur transport error, simply exit. - case <-s.Done(): - // TODO: The trace of the RPC is terminated here when there is no pending - // I/O, which is probably not the optimal solution. - if s.StatusCode() == codes.OK { - cs.finish(nil) - } else { - cs.finish(Errorf(s.StatusCode(), "%s", s.StatusDesc())) - } - cs.closeTransportStream(nil) - case <-s.GoAway(): - cs.finish(errConnDrain) - cs.closeTransportStream(errConnDrain) - case <-s.Context().Done(): - err := s.Context().Err() - cs.finish(err) - cs.closeTransportStream(transport.ContextErr(err)) - } - }() - return cs, nil -} - -// clientStream implements a client side Stream. -type clientStream struct { - opts []CallOption - c callInfo - t transport.ClientTransport - s *transport.Stream - p *parser - desc *StreamDesc - codec Codec - cp Compressor - cbuf *bytes.Buffer - dc Decompressor - - tracing bool // set to EnableTracing when the clientStream is created. - - mu sync.Mutex - put func() - closed bool - // trInfo.tr is set when the clientStream is created (if EnableTracing is true), - // and is set to nil when the clientStream's finish method is called. - trInfo traceInfo -} - -func (cs *clientStream) Context() context.Context { - return cs.s.Context() -} - -func (cs *clientStream) Header() (metadata.MD, error) { - m, err := cs.s.Header() - if err != nil { - if _, ok := err.(transport.ConnectionError); !ok { - cs.closeTransportStream(err) - } - } - return m, err -} - -func (cs *clientStream) Trailer() metadata.MD { - return cs.s.Trailer() -} - -func (cs *clientStream) SendMsg(m interface{}) (err error) { - if cs.tracing { - cs.mu.Lock() - if cs.trInfo.tr != nil { - cs.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true) - } - cs.mu.Unlock() - } - defer func() { - if err != nil { - cs.finish(err) - } - if err == nil { - return - } - if err == io.EOF { - // Specialize the process for server streaming. SendMesg is only called - // once when creating the stream object. io.EOF needs to be skipped when - // the rpc is early finished (before the stream object is created.). - // TODO: It is probably better to move this into the generated code. - if !cs.desc.ClientStreams && cs.desc.ServerStreams { - err = nil - } - return - } - if _, ok := err.(transport.ConnectionError); !ok { - cs.closeTransportStream(err) - } - err = toRPCErr(err) - }() - out, err := encode(cs.codec, m, cs.cp, cs.cbuf) - defer func() { - if cs.cbuf != nil { - cs.cbuf.Reset() - } - }() - if err != nil { - return Errorf(codes.Internal, "grpc: %v", err) - } - return cs.t.Write(cs.s, out, &transport.Options{Last: false}) -} - -func (cs *clientStream) RecvMsg(m interface{}) (err error) { - err = recv(cs.p, cs.codec, cs.s, cs.dc, m, math.MaxInt32) - defer func() { - // err != nil indicates the termination of the stream. - if err != nil { - cs.finish(err) - } - }() - if err == nil { - if cs.tracing { - cs.mu.Lock() - if cs.trInfo.tr != nil { - cs.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true) - } - cs.mu.Unlock() - } - if !cs.desc.ClientStreams || cs.desc.ServerStreams { - return - } - // Special handling for client streaming rpc. 
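// (A client-streaming RPC expects exactly one response message, so the second
// recv below must yield io.EOF; a nil error at that point is a protocol
// violation.)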
- err = recv(cs.p, cs.codec, cs.s, cs.dc, m, math.MaxInt32) - cs.closeTransportStream(err) - if err == nil { - return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>")) - } - if err == io.EOF { - if cs.s.StatusCode() == codes.OK { - cs.finish(err) - return nil - } - return Errorf(cs.s.StatusCode(), "%s", cs.s.StatusDesc()) - } - return toRPCErr(err) - } - if _, ok := err.(transport.ConnectionError); !ok { - cs.closeTransportStream(err) - } - if err == io.EOF { - if cs.s.StatusCode() == codes.OK { - // Returns io.EOF to indicate the end of the stream. - return - } - return Errorf(cs.s.StatusCode(), "%s", cs.s.StatusDesc()) - } - return toRPCErr(err) -} - -func (cs *clientStream) CloseSend() (err error) { - err = cs.t.Write(cs.s, nil, &transport.Options{Last: true}) - defer func() { - if err != nil { - cs.finish(err) - } - }() - if err == nil || err == io.EOF { - return nil - } - if _, ok := err.(transport.ConnectionError); !ok { - cs.closeTransportStream(err) - } - err = toRPCErr(err) - return -} - -func (cs *clientStream) closeTransportStream(err error) { - cs.mu.Lock() - if cs.closed { - cs.mu.Unlock() - return - } - cs.closed = true - cs.mu.Unlock() - cs.t.CloseStream(cs.s, err) -} - -func (cs *clientStream) finish(err error) { - cs.mu.Lock() - defer cs.mu.Unlock() - for _, o := range cs.opts { - o.after(&cs.c) - } - if cs.put != nil { - cs.put() - cs.put = nil - } - if !cs.tracing { - return - } - if cs.trInfo.tr != nil { - if err == nil || err == io.EOF { - cs.trInfo.tr.LazyPrintf("RPC: [OK]") - } else { - cs.trInfo.tr.LazyPrintf("RPC: [%v]", err) - cs.trInfo.tr.SetError() - } - cs.trInfo.tr.Finish() - cs.trInfo.tr = nil - } -} - -// ServerStream defines the interface a server stream has to satisfy. -type ServerStream interface { - // SendHeader sends the header metadata. It should not be called - // after SendProto. It fails if called multiple times or if - // called after SendProto. - SendHeader(metadata.MD) error - // SetTrailer sets the trailer metadata which will be sent with the - // RPC status. - SetTrailer(metadata.MD) - Stream -} - -// serverStream implements a server side Stream. -type serverStream struct { - t transport.ServerTransport - s *transport.Stream - p *parser - codec Codec - cp Compressor - dc Decompressor - cbuf *bytes.Buffer - maxMsgSize int - statusCode codes.Code - statusDesc string - trInfo *traceInfo - - mu sync.Mutex // protects trInfo.tr after the service handler runs.
-} - -func (ss *serverStream) Context() context.Context { - return ss.s.Context() -} - -func (ss *serverStream) SendHeader(md metadata.MD) error { - return ss.t.WriteHeader(ss.s, md) -} - -func (ss *serverStream) SetTrailer(md metadata.MD) { - if md.Len() == 0 { - return - } - ss.s.SetTrailer(md) - return -} - -func (ss *serverStream) SendMsg(m interface{}) (err error) { - defer func() { - if ss.trInfo != nil { - ss.mu.Lock() - if ss.trInfo.tr != nil { - if err == nil { - ss.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true) - } else { - ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) - ss.trInfo.tr.SetError() - } - } - ss.mu.Unlock() - } - }() - out, err := encode(ss.codec, m, ss.cp, ss.cbuf) - defer func() { - if ss.cbuf != nil { - ss.cbuf.Reset() - } - }() - if err != nil { - err = Errorf(codes.Internal, "grpc: %v", err) - return err - } - if err := ss.t.Write(ss.s, out, &transport.Options{Last: false}); err != nil { - return toRPCErr(err) - } - return nil -} - -func (ss *serverStream) RecvMsg(m interface{}) (err error) { - defer func() { - if ss.trInfo != nil { - ss.mu.Lock() - if ss.trInfo.tr != nil { - if err == nil { - ss.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true) - } else if err != io.EOF { - ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) - ss.trInfo.tr.SetError() - } - } - ss.mu.Unlock() - } - }() - if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxMsgSize); err != nil { - if err == io.EOF { - return err - } - if err == io.ErrUnexpectedEOF { - err = Errorf(codes.Internal, io.ErrUnexpectedEOF.Error()) - } - return toRPCErr(err) - } - return nil -} diff --git a/vendor/google.golang.org/grpc/trace.go b/vendor/google.golang.org/grpc/trace.go deleted file mode 100644 index f6747e1dfa4b..000000000000 --- a/vendor/google.golang.org/grpc/trace.go +++ /dev/null @@ -1,119 +0,0 @@ -/* - * - * Copyright 2015, Google Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- * - */ - -package grpc - -import ( - "bytes" - "fmt" - "io" - "net" - "strings" - "time" - - "golang.org/x/net/trace" -) - -// EnableTracing controls whether to trace RPCs using the golang.org/x/net/trace package. -// This should only be set before any RPCs are sent or received by this program. -var EnableTracing = true - -// methodFamily returns the trace family for the given method. -// It turns "/pkg.Service/GetFoo" into "pkg.Service". -func methodFamily(m string) string { - m = strings.TrimPrefix(m, "/") // remove leading slash - if i := strings.Index(m, "/"); i >= 0 { - m = m[:i] // remove everything from second slash - } - if i := strings.LastIndex(m, "."); i >= 0 { - m = m[i+1:] // cut down to last dotted component - } - return m -} - -// traceInfo contains tracing information for an RPC. -type traceInfo struct { - tr trace.Trace - firstLine firstLine -} - -// firstLine is the first line of an RPC trace. -type firstLine struct { - client bool // whether this is a client (outgoing) RPC - remoteAddr net.Addr - deadline time.Duration // may be zero -} - -func (f *firstLine) String() string { - var line bytes.Buffer - io.WriteString(&line, "RPC: ") - if f.client { - io.WriteString(&line, "to") - } else { - io.WriteString(&line, "from") - } - fmt.Fprintf(&line, " %v deadline:", f.remoteAddr) - if f.deadline != 0 { - fmt.Fprint(&line, f.deadline) - } else { - io.WriteString(&line, "none") - } - return line.String() -} - -// payload represents an RPC request or response payload. -type payload struct { - sent bool // whether this is an outgoing payload - msg interface{} // e.g. a proto.Message - // TODO(dsymonds): add stringifying info to codec, and limit how much we hold here? -} - -func (p payload) String() string { - if p.sent { - return fmt.Sprintf("sent: %v", p.msg) - } - return fmt.Sprintf("recv: %v", p.msg) -} - -type fmtStringer struct { - format string - a []interface{} -} - -func (f *fmtStringer) String() string { - return fmt.Sprintf(f.format, f.a...) 
-} - -type stringer string - -func (s stringer) String() string { return string(s) } diff --git a/vendor/google.golang.org/grpc/transport/BUILD b/vendor/google.golang.org/grpc/transport/BUILD deleted file mode 100644 index 601216800336..000000000000 --- a/vendor/google.golang.org/grpc/transport/BUILD +++ /dev/null @@ -1,46 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "control.go", - "go16.go", - "go17.go", - "handler_server.go", - "http2_client.go", - "http2_server.go", - "http_util.go", - "pre_go16.go", - "transport.go", - ], - importpath = "google.golang.org/grpc/transport", - deps = [ - "//vendor/golang.org/x/net/context:go_default_library", - "//vendor/golang.org/x/net/http2:go_default_library", - "//vendor/golang.org/x/net/http2/hpack:go_default_library", - "//vendor/golang.org/x/net/trace:go_default_library", - "//vendor/google.golang.org/grpc/codes:go_default_library", - "//vendor/google.golang.org/grpc/credentials:go_default_library", - "//vendor/google.golang.org/grpc/grpclog:go_default_library", - "//vendor/google.golang.org/grpc/metadata:go_default_library", - "//vendor/google.golang.org/grpc/peer:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], -) diff --git a/vendor/google.golang.org/grpc/transport/control.go b/vendor/google.golang.org/grpc/transport/control.go deleted file mode 100644 index 4ef0830b56ca..000000000000 --- a/vendor/google.golang.org/grpc/transport/control.go +++ /dev/null @@ -1,215 +0,0 @@ -/* - * - * Copyright 2014, Google Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -package transport - -import ( - "fmt" - "sync" - - "golang.org/x/net/http2" -) - -const ( - // The default value of flow control window size in HTTP2 spec. 
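// (For context: 65535 octets is the default SETTINGS_INITIAL_WINDOW_SIZE from
// RFC 7540; the connection-level window below is 16x that, roughly 1 MiB, so
// a single stalled stream does not exhaust the whole connection's window.)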
- defaultWindowSize = 65535 - // The initial window size for flow control. - initialWindowSize = defaultWindowSize // for an RPC - initialConnWindowSize = defaultWindowSize * 16 // for a connection -) - -// The following defines various control items which could flow through -// the control buffer of transport. They represent different aspects of -// control tasks, e.g., flow control, settings, streaming resetting, etc. -type windowUpdate struct { - streamID uint32 - increment uint32 -} - -func (*windowUpdate) item() {} - -type settings struct { - ack bool - ss []http2.Setting -} - -func (*settings) item() {} - -type resetStream struct { - streamID uint32 - code http2.ErrCode -} - -func (*resetStream) item() {} - -type goAway struct { -} - -func (*goAway) item() {} - -type flushIO struct { -} - -func (*flushIO) item() {} - -type ping struct { - ack bool - data [8]byte -} - -func (*ping) item() {} - -// quotaPool is a pool which accumulates the quota and sends it to acquire() -// when it is available. -type quotaPool struct { - c chan int - - mu sync.Mutex - quota int -} - -// newQuotaPool creates a quotaPool which has quota q available to consume. -func newQuotaPool(q int) *quotaPool { - qb := &quotaPool{ - c: make(chan int, 1), - } - if q > 0 { - qb.c <- q - } else { - qb.quota = q - } - return qb -} - -// add adds n to the available quota and tries to send it on acquire. -func (qb *quotaPool) add(n int) { - qb.mu.Lock() - defer qb.mu.Unlock() - qb.quota += n - if qb.quota <= 0 { - return - } - select { - case qb.c <- qb.quota: - qb.quota = 0 - default: - } -} - -// cancel cancels the pending quota sent on acquire, if any. -func (qb *quotaPool) cancel() { - qb.mu.Lock() - defer qb.mu.Unlock() - select { - case n := <-qb.c: - qb.quota += n - default: - } -} - -// reset cancels the pending quota sent on acquired, incremented by v and sends -// it back on acquire. -func (qb *quotaPool) reset(v int) { - qb.mu.Lock() - defer qb.mu.Unlock() - select { - case n := <-qb.c: - qb.quota += n - default: - } - qb.quota += v - if qb.quota <= 0 { - return - } - select { - case qb.c <- qb.quota: - qb.quota = 0 - default: - } -} - -// acquire returns the channel on which available quota amounts are sent. -func (qb *quotaPool) acquire() <-chan int { - return qb.c -} - -// inFlow deals with inbound flow control -type inFlow struct { - // The inbound flow control limit for pending data. - limit uint32 - - mu sync.Mutex - // pendingData is the overall data which have been received but not been - // consumed by applications. - pendingData uint32 - // The amount of data the application has consumed but grpc has not sent - // window update for them. Used to reduce window update frequency. - pendingUpdate uint32 -} - -// onData is invoked when some data frame is received. It updates pendingData. -func (f *inFlow) onData(n uint32) error { - f.mu.Lock() - defer f.mu.Unlock() - f.pendingData += n - if f.pendingData+f.pendingUpdate > f.limit { - return fmt.Errorf("received %d-bytes data exceeding the limit %d bytes", f.pendingData+f.pendingUpdate, f.limit) - } - return nil -} - -// onRead is invoked when the application reads the data. It returns the window size -// to be sent to the peer.
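// (Worked example: with limit = 65535, onRead batches consumed bytes into
// pendingUpdate and returns 0 until pendingUpdate reaches limit/4 = 16383;
// only then does it hand back a WINDOW_UPDATE increment, reducing control
// frame traffic for many small reads.)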
-func (f *inFlow) onRead(n uint32) uint32 { - f.mu.Lock() - defer f.mu.Unlock() - if f.pendingData == 0 { - return 0 - } - f.pendingData -= n - f.pendingUpdate += n - if f.pendingUpdate >= f.limit/4 { - wu := f.pendingUpdate - f.pendingUpdate = 0 - return wu - } - return 0 -} - -func (f *inFlow) resetPendingData() uint32 { - f.mu.Lock() - defer f.mu.Unlock() - n := f.pendingData - f.pendingData = 0 - return n -} diff --git a/vendor/google.golang.org/grpc/transport/go16.go b/vendor/google.golang.org/grpc/transport/go16.go deleted file mode 100644 index ee1c46bad575..000000000000 --- a/vendor/google.golang.org/grpc/transport/go16.go +++ /dev/null @@ -1,46 +0,0 @@ -// +build go1.6,!go1.7 - -/* - * Copyright 2016, Google Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -package transport - -import ( - "net" - - "golang.org/x/net/context" -) - -// dialContext connects to the address on the named network. -func dialContext(ctx context.Context, network, address string) (net.Conn, error) { - return (&net.Dialer{Cancel: ctx.Done()}).Dial(network, address) -} diff --git a/vendor/google.golang.org/grpc/transport/go17.go b/vendor/google.golang.org/grpc/transport/go17.go deleted file mode 100644 index 356f13ff1972..000000000000 --- a/vendor/google.golang.org/grpc/transport/go17.go +++ /dev/null @@ -1,46 +0,0 @@ -// +build go1.7 - -/* - * Copyright 2016, Google Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. 
nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -package transport - -import ( - "net" - - "golang.org/x/net/context" -) - -// dialContext connects to the address on the named network. -func dialContext(ctx context.Context, network, address string) (net.Conn, error) { - return (&net.Dialer{}).DialContext(ctx, network, address) -} diff --git a/vendor/google.golang.org/grpc/transport/handler_server.go b/vendor/google.golang.org/grpc/transport/handler_server.go deleted file mode 100644 index 114e34906a14..000000000000 --- a/vendor/google.golang.org/grpc/transport/handler_server.go +++ /dev/null @@ -1,397 +0,0 @@ -/* - * Copyright 2016, Google Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -// This file is the implementation of a gRPC server using HTTP/2 which -// uses the standard Go http2 Server implementation (via the -// http.Handler interface), rather than speaking low-level HTTP/2 -// frames itself. It is the implementation of *grpc.Server.ServeHTTP. 
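// Illustrative sketch (not part of the deleted file): the handler-based path
// lets a stock net/http server carry gRPC, e.g. sharing one TLS port with
// REST handlers, assuming grpcServer is a *grpc.Server:
//
//	srv := &http.Server{Addr: ":8443", Handler: grpcServer}
//	// TLS is load-bearing here: net/http only negotiates HTTP/2 via ALPN.
//	log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))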
- -package transport - -import ( - "errors" - "fmt" - "io" - "net" - "net/http" - "strings" - "sync" - "time" - - "golang.org/x/net/context" - "golang.org/x/net/http2" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/peer" -) - -// NewServerHandlerTransport returns a ServerTransport handling gRPC -// from inside an http.Handler. It requires that the http Server -// supports HTTP/2. -func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request) (ServerTransport, error) { - if r.ProtoMajor != 2 { - return nil, errors.New("gRPC requires HTTP/2") - } - if r.Method != "POST" { - return nil, errors.New("invalid gRPC request method") - } - if !validContentType(r.Header.Get("Content-Type")) { - return nil, errors.New("invalid gRPC request content-type") - } - if _, ok := w.(http.Flusher); !ok { - return nil, errors.New("gRPC requires a ResponseWriter supporting http.Flusher") - } - if _, ok := w.(http.CloseNotifier); !ok { - return nil, errors.New("gRPC requires a ResponseWriter supporting http.CloseNotifier") - } - - st := &serverHandlerTransport{ - rw: w, - req: r, - closedCh: make(chan struct{}), - writes: make(chan func()), - } - - if v := r.Header.Get("grpc-timeout"); v != "" { - to, err := decodeTimeout(v) - if err != nil { - return nil, streamErrorf(codes.Internal, "malformed time-out: %v", err) - } - st.timeoutSet = true - st.timeout = to - } - - var metakv []string - if r.Host != "" { - metakv = append(metakv, ":authority", r.Host) - } - for k, vv := range r.Header { - k = strings.ToLower(k) - if isReservedHeader(k) && !isWhitelistedPseudoHeader(k) { - continue - } - for _, v := range vv { - if k == "user-agent" { - // user-agent is special. Copying logic of http_util.go. - if i := strings.LastIndex(v, " "); i == -1 { - // There is no application user agent string being set - continue - } else { - v = v[:i] - } - } - metakv = append(metakv, k, v) - } - } - st.headerMD = metadata.Pairs(metakv...) - - return st, nil -} - -// serverHandlerTransport is an implementation of ServerTransport -// which replies to exactly one gRPC request (exactly one HTTP request), -// using the net/http.Handler interface. This http.Handler is guaranteed -// at this point to be speaking over HTTP/2, so it's able to speak valid -// gRPC. -type serverHandlerTransport struct { - rw http.ResponseWriter - req *http.Request - timeoutSet bool - timeout time.Duration - didCommonHeaders bool - - headerMD metadata.MD - - closeOnce sync.Once - closedCh chan struct{} // closed on Close - - // writes is a channel of code to run serialized in the - // ServeHTTP (HandleStreams) goroutine. The channel is closed - // when WriteStatus is called. - writes chan func() -} - -func (ht *serverHandlerTransport) Close() error { - ht.closeOnce.Do(ht.closeCloseChanOnce) - return nil -} - -func (ht *serverHandlerTransport) closeCloseChanOnce() { close(ht.closedCh) } - -func (ht *serverHandlerTransport) RemoteAddr() net.Addr { return strAddr(ht.req.RemoteAddr) } - -// strAddr is a net.Addr backed by either a TCP "ip:port" string, or -// the empty string if unknown. 
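// (Example: a RemoteAddr of "10.1.2.3:5000" yields Network() == "tcp" and
// String() == "10.1.2.3:5000"; the empty string yields "" for both.)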
-type strAddr string - -func (a strAddr) Network() string { - if a != "" { - // Per the documentation on net/http.Request.RemoteAddr, if this is - // set, it's set to the IP:port of the peer (hence, TCP): - // https://golang.org/pkg/net/http/#Request - // - // If we want to support Unix sockets later, we can - // add our own grpc-specific convention within the - // grpc codebase to set RemoteAddr to a different - // format, or probably better: we can attach it to the - // context and use that from serverHandlerTransport.RemoteAddr. - return "tcp" - } - return "" -} - -func (a strAddr) String() string { return string(a) } - -// do runs fn in the ServeHTTP goroutine. -func (ht *serverHandlerTransport) do(fn func()) error { - select { - case ht.writes <- fn: - return nil - case <-ht.closedCh: - return ErrConnClosing - } -} - -func (ht *serverHandlerTransport) WriteStatus(s *Stream, statusCode codes.Code, statusDesc string) error { - err := ht.do(func() { - ht.writeCommonHeaders(s) - - // And flush, in case no header or body has been sent yet. - // This forces a separation of headers and trailers if this is the - // first call (for example, in end2end tests's TestNoService). - ht.rw.(http.Flusher).Flush() - - h := ht.rw.Header() - h.Set("Grpc-Status", fmt.Sprintf("%d", statusCode)) - if statusDesc != "" { - h.Set("Grpc-Message", encodeGrpcMessage(statusDesc)) - } - if md := s.Trailer(); len(md) > 0 { - for k, vv := range md { - // Clients don't tolerate reading restricted headers after some non restricted ones were sent. - if isReservedHeader(k) { - continue - } - for _, v := range vv { - // http2 ResponseWriter mechanism to - // send undeclared Trailers after the - // headers have possibly been written. - h.Add(http2.TrailerPrefix+k, v) - } - } - } - }) - close(ht.writes) - return err -} - -// writeCommonHeaders sets common headers on the first write -// call (Write, WriteHeader, or WriteStatus). -func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) { - if ht.didCommonHeaders { - return - } - ht.didCommonHeaders = true - - h := ht.rw.Header() - h["Date"] = nil // suppress Date to make tests happy; TODO: restore - h.Set("Content-Type", "application/grpc") - - // Predeclare trailers we'll set later in WriteStatus (after the body). - // This is a SHOULD in the HTTP RFC, and the way you add (known) - // Trailers per the net/http.ResponseWriter contract. - // See https://golang.org/pkg/net/http/#ResponseWriter - // and https://golang.org/pkg/net/http/#example_ResponseWriter_trailers - h.Add("Trailer", "Grpc-Status") - h.Add("Trailer", "Grpc-Message") - - if s.sendCompress != "" { - h.Set("Grpc-Encoding", s.sendCompress) - } -} - -func (ht *serverHandlerTransport) Write(s *Stream, data []byte, opts *Options) error { - return ht.do(func() { - ht.writeCommonHeaders(s) - ht.rw.Write(data) - if !opts.Delay { - ht.rw.(http.Flusher).Flush() - } - }) -} - -func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error { - return ht.do(func() { - ht.writeCommonHeaders(s) - h := ht.rw.Header() - for k, vv := range md { - // Clients don't tolerate reading restricted headers after some non restricted ones were sent. - if isReservedHeader(k) { - continue - } - for _, v := range vv { - h.Add(k, v) - } - } - ht.rw.WriteHeader(200) - ht.rw.(http.Flusher).Flush() - }) -} - -func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream)) { - // With this transport type there will be exactly 1 stream: this HTTP request. 
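// (The single-stream constraint is also why the writes channel exists: an
// http.ResponseWriter must not be used concurrently, so WriteHeader, Write,
// and WriteStatus all enqueue closures that run serialized on this goroutine
// via runStream.)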
- - var ctx context.Context - var cancel context.CancelFunc - if ht.timeoutSet { - ctx, cancel = context.WithTimeout(context.Background(), ht.timeout) - } else { - ctx, cancel = context.WithCancel(context.Background()) - } - - // requestOver is closed when either the request's context is done - // or the status has been written via WriteStatus. - requestOver := make(chan struct{}) - - // clientGone receives a single value if peer is gone, either - // because the underlying connection is dead or because the - // peer sends an http2 RST_STREAM. - clientGone := ht.rw.(http.CloseNotifier).CloseNotify() - go func() { - select { - case <-requestOver: - return - case <-ht.closedCh: - case <-clientGone: - } - cancel() - }() - - req := ht.req - - s := &Stream{ - id: 0, // irrelevant - windowHandler: func(int) {}, // nothing - cancel: cancel, - buf: newRecvBuffer(), - st: ht, - method: req.URL.Path, - recvCompress: req.Header.Get("grpc-encoding"), - } - pr := &peer.Peer{ - Addr: ht.RemoteAddr(), - } - if req.TLS != nil { - pr.AuthInfo = credentials.TLSInfo{State: *req.TLS} - } - ctx = metadata.NewContext(ctx, ht.headerMD) - ctx = peer.NewContext(ctx, pr) - s.ctx = newContextWithStream(ctx, s) - s.dec = &recvBufferReader{ctx: s.ctx, recv: s.buf} - - // readerDone is closed when the Body.Read-ing goroutine exits. - readerDone := make(chan struct{}) - go func() { - defer close(readerDone) - - // TODO: minimize garbage, optimize recvBuffer code/ownership - const readSize = 8196 - for buf := make([]byte, readSize); ; { - n, err := req.Body.Read(buf) - if n > 0 { - s.buf.put(&recvMsg{data: buf[:n:n]}) - buf = buf[n:] - } - if err != nil { - s.buf.put(&recvMsg{err: mapRecvMsgError(err)}) - return - } - if len(buf) == 0 { - buf = make([]byte, readSize) - } - } - }() - - // startStream is provided by the *grpc.Server's serveStreams. - // It starts a goroutine serving s and exits immediately. - // The goroutine that is started is the one that then calls - // into ht, calling WriteHeader, Write, WriteStatus, Close, etc. - startStream(s) - - ht.runStream() - close(requestOver) - - // Wait for reading goroutine to finish. - req.Body.Close() - <-readerDone -} - -func (ht *serverHandlerTransport) runStream() { - for { - select { - case fn, ok := <-ht.writes: - if !ok { - return - } - fn() - case <-ht.closedCh: - return - } - } -} - -func (ht *serverHandlerTransport) Drain() { - panic("Drain() is not implemented") -} - -// mapRecvMsgError returns the non-nil err into the appropriate -// error value as expected by callers of *grpc.parser.recvMsg. -// In particular, in can only be: -// * io.EOF -// * io.ErrUnexpectedEOF -// * of type transport.ConnectionError -// * of type transport.StreamError -func mapRecvMsgError(err error) error { - if err == io.EOF || err == io.ErrUnexpectedEOF { - return err - } - if se, ok := err.(http2.StreamError); ok { - if code, ok := http2ErrConvTab[se.Code]; ok { - return StreamError{ - Code: code, - Desc: se.Error(), - } - } - } - return connectionErrorf(true, err, err.Error()) -} diff --git a/vendor/google.golang.org/grpc/transport/http2_client.go b/vendor/google.golang.org/grpc/transport/http2_client.go deleted file mode 100644 index b7c736e054d5..000000000000 --- a/vendor/google.golang.org/grpc/transport/http2_client.go +++ /dev/null @@ -1,1064 +0,0 @@ -/* - * - * Copyright 2014, Google Inc. - * All rights reserved. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -package transport - -import ( - "bytes" - "fmt" - "io" - "math" - "net" - "strings" - "sync" - "time" - - "golang.org/x/net/context" - "golang.org/x/net/http2" - "golang.org/x/net/http2/hpack" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/peer" -) - -// http2Client implements the ClientTransport interface with HTTP2. -type http2Client struct { - target string // server name/addr - userAgent string - conn net.Conn // underlying communication channel - authInfo credentials.AuthInfo // auth info about the connection - nextID uint32 // the next stream ID to be used - - // writableChan synchronizes write access to the transport. - // A writer acquires the write lock by sending a value on writableChan - // and releases it by receiving from writableChan. - writableChan chan int - // shutdownChan is closed when Close is called. - // Blocking operations should select on shutdownChan to avoid - // blocking forever after Close. - // TODO(zhaoq): Maybe have a channel context? - shutdownChan chan struct{} - // errorChan is closed to notify the I/O error to the caller. - errorChan chan struct{} - // goAway is closed to notify the upper layer (i.e., addrConn.transportMonitor) - // that the server sent GoAway on this transport. - goAway chan struct{} - - framer *framer - hBuf *bytes.Buffer // the buffer for HPACK encoding - hEnc *hpack.Encoder // HPACK encoder - - // controlBuf delivers all the control related tasks (e.g., window - // updates, reset streams, and various settings) to the controller. - controlBuf *recvBuffer - fc *inFlow - // sendQuotaPool provides flow control to outbound message. - sendQuotaPool *quotaPool - // streamsQuota limits the max number of concurrent streams. - streamsQuota *quotaPool - - // The scheme used: https if TLS is on, http otherwise. 
- scheme string - - creds []credentials.PerRPCCredentials - - mu sync.Mutex // guard the following variables - state transportState // the state of underlying connection - activeStreams map[uint32]*Stream - // The max number of concurrent streams - maxStreams int - // the per-stream outbound flow control window size set by the peer. - streamSendQuota uint32 - // goAwayID records the Last-Stream-ID in the GoAway frame from the server. - goAwayID uint32 - // prevGoAway ID records the Last-Stream-ID in the previous GOAway frame. - prevGoAwayID uint32 -} - -func dial(fn func(context.Context, string) (net.Conn, error), ctx context.Context, addr string) (net.Conn, error) { - if fn != nil { - return fn(ctx, addr) - } - return dialContext(ctx, "tcp", addr) -} - -func isTemporary(err error) bool { - switch err { - case io.EOF: - // Connection closures may be resolved upon retry, and are thus - // treated as temporary. - return true - case context.DeadlineExceeded: - // In Go 1.7, context.DeadlineExceeded implements Timeout(), and this - // special case is not needed. Until then, we need to keep this - // clause. - return true - } - - switch err := err.(type) { - case interface { - Temporary() bool - }: - return err.Temporary() - case interface { - Timeout() bool - }: - // Timeouts may be resolved upon retry, and are thus treated as - // temporary. - return err.Timeout() - } - return false -} - -// newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2 -// and starts to receive messages on it. Non-nil error returns if construction -// fails. -func newHTTP2Client(ctx context.Context, addr string, opts ConnectOptions) (_ ClientTransport, err error) { - scheme := "http" - conn, err := dial(opts.Dialer, ctx, addr) - if err != nil { - return nil, connectionErrorf(true, err, "transport: %v", err) - } - // Any further errors will close the underlying connection - defer func(conn net.Conn) { - if err != nil { - conn.Close() - } - }(conn) - var authInfo credentials.AuthInfo - if creds := opts.TransportCredentials; creds != nil { - scheme = "https" - conn, authInfo, err = creds.ClientHandshake(ctx, addr, conn) - if err != nil { - // Credentials handshake errors are typically considered permanent - // to avoid retrying on e.g. bad certificates. - temp := isTemporary(err) - return nil, connectionErrorf(temp, err, "transport: %v", err) - } - } - ua := primaryUA - if opts.UserAgent != "" { - ua = opts.UserAgent + " " + ua - } - var buf bytes.Buffer - t := &http2Client{ - target: addr, - userAgent: ua, - conn: conn, - authInfo: authInfo, - // The client initiated stream id is odd starting from 1. - nextID: 1, - writableChan: make(chan int, 1), - shutdownChan: make(chan struct{}), - errorChan: make(chan struct{}), - goAway: make(chan struct{}), - framer: newFramer(conn), - hBuf: &buf, - hEnc: hpack.NewEncoder(&buf), - controlBuf: newRecvBuffer(), - fc: &inFlow{limit: initialConnWindowSize}, - sendQuotaPool: newQuotaPool(defaultWindowSize), - scheme: scheme, - state: reachable, - activeStreams: make(map[uint32]*Stream), - creds: opts.PerRPCCredentials, - maxStreams: math.MaxInt32, - streamSendQuota: defaultWindowSize, - } - // Start the reader goroutine for incoming message. Each transport has - // a dedicated goroutine which reads HTTP2 frame from network. Then it - // dispatches the frame to the corresponding stream entity. - go t.reader() - // Send connection preface to server. 
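// (clientPreface is the fixed HTTP/2 client connection preface
// "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n" defined by RFC 7540; the server accepts
// no frames until it arrives in full, hence the strict length check below.)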
- n, err := t.conn.Write(clientPreface) - if err != nil { - t.Close() - return nil, connectionErrorf(true, err, "transport: %v", err) - } - if n != len(clientPreface) { - t.Close() - return nil, connectionErrorf(true, err, "transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface)) - } - if initialWindowSize != defaultWindowSize { - err = t.framer.writeSettings(true, http2.Setting{ - ID: http2.SettingInitialWindowSize, - Val: uint32(initialWindowSize), - }) - } else { - err = t.framer.writeSettings(true) - } - if err != nil { - t.Close() - return nil, connectionErrorf(true, err, "transport: %v", err) - } - // Adjust the connection flow control window if needed. - if delta := uint32(initialConnWindowSize - defaultWindowSize); delta > 0 { - if err := t.framer.writeWindowUpdate(true, 0, delta); err != nil { - t.Close() - return nil, connectionErrorf(true, err, "transport: %v", err) - } - } - go t.controller() - t.writableChan <- 0 - return t, nil -} - -func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream { - // TODO(zhaoq): Handle uint32 overflow of Stream.id. - s := &Stream{ - id: t.nextID, - done: make(chan struct{}), - goAway: make(chan struct{}), - method: callHdr.Method, - sendCompress: callHdr.SendCompress, - buf: newRecvBuffer(), - fc: &inFlow{limit: initialWindowSize}, - sendQuotaPool: newQuotaPool(int(t.streamSendQuota)), - headerChan: make(chan struct{}), - } - t.nextID += 2 - s.windowHandler = func(n int) { - t.updateWindow(s, uint32(n)) - } - // The client side stream context should have exactly the same life cycle with the user provided context. - // That means, s.ctx should be read-only. And s.ctx is done iff ctx is done. - // So we use the original context here instead of creating a copy. - s.ctx = ctx - s.dec = &recvBufferReader{ - ctx: s.ctx, - goAway: s.goAway, - recv: s.buf, - } - return s -} - -// NewStream creates a stream and register it into the transport as "active" -// streams. -func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Stream, err error) { - // Record the timeout value on the context. - var timeout time.Duration - if dl, ok := ctx.Deadline(); ok { - timeout = dl.Sub(time.Now()) - } - select { - case <-ctx.Done(): - return nil, ContextErr(ctx.Err()) - default: - } - pr := &peer.Peer{ - Addr: t.conn.RemoteAddr(), - } - // Attach Auth info if there is any. - if t.authInfo != nil { - pr.AuthInfo = t.authInfo - } - ctx = peer.NewContext(ctx, pr) - authData := make(map[string]string) - for _, c := range t.creds { - // Construct URI required to get auth request metadata. - var port string - if pos := strings.LastIndex(t.target, ":"); pos != -1 { - // Omit port if it is the default one. 
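The connection setup sequence in newHTTP2Client is preface first, then SETTINGS, then an optional connection-level WINDOW_UPDATE. A sketch of the first two steps against an in-memory buffer, assuming golang.org/x/net/http2 (the same package the vendored framer wraps):

package main

import (
	"bytes"
	"fmt"

	"golang.org/x/net/http2"
)

func main() {
	var wire bytes.Buffer
	// The fixed ASCII client preface goes out before any frame.
	wire.WriteString(http2.ClientPreface)
	// An initial SETTINGS frame follows; a non-default per-stream window is
	// advertised here, as the constructor does when initialWindowSize
	// differs from defaultWindowSize.
	fr := http2.NewFramer(&wire, nil)
	if err := fr.WriteSettings(http2.Setting{
		ID:  http2.SettingInitialWindowSize,
		Val: 1 << 20, // illustrative value
	}); err != nil {
		panic(err)
	}
	fmt.Printf("handshake prefix: %d bytes\n", wire.Len())
}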
- if t.target[pos+1:] != "443" { - port = ":" + t.target[pos+1:] - } - } - pos := strings.LastIndex(callHdr.Method, "/") - if pos == -1 { - return nil, streamErrorf(codes.InvalidArgument, "transport: malformed method name: %q", callHdr.Method) - } - audience := "https://" + callHdr.Host + port + callHdr.Method[:pos] - data, err := c.GetRequestMetadata(ctx, audience) - if err != nil { - return nil, streamErrorf(codes.InvalidArgument, "transport: %v", err) - } - for k, v := range data { - authData[k] = v - } - } - t.mu.Lock() - if t.activeStreams == nil { - t.mu.Unlock() - return nil, ErrConnClosing - } - if t.state == draining { - t.mu.Unlock() - return nil, ErrStreamDrain - } - if t.state != reachable { - t.mu.Unlock() - return nil, ErrConnClosing - } - checkStreamsQuota := t.streamsQuota != nil - t.mu.Unlock() - if checkStreamsQuota { - sq, err := wait(ctx, nil, nil, t.shutdownChan, t.streamsQuota.acquire()) - if err != nil { - return nil, err - } - // Returns the quota balance back. - if sq > 1 { - t.streamsQuota.add(sq - 1) - } - } - if _, err := wait(ctx, nil, nil, t.shutdownChan, t.writableChan); err != nil { - // Return the quota back now because there is no stream returned to the caller. - if _, ok := err.(StreamError); ok && checkStreamsQuota { - t.streamsQuota.add(1) - } - return nil, err - } - t.mu.Lock() - if t.state == draining { - t.mu.Unlock() - if checkStreamsQuota { - t.streamsQuota.add(1) - } - // Need to make t writable again so that the rpc in flight can still proceed. - t.writableChan <- 0 - return nil, ErrStreamDrain - } - if t.state != reachable { - t.mu.Unlock() - return nil, ErrConnClosing - } - s := t.newStream(ctx, callHdr) - t.activeStreams[s.id] = s - - // This stream is not counted when applySetings(...) initialize t.streamsQuota. - // Reset t.streamsQuota to the right value. - var reset bool - if !checkStreamsQuota && t.streamsQuota != nil { - reset = true - } - t.mu.Unlock() - if reset { - t.streamsQuota.reset(-1) - } - - // HPACK encodes various headers. Note that once WriteField(...) is - // called, the corresponding headers/continuation frame has to be sent - // because hpack.Encoder is stateful. - t.hBuf.Reset() - t.hEnc.WriteField(hpack.HeaderField{Name: ":method", Value: "POST"}) - t.hEnc.WriteField(hpack.HeaderField{Name: ":scheme", Value: t.scheme}) - t.hEnc.WriteField(hpack.HeaderField{Name: ":path", Value: callHdr.Method}) - t.hEnc.WriteField(hpack.HeaderField{Name: ":authority", Value: callHdr.Host}) - t.hEnc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"}) - t.hEnc.WriteField(hpack.HeaderField{Name: "user-agent", Value: t.userAgent}) - t.hEnc.WriteField(hpack.HeaderField{Name: "te", Value: "trailers"}) - - if callHdr.SendCompress != "" { - t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-encoding", Value: callHdr.SendCompress}) - } - if timeout > 0 { - t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-timeout", Value: encodeTimeout(timeout)}) - } - for k, v := range authData { - // Capital header names are illegal in HTTP/2. - k = strings.ToLower(k) - t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: v}) - } - var ( - hasMD bool - endHeaders bool - ) - if md, ok := metadata.FromContext(ctx); ok { - hasMD = true - for k, v := range md { - // HTTP doesn't allow you to set pseudoheaders after non pseudoheaders were set. 
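The audience URI assembled above is scheme plus authority (with the default HTTPS port elided) plus the service portion of the full method name. A small sketch that reproduces the arithmetic (the function name here is hypothetical):

package main

import (
	"fmt"
	"strings"
)

// audienceFor mirrors the construction in NewStream: "https://" + host +
// non-default port + everything in the method name before its last '/'.
func audienceFor(target, host, method string) (string, error) {
	var port string
	if pos := strings.LastIndex(target, ":"); pos != -1 {
		if target[pos+1:] != "443" {
			port = ":" + target[pos+1:]
		}
	}
	pos := strings.LastIndex(method, "/")
	if pos == -1 {
		return "", fmt.Errorf("malformed method name: %q", method)
	}
	return "https://" + host + port + method[:pos], nil
}

func main() {
	a, _ := audienceFor("example.com:8443", "example.com", "/pkg.Service/Call")
	fmt.Println(a) // https://example.com:8443/pkg.Service
}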
- if isReservedHeader(k) { - continue - } - for _, entry := range v { - t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: entry}) - } - } - } - first := true - // Sends the headers in a single batch even when they span multiple frames. - for !endHeaders { - size := t.hBuf.Len() - if size > http2MaxFrameLen { - size = http2MaxFrameLen - } else { - endHeaders = true - } - var flush bool - if endHeaders && (hasMD || callHdr.Flush) { - flush = true - } - if first { - // Sends a HeadersFrame to server to start a new stream. - p := http2.HeadersFrameParam{ - StreamID: s.id, - BlockFragment: t.hBuf.Next(size), - EndStream: false, - EndHeaders: endHeaders, - } - // Do a force flush for the buffered frames iff it is the last headers frame - // and there is header metadata to be sent. Otherwise, there is flushing until - // the corresponding data frame is written. - err = t.framer.writeHeaders(flush, p) - first = false - } else { - // Sends Continuation frames for the leftover headers. - err = t.framer.writeContinuation(flush, s.id, endHeaders, t.hBuf.Next(size)) - } - if err != nil { - t.notifyError(err) - return nil, connectionErrorf(true, err, "transport: %v", err) - } - } - t.writableChan <- 0 - return s, nil -} - -// CloseStream clears the footprint of a stream when the stream is not needed any more. -// This must not be executed in reader's goroutine. -func (t *http2Client) CloseStream(s *Stream, err error) { - var updateStreams bool - t.mu.Lock() - if t.activeStreams == nil { - t.mu.Unlock() - return - } - if t.streamsQuota != nil { - updateStreams = true - } - delete(t.activeStreams, s.id) - if t.state == draining && len(t.activeStreams) == 0 { - // The transport is draining and s is the last live stream on t. - t.mu.Unlock() - t.Close() - return - } - t.mu.Unlock() - if updateStreams { - t.streamsQuota.add(1) - } - s.mu.Lock() - if q := s.fc.resetPendingData(); q > 0 { - if n := t.fc.onRead(q); n > 0 { - t.controlBuf.put(&windowUpdate{0, n}) - } - } - if s.state == streamDone { - s.mu.Unlock() - return - } - if !s.headerDone { - close(s.headerChan) - s.headerDone = true - } - s.state = streamDone - s.mu.Unlock() - if se, ok := err.(StreamError); ok && se.Code != codes.DeadlineExceeded { - t.controlBuf.put(&resetStream{s.id, http2.ErrCodeCancel}) - } -} - -// Close kicks off the shutdown process of the transport. This should be called -// only once on a transport. Once it is called, the transport should not be -// accessed any more. -func (t *http2Client) Close() (err error) { - t.mu.Lock() - if t.state == closing { - t.mu.Unlock() - return - } - if t.state == reachable || t.state == draining { - close(t.errorChan) - } - t.state = closing - t.mu.Unlock() - close(t.shutdownChan) - err = t.conn.Close() - t.mu.Lock() - streams := t.activeStreams - t.activeStreams = nil - t.mu.Unlock() - // Notify all active streams. - for _, s := range streams { - s.mu.Lock() - if !s.headerDone { - close(s.headerChan) - s.headerDone = true - } - s.mu.Unlock() - s.write(recvMsg{err: ErrConnClosing}) - } - return -} - -func (t *http2Client) GracefulClose() error { - t.mu.Lock() - switch t.state { - case unreachable: - // The server may close the connection concurrently. t is not available for - // any streams. Close it now. - t.mu.Unlock() - t.Close() - return nil - case closing: - t.mu.Unlock() - return nil - } - // Notify the streams which were initiated after the server sent GOAWAY. 
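The for !endHeaders loop above sends one HPACK block as a HEADERS frame plus however many CONTINUATION frames are needed, each capped at http2MaxFrameLen. A sketch of just the size arithmetic (names hypothetical):

package main

import "fmt"

// frameSizes splits a header block of blockLen bytes into per-frame sizes of
// at most maxLen bytes, mirroring the t.hBuf.Next(size) loop.
func frameSizes(blockLen, maxLen int) []int {
	var sizes []int
	for blockLen > 0 {
		n := blockLen
		if n > maxLen {
			n = maxLen
		}
		sizes = append(sizes, n)
		blockLen -= n
	}
	return sizes
}

func main() {
	fmt.Println(frameSizes(40000, 16384)) // [16384 16384 7232]
}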
- select { - case <-t.goAway: - n := t.prevGoAwayID - if n == 0 && t.nextID > 1 { - n = t.nextID - 2 - } - m := t.goAwayID + 2 - if m == 2 { - m = 1 - } - for i := m; i <= n; i += 2 { - if s, ok := t.activeStreams[i]; ok { - close(s.goAway) - } - } - default: - } - if t.state == draining { - t.mu.Unlock() - return nil - } - t.state = draining - active := len(t.activeStreams) - t.mu.Unlock() - if active == 0 { - return t.Close() - } - return nil -} - -// Write formats the data into HTTP2 data frame(s) and sends it out. The caller -// should proceed only if Write returns nil. -// TODO(zhaoq): opts.Delay is ignored in this implementation. Support it later -// if it improves the performance. -func (t *http2Client) Write(s *Stream, data []byte, opts *Options) error { - r := bytes.NewBuffer(data) - for { - var p []byte - if r.Len() > 0 { - size := http2MaxFrameLen - s.sendQuotaPool.add(0) - // Wait until the stream has some quota to send the data. - sq, err := wait(s.ctx, s.done, s.goAway, t.shutdownChan, s.sendQuotaPool.acquire()) - if err != nil { - return err - } - t.sendQuotaPool.add(0) - // Wait until the transport has some quota to send the data. - tq, err := wait(s.ctx, s.done, s.goAway, t.shutdownChan, t.sendQuotaPool.acquire()) - if err != nil { - if _, ok := err.(StreamError); ok || err == io.EOF { - t.sendQuotaPool.cancel() - } - return err - } - if sq < size { - size = sq - } - if tq < size { - size = tq - } - p = r.Next(size) - ps := len(p) - if ps < sq { - // Overbooked stream quota. Return it back. - s.sendQuotaPool.add(sq - ps) - } - if ps < tq { - // Overbooked transport quota. Return it back. - t.sendQuotaPool.add(tq - ps) - } - } - var ( - endStream bool - forceFlush bool - ) - if opts.Last && r.Len() == 0 { - endStream = true - } - // Indicate there is a writer who is about to write a data frame. - t.framer.adjustNumWriters(1) - // Got some quota. Try to acquire writing privilege on the transport. - if _, err := wait(s.ctx, s.done, s.goAway, t.shutdownChan, t.writableChan); err != nil { - if _, ok := err.(StreamError); ok || err == io.EOF { - // Return the connection quota back. - t.sendQuotaPool.add(len(p)) - } - if t.framer.adjustNumWriters(-1) == 0 { - // This writer is the last one in this batch and has the - // responsibility to flush the buffered frames. It queues - // a flush request to controlBuf instead of flushing directly - // in order to avoid the race with other writing or flushing. - t.controlBuf.put(&flushIO{}) - } - return err - } - select { - case <-s.ctx.Done(): - t.sendQuotaPool.add(len(p)) - if t.framer.adjustNumWriters(-1) == 0 { - t.controlBuf.put(&flushIO{}) - } - t.writableChan <- 0 - return ContextErr(s.ctx.Err()) - default: - } - if r.Len() == 0 && t.framer.adjustNumWriters(0) == 1 { - // Do a force flush iff this is last frame for the entire gRPC message - // and the caller is the only writer at this moment. - forceFlush = true - } - // If WriteData fails, all the pending streams will be handled - // by http2Client.Close(). No explicit CloseStream() needs to be - // invoked. 
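The GOAWAY bookkeeping in GracefulClose above closes s.goAway only for streams the server will not process: client stream IDs are odd, so the affected range runs from just above the server's LastStreamID up to the highest ID handed out, stepping by two, with prevGoAwayID bounding repeated GOAWAYs. A sketch of that arithmetic (the function name is hypothetical):

package main

import "fmt"

// affectedStreams reproduces the m..n walk above: the IDs of client streams
// initiated after the server's advertised LastStreamID.
func affectedStreams(goAwayID, prevGoAwayID, nextID uint32) []uint32 {
	n := prevGoAwayID
	if n == 0 && nextID > 1 {
		n = nextID - 2 // highest stream ID handed out so far
	}
	m := goAwayID + 2
	if m == 2 {
		m = 1
	}
	var ids []uint32
	for i := m; i <= n; i += 2 {
		ids = append(ids, i)
	}
	return ids
}

func main() {
	// Server said LastStreamID=5; the client has handed out IDs up to 11.
	fmt.Println(affectedStreams(5, 0, 13)) // [7 9 11]
}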
- if err := t.framer.writeData(forceFlush, s.id, endStream, p); err != nil { - t.notifyError(err) - return connectionErrorf(true, err, "transport: %v", err) - } - if t.framer.adjustNumWriters(-1) == 0 { - t.framer.flushWrite() - } - t.writableChan <- 0 - if r.Len() == 0 { - break - } - } - if !opts.Last { - return nil - } - s.mu.Lock() - if s.state != streamDone { - s.state = streamWriteDone - } - s.mu.Unlock() - return nil -} - -func (t *http2Client) getStream(f http2.Frame) (*Stream, bool) { - t.mu.Lock() - defer t.mu.Unlock() - s, ok := t.activeStreams[f.Header().StreamID] - return s, ok -} - -// updateWindow adjusts the inbound quota for the stream and the transport. -// Window updates will deliver to the controller for sending when -// the cumulative quota exceeds the corresponding threshold. -func (t *http2Client) updateWindow(s *Stream, n uint32) { - s.mu.Lock() - defer s.mu.Unlock() - if s.state == streamDone { - return - } - if w := t.fc.onRead(n); w > 0 { - t.controlBuf.put(&windowUpdate{0, w}) - } - if w := s.fc.onRead(n); w > 0 { - t.controlBuf.put(&windowUpdate{s.id, w}) - } -} - -func (t *http2Client) handleData(f *http2.DataFrame) { - size := len(f.Data()) - if err := t.fc.onData(uint32(size)); err != nil { - t.notifyError(connectionErrorf(true, err, "%v", err)) - return - } - // Select the right stream to dispatch. - s, ok := t.getStream(f) - if !ok { - if w := t.fc.onRead(uint32(size)); w > 0 { - t.controlBuf.put(&windowUpdate{0, w}) - } - return - } - if size > 0 { - s.mu.Lock() - if s.state == streamDone { - s.mu.Unlock() - // The stream has been closed. Release the corresponding quota. - if w := t.fc.onRead(uint32(size)); w > 0 { - t.controlBuf.put(&windowUpdate{0, w}) - } - return - } - if err := s.fc.onData(uint32(size)); err != nil { - s.state = streamDone - s.statusCode = codes.Internal - s.statusDesc = err.Error() - close(s.done) - s.mu.Unlock() - s.write(recvMsg{err: io.EOF}) - t.controlBuf.put(&resetStream{s.id, http2.ErrCodeFlowControl}) - return - } - s.mu.Unlock() - // TODO(bradfitz, zhaoq): A copy is required here because there is no - // guarantee f.Data() is consumed before the arrival of next frame. - // Can this copy be eliminated? - data := make([]byte, size) - copy(data, f.Data()) - s.write(recvMsg{data: data}) - } - // The server has closed the stream without sending trailers. Record that - // the read direction is closed, and set the status appropriately. 
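Write above acquires send quota from two pools, stream-level and transport-level, takes the minimum (capped at the frame limit and the bytes actually pending), and returns whatever it overbooked. A sketch of that slicing, with hypothetical names:

package main

import "fmt"

// sliceQuota returns how many bytes may be sent now, plus the overbooked
// remainders handed back to the stream and transport quota pools.
func sliceQuota(want, streamQ, transportQ, maxFrame int) (send, backToStream, backToTransport int) {
	size := maxFrame
	if streamQ < size {
		size = streamQ
	}
	if transportQ < size {
		size = transportQ
	}
	if want < size {
		size = want
	}
	return size, streamQ - size, transportQ - size
}

func main() {
	send, bs, bt := sliceQuota(1000, 600, 900, 16384)
	fmt.Println(send, bs, bt) // 600 0 300
}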
- if f.FrameHeader.Flags.Has(http2.FlagDataEndStream) { - s.mu.Lock() - if s.state == streamDone { - s.mu.Unlock() - return - } - s.state = streamDone - s.statusCode = codes.Internal - s.statusDesc = "server closed the stream without sending trailers" - close(s.done) - s.mu.Unlock() - s.write(recvMsg{err: io.EOF}) - } -} - -func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) { - s, ok := t.getStream(f) - if !ok { - return - } - s.mu.Lock() - if s.state == streamDone { - s.mu.Unlock() - return - } - s.state = streamDone - if !s.headerDone { - close(s.headerChan) - s.headerDone = true - } - s.statusCode, ok = http2ErrConvTab[http2.ErrCode(f.ErrCode)] - if !ok { - grpclog.Println("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error ", f.ErrCode) - s.statusCode = codes.Unknown - } - s.statusDesc = fmt.Sprintf("stream terminated by RST_STREAM with error code: %d", f.ErrCode) - close(s.done) - s.mu.Unlock() - s.write(recvMsg{err: io.EOF}) -} - -func (t *http2Client) handleSettings(f *http2.SettingsFrame) { - if f.IsAck() { - return - } - var ss []http2.Setting - f.ForeachSetting(func(s http2.Setting) error { - ss = append(ss, s) - return nil - }) - // The settings will be applied once the ack is sent. - t.controlBuf.put(&settings{ack: true, ss: ss}) -} - -func (t *http2Client) handlePing(f *http2.PingFrame) { - pingAck := &ping{ack: true} - copy(pingAck.data[:], f.Data[:]) - t.controlBuf.put(pingAck) -} - -func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { - t.mu.Lock() - if t.state == reachable || t.state == draining { - if f.LastStreamID > 0 && f.LastStreamID%2 != 1 { - t.mu.Unlock() - t.notifyError(connectionErrorf(true, nil, "received illegal http2 GOAWAY frame: stream ID %d is even", f.LastStreamID)) - return - } - select { - case <-t.goAway: - id := t.goAwayID - // t.goAway has been closed (i.e., multiple GOAWAYs). - if id < f.LastStreamID { - t.mu.Unlock() - t.notifyError(connectionErrorf(true, nil, "received illegal http2 GOAWAY frame: previously received GOAWAY frame with LastStreamID %d, currently received %d", id, f.LastStreamID)) - return - } - t.prevGoAwayID = id - t.goAwayID = f.LastStreamID - t.mu.Unlock() - return - default: - } - t.goAwayID = f.LastStreamID - close(t.goAway) - } - t.mu.Unlock() -} - -func (t *http2Client) handleWindowUpdate(f *http2.WindowUpdateFrame) { - id := f.Header().StreamID - incr := f.Increment - if id == 0 { - t.sendQuotaPool.add(int(incr)) - return - } - if s, ok := t.getStream(f); ok { - s.sendQuotaPool.add(int(incr)) - } -} - -// operateHeaders takes action on the decoded headers. -func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { - s, ok := t.getStream(frame) - if !ok { - return - } - var state decodeState - for _, hf := range frame.Fields { - state.processHeaderField(hf) - } - if state.err != nil { - s.mu.Lock() - if !s.headerDone { - close(s.headerChan) - s.headerDone = true - } - s.mu.Unlock() - s.write(recvMsg{err: state.err}) - // Something went wrong. Stop reading even when there is data remaining.
- return - } - - endStream := frame.StreamEnded() - - s.mu.Lock() - if !endStream { - s.recvCompress = state.encoding - } - if !s.headerDone { - if !endStream && len(state.mdata) > 0 { - s.header = state.mdata - } - close(s.headerChan) - s.headerDone = true - } - if !endStream || s.state == streamDone { - s.mu.Unlock() - return - } - - if len(state.mdata) > 0 { - s.trailer = state.mdata - } - s.statusCode = state.statusCode - s.statusDesc = state.statusDesc - close(s.done) - s.state = streamDone - s.mu.Unlock() - s.write(recvMsg{err: io.EOF}) -} - -func handleMalformedHTTP2(s *Stream, err error) { - s.mu.Lock() - if !s.headerDone { - close(s.headerChan) - s.headerDone = true - } - s.mu.Unlock() - s.write(recvMsg{err: err}) -} - -// reader runs as a separate goroutine in charge of reading data from network -// connection. -// -// TODO(zhaoq): currently one reader per transport. Investigate whether this is -// optimal. -// TODO(zhaoq): Check the validity of the incoming frame sequence. -func (t *http2Client) reader() { - // Check the validity of server preface. - frame, err := t.framer.readFrame() - if err != nil { - t.notifyError(err) - return - } - sf, ok := frame.(*http2.SettingsFrame) - if !ok { - t.notifyError(err) - return - } - t.handleSettings(sf) - - // loop to keep reading incoming messages on this transport. - for { - frame, err := t.framer.readFrame() - if err != nil { - // Abort an active stream if the http2.Framer returns a - // http2.StreamError. This can happen only if the server's response - // is malformed http2. - if se, ok := err.(http2.StreamError); ok { - t.mu.Lock() - s := t.activeStreams[se.StreamID] - t.mu.Unlock() - if s != nil { - // use error detail to provide better err message - handleMalformedHTTP2(s, streamErrorf(http2ErrConvTab[se.Code], "%v", t.framer.errorDetail())) - } - continue - } else { - // Transport error. - t.notifyError(err) - return - } - } - switch frame := frame.(type) { - case *http2.MetaHeadersFrame: - t.operateHeaders(frame) - case *http2.DataFrame: - t.handleData(frame) - case *http2.RSTStreamFrame: - t.handleRSTStream(frame) - case *http2.SettingsFrame: - t.handleSettings(frame) - case *http2.PingFrame: - t.handlePing(frame) - case *http2.GoAwayFrame: - t.handleGoAway(frame) - case *http2.WindowUpdateFrame: - t.handleWindowUpdate(frame) - default: - grpclog.Printf("transport: http2Client.reader got unhandled frame type %v.", frame) - } - } -} - -func (t *http2Client) applySettings(ss []http2.Setting) { - for _, s := range ss { - switch s.ID { - case http2.SettingMaxConcurrentStreams: - // TODO(zhaoq): This is a hack to avoid significant refactoring of the - // code to deal with the unrealistic int32 overflow. Probably will try - // to find a better way to handle this later. - if s.Val > math.MaxInt32 { - s.Val = math.MaxInt32 - } - t.mu.Lock() - reset := t.streamsQuota != nil - if !reset { - t.streamsQuota = newQuotaPool(int(s.Val) - len(t.activeStreams)) - } - ms := t.maxStreams - t.maxStreams = int(s.Val) - t.mu.Unlock() - if reset { - t.streamsQuota.reset(int(s.Val) - ms) - } - case http2.SettingInitialWindowSize: - t.mu.Lock() - for _, stream := range t.activeStreams { - // Adjust the sending quota for each stream. - stream.sendQuotaPool.reset(int(s.Val - t.streamSendQuota)) - } - t.streamSendQuota = s.Val - t.mu.Unlock() - } - } -} - -// controller running in a separate goroutine takes charge of sending control -// frames (e.g., window update, reset stream, setting, etc.) to the server. 
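The reader goroutine above is a classic read-and-dispatch loop over the framer. A self-contained sketch of its shape against an in-memory frame stream, assuming golang.org/x/net/http2:

package main

import (
	"bytes"
	"fmt"

	"golang.org/x/net/http2"
)

func main() {
	// Build a tiny frame stream: a single SETTINGS frame.
	var wire bytes.Buffer
	w := http2.NewFramer(&wire, nil)
	if err := w.WriteSettings(http2.Setting{ID: http2.SettingMaxConcurrentStreams, Val: 100}); err != nil {
		panic(err)
	}

	// Read frames until EOF and dispatch on the concrete type, as reader does.
	r := http2.NewFramer(nil, &wire)
	for {
		frame, err := r.ReadFrame()
		if err != nil {
			return // io.EOF here; the real loop calls notifyError instead
		}
		switch f := frame.(type) {
		case *http2.SettingsFrame:
			fmt.Println("SETTINGS frame, ack:", f.IsAck())
		default:
			fmt.Printf("unhandled frame type %T\n", f)
		}
	}
}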
-func (t *http2Client) controller() { - for { - select { - case i := <-t.controlBuf.get(): - t.controlBuf.load() - select { - case <-t.writableChan: - switch i := i.(type) { - case *windowUpdate: - t.framer.writeWindowUpdate(true, i.streamID, i.increment) - case *settings: - if i.ack { - t.framer.writeSettingsAck(true) - t.applySettings(i.ss) - } else { - t.framer.writeSettings(true, i.ss...) - } - case *resetStream: - t.framer.writeRSTStream(true, i.streamID, i.code) - case *flushIO: - t.framer.flushWrite() - case *ping: - t.framer.writePing(true, i.ack, i.data) - default: - grpclog.Printf("transport: http2Client.controller got unexpected item type %v\n", i) - } - t.writableChan <- 0 - continue - case <-t.shutdownChan: - return - } - case <-t.shutdownChan: - return - } - } -} - -func (t *http2Client) Error() <-chan struct{} { - return t.errorChan -} - -func (t *http2Client) GoAway() <-chan struct{} { - return t.goAway -} - -func (t *http2Client) notifyError(err error) { - t.mu.Lock() - // make sure t.errorChan is closed only once. - if t.state == draining { - t.mu.Unlock() - t.Close() - return - } - if t.state == reachable { - t.state = unreachable - close(t.errorChan) - grpclog.Printf("transport: http2Client.notifyError got notified that the client transport was broken %v.", err) - } - t.mu.Unlock() -} diff --git a/vendor/google.golang.org/grpc/transport/http2_server.go b/vendor/google.golang.org/grpc/transport/http2_server.go deleted file mode 100644 index f753c4f1ead6..000000000000 --- a/vendor/google.golang.org/grpc/transport/http2_server.go +++ /dev/null @@ -1,774 +0,0 @@ -/* - * - * Copyright 2014, Google Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- * - */ - -package transport - -import ( - "bytes" - "errors" - "io" - "math" - "net" - "strconv" - "sync" - - "golang.org/x/net/context" - "golang.org/x/net/http2" - "golang.org/x/net/http2/hpack" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/peer" -) - -// ErrIllegalHeaderWrite indicates that setting header is illegal because of -// the stream's state. -var ErrIllegalHeaderWrite = errors.New("transport: the stream is done or WriteHeader was already called") - -// http2Server implements the ServerTransport interface with HTTP2. -type http2Server struct { - conn net.Conn - maxStreamID uint32 // max stream ID ever seen - authInfo credentials.AuthInfo // auth info about the connection - // writableChan synchronizes write access to the transport. - // A writer acquires the write lock by receiving a value on writableChan - // and releases it by sending on writableChan. - writableChan chan int - // shutdownChan is closed when Close is called. - // Blocking operations should select on shutdownChan to avoid - // blocking forever after Close. - shutdownChan chan struct{} - framer *framer - hBuf *bytes.Buffer // the buffer for HPACK encoding - hEnc *hpack.Encoder // HPACK encoder - - // The max number of concurrent streams. - maxStreams uint32 - // controlBuf delivers all the control related tasks (e.g., window - // updates, reset streams, and various settings) to the controller. - controlBuf *recvBuffer - fc *inFlow - // sendQuotaPool provides flow control to outbound message. - sendQuotaPool *quotaPool - - mu sync.Mutex // guard the following - state transportState - activeStreams map[uint32]*Stream - // the per-stream outbound flow control window size set by the peer. - streamSendQuota uint32 -} - -// newHTTP2Server constructs a ServerTransport based on HTTP2. ConnectionError is -// returned if something goes wrong. -func newHTTP2Server(conn net.Conn, maxStreams uint32, authInfo credentials.AuthInfo) (_ ServerTransport, err error) { - framer := newFramer(conn) - // Send initial settings as connection preface to client. - var settings []http2.Setting - // TODO(zhaoq): Have a better way to signal "no limit" because 0 is - // permitted in the HTTP2 spec. - if maxStreams == 0 { - maxStreams = math.MaxUint32 - } else { - settings = append(settings, http2.Setting{ - ID: http2.SettingMaxConcurrentStreams, - Val: maxStreams, - }) - } - if initialWindowSize != defaultWindowSize { - settings = append(settings, http2.Setting{ - ID: http2.SettingInitialWindowSize, - Val: uint32(initialWindowSize)}) - } - if err := framer.writeSettings(true, settings...); err != nil { - return nil, connectionErrorf(true, err, "transport: %v", err) - } - // Adjust the connection flow control window if needed. 
- if delta := uint32(initialConnWindowSize - defaultWindowSize); delta > 0 { - if err := framer.writeWindowUpdate(true, 0, delta); err != nil { - return nil, connectionErrorf(true, err, "transport: %v", err) - } - } - var buf bytes.Buffer - t := &http2Server{ - conn: conn, - authInfo: authInfo, - framer: framer, - hBuf: &buf, - hEnc: hpack.NewEncoder(&buf), - maxStreams: maxStreams, - controlBuf: newRecvBuffer(), - fc: &inFlow{limit: initialConnWindowSize}, - sendQuotaPool: newQuotaPool(defaultWindowSize), - state: reachable, - writableChan: make(chan int, 1), - shutdownChan: make(chan struct{}), - activeStreams: make(map[uint32]*Stream), - streamSendQuota: defaultWindowSize, - } - go t.controller() - t.writableChan <- 0 - return t, nil -} - -// operateHeader takes action on the decoded headers. -func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream)) (close bool) { - buf := newRecvBuffer() - s := &Stream{ - id: frame.Header().StreamID, - st: t, - buf: buf, - fc: &inFlow{limit: initialWindowSize}, - } - - var state decodeState - for _, hf := range frame.Fields { - state.processHeaderField(hf) - } - if err := state.err; err != nil { - if se, ok := err.(StreamError); ok { - t.controlBuf.put(&resetStream{s.id, statusCodeConvTab[se.Code]}) - } - return - } - - if frame.StreamEnded() { - // s is just created by the caller. No lock needed. - s.state = streamReadDone - } - s.recvCompress = state.encoding - if state.timeoutSet { - s.ctx, s.cancel = context.WithTimeout(context.TODO(), state.timeout) - } else { - s.ctx, s.cancel = context.WithCancel(context.TODO()) - } - pr := &peer.Peer{ - Addr: t.conn.RemoteAddr(), - } - // Attach Auth info if there is any. - if t.authInfo != nil { - pr.AuthInfo = t.authInfo - } - s.ctx = peer.NewContext(s.ctx, pr) - // Cache the current stream to the context so that the server application - // can find out. Required when the server wants to send some metadata - // back to the client (unary call only). - s.ctx = newContextWithStream(s.ctx, s) - // Attach the received metadata to the context. - if len(state.mdata) > 0 { - s.ctx = metadata.NewContext(s.ctx, state.mdata) - } - - s.dec = &recvBufferReader{ - ctx: s.ctx, - recv: s.buf, - } - s.recvCompress = state.encoding - s.method = state.method - t.mu.Lock() - if t.state != reachable { - t.mu.Unlock() - return - } - if uint32(len(t.activeStreams)) >= t.maxStreams { - t.mu.Unlock() - t.controlBuf.put(&resetStream{s.id, http2.ErrCodeRefusedStream}) - return - } - if s.id%2 != 1 || s.id <= t.maxStreamID { - t.mu.Unlock() - // illegal gRPC stream id. - grpclog.Println("transport: http2Server.HandleStreams received an illegal stream id: ", s.id) - return true - } - t.maxStreamID = s.id - s.sendQuotaPool = newQuotaPool(int(t.streamSendQuota)) - t.activeStreams[s.id] = s - t.mu.Unlock() - s.windowHandler = func(n int) { - t.updateWindow(s, uint32(n)) - } - handle(s) - return -} - -// HandleStreams receives incoming streams using the given handler. This is -// typically run in a separate goroutine. -func (t *http2Server) HandleStreams(handle func(*Stream)) { - // Check the validity of client preface. 
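operateHeaders above rejects stream IDs that are even or non-increasing, since client-initiated HTTP/2 streams must use odd, strictly growing IDs. The check, isolated (names hypothetical):

package main

import "fmt"

// validNewStreamID mirrors the guard above: odd and greater than the highest
// ID the server has already seen.
func validNewStreamID(id, maxSeen uint32) bool {
	return id%2 == 1 && id > maxSeen
}

func main() {
	fmt.Println(validNewStreamID(5, 3)) // true
	fmt.Println(validNewStreamID(4, 3)) // false: even
	fmt.Println(validNewStreamID(3, 3)) // false: not increasing
}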
- preface := make([]byte, len(clientPreface)) - if _, err := io.ReadFull(t.conn, preface); err != nil { - grpclog.Printf("transport: http2Server.HandleStreams failed to receive the preface from client: %v", err) - t.Close() - return - } - if !bytes.Equal(preface, clientPreface) { - grpclog.Printf("transport: http2Server.HandleStreams received bogus greeting from client: %q", preface) - t.Close() - return - } - - frame, err := t.framer.readFrame() - if err == io.EOF || err == io.ErrUnexpectedEOF { - t.Close() - return - } - if err != nil { - grpclog.Printf("transport: http2Server.HandleStreams failed to read frame: %v", err) - t.Close() - return - } - sf, ok := frame.(*http2.SettingsFrame) - if !ok { - grpclog.Printf("transport: http2Server.HandleStreams saw invalid preface type %T from client", frame) - t.Close() - return - } - t.handleSettings(sf) - - for { - frame, err := t.framer.readFrame() - if err != nil { - if se, ok := err.(http2.StreamError); ok { - t.mu.Lock() - s := t.activeStreams[se.StreamID] - t.mu.Unlock() - if s != nil { - t.closeStream(s) - } - t.controlBuf.put(&resetStream{se.StreamID, se.Code}) - continue - } - if err == io.EOF || err == io.ErrUnexpectedEOF { - t.Close() - return - } - grpclog.Printf("transport: http2Server.HandleStreams failed to read frame: %v", err) - t.Close() - return - } - switch frame := frame.(type) { - case *http2.MetaHeadersFrame: - if t.operateHeaders(frame, handle) { - t.Close() - break - } - case *http2.DataFrame: - t.handleData(frame) - case *http2.RSTStreamFrame: - t.handleRSTStream(frame) - case *http2.SettingsFrame: - t.handleSettings(frame) - case *http2.PingFrame: - t.handlePing(frame) - case *http2.WindowUpdateFrame: - t.handleWindowUpdate(frame) - case *http2.GoAwayFrame: - // TODO: Handle GoAway from the client appropriately. - default: - grpclog.Printf("transport: http2Server.HandleStreams found unhandled frame type %v.", frame) - } - } -} - -func (t *http2Server) getStream(f http2.Frame) (*Stream, bool) { - t.mu.Lock() - defer t.mu.Unlock() - if t.activeStreams == nil { - // The transport is closing. - return nil, false - } - s, ok := t.activeStreams[f.Header().StreamID] - if !ok { - // The stream is already done. - return nil, false - } - return s, true -} - -// updateWindow adjusts the inbound quota for the stream and the transport. -// Window updates will deliver to the controller for sending when -// the cumulative quota exceeds the corresponding threshold. -func (t *http2Server) updateWindow(s *Stream, n uint32) { - s.mu.Lock() - defer s.mu.Unlock() - if s.state == streamDone { - return - } - if w := t.fc.onRead(n); w > 0 { - t.controlBuf.put(&windowUpdate{0, w}) - } - if w := s.fc.onRead(n); w > 0 { - t.controlBuf.put(&windowUpdate{s.id, w}) - } -} - -func (t *http2Server) handleData(f *http2.DataFrame) { - size := len(f.Data()) - if err := t.fc.onData(uint32(size)); err != nil { - grpclog.Printf("transport: http2Server %v", err) - t.Close() - return - } - // Select the right stream to dispatch. - s, ok := t.getStream(f) - if !ok { - if w := t.fc.onRead(uint32(size)); w > 0 { - t.controlBuf.put(&windowUpdate{0, w}) - } - return - } - if size > 0 { - s.mu.Lock() - if s.state == streamDone { - s.mu.Unlock() - // The stream has been closed. Release the corresponding quota. 
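HandleStreams above validates the connection preface by reading exactly len(clientPreface) bytes and comparing them byte-for-byte. A sketch of the same check, assuming golang.org/x/net/http2 for the canonical preface constant:

package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"

	"golang.org/x/net/http2"
)

// checkPreface reads exactly the preface length and compares, as the server
// does before touching any frame.
func checkPreface(r io.Reader) error {
	buf := make([]byte, len(http2.ClientPreface))
	if _, err := io.ReadFull(r, buf); err != nil {
		return err
	}
	if !bytes.Equal(buf, []byte(http2.ClientPreface)) {
		return fmt.Errorf("bogus greeting: %q", buf)
	}
	return nil
}

func main() {
	fmt.Println(checkPreface(strings.NewReader(http2.ClientPreface))) // <nil>
	fmt.Println(checkPreface(strings.NewReader("GET / HTTP/1.1\r\n")))
}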
- if w := t.fc.onRead(uint32(size)); w > 0 { - t.controlBuf.put(&windowUpdate{0, w}) - } - return - } - if err := s.fc.onData(uint32(size)); err != nil { - s.mu.Unlock() - t.closeStream(s) - t.controlBuf.put(&resetStream{s.id, http2.ErrCodeFlowControl}) - return - } - s.mu.Unlock() - // TODO(bradfitz, zhaoq): A copy is required here because there is no - // guarantee f.Data() is consumed before the arrival of next frame. - // Can this copy be eliminated? - data := make([]byte, size) - copy(data, f.Data()) - s.write(recvMsg{data: data}) - } - if f.Header().Flags.Has(http2.FlagDataEndStream) { - // Received the end of stream from the client. - s.mu.Lock() - if s.state != streamDone { - s.state = streamReadDone - } - s.mu.Unlock() - s.write(recvMsg{err: io.EOF}) - } -} - -func (t *http2Server) handleRSTStream(f *http2.RSTStreamFrame) { - s, ok := t.getStream(f) - if !ok { - return - } - t.closeStream(s) -} - -func (t *http2Server) handleSettings(f *http2.SettingsFrame) { - if f.IsAck() { - return - } - var ss []http2.Setting - f.ForeachSetting(func(s http2.Setting) error { - ss = append(ss, s) - return nil - }) - // The settings will be applied once the ack is sent. - t.controlBuf.put(&settings{ack: true, ss: ss}) -} - -func (t *http2Server) handlePing(f *http2.PingFrame) { - pingAck := &ping{ack: true} - copy(pingAck.data[:], f.Data[:]) - t.controlBuf.put(pingAck) -} - -func (t *http2Server) handleWindowUpdate(f *http2.WindowUpdateFrame) { - id := f.Header().StreamID - incr := f.Increment - if id == 0 { - t.sendQuotaPool.add(int(incr)) - return - } - if s, ok := t.getStream(f); ok { - s.sendQuotaPool.add(int(incr)) - } -} - -func (t *http2Server) writeHeaders(s *Stream, b *bytes.Buffer, endStream bool) error { - first := true - endHeaders := false - var err error - // Sends the headers in a single batch. - for !endHeaders { - size := t.hBuf.Len() - if size > http2MaxFrameLen { - size = http2MaxFrameLen - } else { - endHeaders = true - } - if first { - p := http2.HeadersFrameParam{ - StreamID: s.id, - BlockFragment: b.Next(size), - EndStream: endStream, - EndHeaders: endHeaders, - } - err = t.framer.writeHeaders(endHeaders, p) - first = false - } else { - err = t.framer.writeContinuation(endHeaders, s.id, endHeaders, b.Next(size)) - } - if err != nil { - t.Close() - return connectionErrorf(true, err, "transport: %v", err) - } - } - return nil -} - -// WriteHeader sends the header metadata md back to the client. -func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { - s.mu.Lock() - if s.headerOk || s.state == streamDone { - s.mu.Unlock() - return ErrIllegalHeaderWrite - } - s.headerOk = true - s.mu.Unlock() - if _, err := wait(s.ctx, nil, nil, t.shutdownChan, t.writableChan); err != nil { - return err - } - t.hBuf.Reset() - t.hEnc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) - t.hEnc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"}) - if s.sendCompress != "" { - t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress}) - } - for k, v := range md { - if isReservedHeader(k) { - // Clients don't tolerate reading restricted headers after some non-restricted ones were sent. - continue - } - for _, entry := range v { - t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: entry}) - } - } - if err := t.writeHeaders(s, t.hBuf, false); err != nil { - return err - } - t.writableChan <- 0 - return nil -} - -// WriteStatus sends stream status to the client and terminates the stream. 
-// There is no further I/O operations being able to perform on this stream. -// TODO(zhaoq): Now it indicates the end of entire stream. Revisit if early -// OK is adopted. -func (t *http2Server) WriteStatus(s *Stream, statusCode codes.Code, statusDesc string) error { - var headersSent bool - s.mu.Lock() - if s.state == streamDone { - s.mu.Unlock() - return nil - } - if s.headerOk { - headersSent = true - } - s.mu.Unlock() - if _, err := wait(s.ctx, nil, nil, t.shutdownChan, t.writableChan); err != nil { - return err - } - t.hBuf.Reset() - if !headersSent { - t.hEnc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) - t.hEnc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"}) - } - t.hEnc.WriteField( - hpack.HeaderField{ - Name: "grpc-status", - Value: strconv.Itoa(int(statusCode)), - }) - t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-message", Value: encodeGrpcMessage(statusDesc)}) - // Attach the trailer metadata. - for k, v := range s.trailer { - // Clients don't tolerate reading restricted headers after some non restricted ones were sent. - if isReservedHeader(k) { - continue - } - for _, entry := range v { - t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: entry}) - } - } - if err := t.writeHeaders(s, t.hBuf, true); err != nil { - t.Close() - return err - } - t.closeStream(s) - t.writableChan <- 0 - return nil -} - -// Write converts the data into HTTP2 data frame and sends it out. Non-nil error -// is returns if it fails (e.g., framing error, transport error). -func (t *http2Server) Write(s *Stream, data []byte, opts *Options) error { - // TODO(zhaoq): Support multi-writers for a single stream. - var writeHeaderFrame bool - s.mu.Lock() - if s.state == streamDone { - s.mu.Unlock() - return streamErrorf(codes.Unknown, "the stream has been done") - } - if !s.headerOk { - writeHeaderFrame = true - s.headerOk = true - } - s.mu.Unlock() - if writeHeaderFrame { - if _, err := wait(s.ctx, nil, nil, t.shutdownChan, t.writableChan); err != nil { - return err - } - t.hBuf.Reset() - t.hEnc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) - t.hEnc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"}) - if s.sendCompress != "" { - t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress}) - } - p := http2.HeadersFrameParam{ - StreamID: s.id, - BlockFragment: t.hBuf.Bytes(), - EndHeaders: true, - } - if err := t.framer.writeHeaders(false, p); err != nil { - t.Close() - return connectionErrorf(true, err, "transport: %v", err) - } - t.writableChan <- 0 - } - r := bytes.NewBuffer(data) - for { - if r.Len() == 0 { - return nil - } - size := http2MaxFrameLen - s.sendQuotaPool.add(0) - // Wait until the stream has some quota to send the data. - sq, err := wait(s.ctx, nil, nil, t.shutdownChan, s.sendQuotaPool.acquire()) - if err != nil { - return err - } - t.sendQuotaPool.add(0) - // Wait until the transport has some quota to send the data. - tq, err := wait(s.ctx, nil, nil, t.shutdownChan, t.sendQuotaPool.acquire()) - if err != nil { - if _, ok := err.(StreamError); ok { - t.sendQuotaPool.cancel() - } - return err - } - if sq < size { - size = sq - } - if tq < size { - size = tq - } - p := r.Next(size) - ps := len(p) - if ps < sq { - // Overbooked stream quota. Return it back. - s.sendQuotaPool.add(sq - ps) - } - if ps < tq { - // Overbooked transport quota. Return it back. - t.sendQuotaPool.add(tq - ps) - } - t.framer.adjustNumWriters(1) - // Got some quota. 
Try to acquire writing privilege on the - // transport. - if _, err := wait(s.ctx, nil, nil, t.shutdownChan, t.writableChan); err != nil { - if _, ok := err.(StreamError); ok { - // Return the connection quota back. - t.sendQuotaPool.add(ps) - } - if t.framer.adjustNumWriters(-1) == 0 { - // This writer is the last one in this batch and has the - // responsibility to flush the buffered frames. It queues - // a flush request to controlBuf instead of flushing directly - // in order to avoid the race with other writing or flushing. - t.controlBuf.put(&flushIO{}) - } - return err - } - select { - case <-s.ctx.Done(): - t.sendQuotaPool.add(ps) - if t.framer.adjustNumWriters(-1) == 0 { - t.controlBuf.put(&flushIO{}) - } - t.writableChan <- 0 - return ContextErr(s.ctx.Err()) - default: - } - var forceFlush bool - if r.Len() == 0 && t.framer.adjustNumWriters(0) == 1 && !opts.Last { - forceFlush = true - } - if err := t.framer.writeData(forceFlush, s.id, false, p); err != nil { - t.Close() - return connectionErrorf(true, err, "transport: %v", err) - } - if t.framer.adjustNumWriters(-1) == 0 { - t.framer.flushWrite() - } - t.writableChan <- 0 - } - -} - -func (t *http2Server) applySettings(ss []http2.Setting) { - for _, s := range ss { - if s.ID == http2.SettingInitialWindowSize { - t.mu.Lock() - defer t.mu.Unlock() - for _, stream := range t.activeStreams { - stream.sendQuotaPool.reset(int(s.Val - t.streamSendQuota)) - } - t.streamSendQuota = s.Val - } - - } -} - -// controller running in a separate goroutine takes charge of sending control -// frames (e.g., window update, reset stream, setting, etc.) to the server. -func (t *http2Server) controller() { - for { - select { - case i := <-t.controlBuf.get(): - t.controlBuf.load() - select { - case <-t.writableChan: - switch i := i.(type) { - case *windowUpdate: - t.framer.writeWindowUpdate(true, i.streamID, i.increment) - case *settings: - if i.ack { - t.framer.writeSettingsAck(true) - t.applySettings(i.ss) - } else { - t.framer.writeSettings(true, i.ss...) - } - case *resetStream: - t.framer.writeRSTStream(true, i.streamID, i.code) - case *goAway: - t.mu.Lock() - if t.state == closing { - t.mu.Unlock() - // The transport is closing. - return - } - sid := t.maxStreamID - t.state = draining - t.mu.Unlock() - t.framer.writeGoAway(true, sid, http2.ErrCodeNo, nil) - case *flushIO: - t.framer.flushWrite() - case *ping: - t.framer.writePing(true, i.ack, i.data) - default: - grpclog.Printf("transport: http2Server.controller got unexpected item type %v\n", i) - } - t.writableChan <- 0 - continue - case <-t.shutdownChan: - return - } - case <-t.shutdownChan: - return - } - } -} - -// Close starts shutting down the http2Server transport. -// TODO(zhaoq): Now the destruction is not blocked on any pending streams. This -// could cause some resource issue. Revisit this later. -func (t *http2Server) Close() (err error) { - t.mu.Lock() - if t.state == closing { - t.mu.Unlock() - return errors.New("transport: Close() was already called") - } - t.state = closing - streams := t.activeStreams - t.activeStreams = nil - t.mu.Unlock() - close(t.shutdownChan) - err = t.conn.Close() - // Cancel all active streams. - for _, s := range streams { - s.cancel() - } - return -} - -// closeStream clears the footprint of a stream when the stream is not needed -// any more. 
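The drain path in the controller above answers a goAway control item by recording the highest processed stream ID and emitting a GOAWAY with ErrCodeNo. A sketch of the frame itself, assuming golang.org/x/net/http2 and an in-memory writer:

package main

import (
	"bytes"
	"fmt"

	"golang.org/x/net/http2"
)

func main() {
	var wire bytes.Buffer
	fr := http2.NewFramer(&wire, nil)
	// Advertise that streams up to ID 7 were or will be processed and that
	// the peer should open no new ones; ErrCodeNo signals a graceful close.
	if err := fr.WriteGoAway(7, http2.ErrCodeNo, nil); err != nil {
		panic(err)
	}
	fmt.Printf("GOAWAY frame: %d bytes\n", wire.Len())
}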
-func (t *http2Server) closeStream(s *Stream) { - t.mu.Lock() - delete(t.activeStreams, s.id) - if t.state == draining && len(t.activeStreams) == 0 { - defer t.Close() - } - t.mu.Unlock() - // In case stream sending and receiving are invoked in separate - // goroutines (e.g., bi-directional streaming), cancel needs to be - // called to interrupt the potential blocking on other goroutines. - s.cancel() - s.mu.Lock() - if q := s.fc.resetPendingData(); q > 0 { - if w := t.fc.onRead(q); w > 0 { - t.controlBuf.put(&windowUpdate{0, w}) - } - } - if s.state == streamDone { - s.mu.Unlock() - return - } - s.state = streamDone - s.mu.Unlock() -} - -func (t *http2Server) RemoteAddr() net.Addr { - return t.conn.RemoteAddr() -} - -func (t *http2Server) Drain() { - t.controlBuf.put(&goAway{}) -} diff --git a/vendor/google.golang.org/grpc/transport/http_util.go b/vendor/google.golang.org/grpc/transport/http_util.go deleted file mode 100644 index b024594e7f2e..000000000000 --- a/vendor/google.golang.org/grpc/transport/http_util.go +++ /dev/null @@ -1,510 +0,0 @@ -/* - * - * Copyright 2014, Google Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -package transport - -import ( - "bufio" - "bytes" - "fmt" - "io" - "net" - "strconv" - "strings" - "sync/atomic" - "time" - - "golang.org/x/net/http2" - "golang.org/x/net/http2/hpack" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/metadata" -) - -const ( - // The primary user agent - primaryUA = "grpc-go/1.0" - // http2MaxFrameLen specifies the max length of a HTTP2 frame. - http2MaxFrameLen = 16384 // 16KB frame - // http://http2.github.io/http2-spec/#SettingValues - http2InitHeaderTableSize = 4096 - // http2IOBufSize specifies the buffer size for sending frames. 
- http2IOBufSize = 32 * 1024 -) - -var ( - clientPreface = []byte(http2.ClientPreface) - http2ErrConvTab = map[http2.ErrCode]codes.Code{ - http2.ErrCodeNo: codes.Internal, - http2.ErrCodeProtocol: codes.Internal, - http2.ErrCodeInternal: codes.Internal, - http2.ErrCodeFlowControl: codes.ResourceExhausted, - http2.ErrCodeSettingsTimeout: codes.Internal, - http2.ErrCodeStreamClosed: codes.Internal, - http2.ErrCodeFrameSize: codes.Internal, - http2.ErrCodeRefusedStream: codes.Unavailable, - http2.ErrCodeCancel: codes.Canceled, - http2.ErrCodeCompression: codes.Internal, - http2.ErrCodeConnect: codes.Internal, - http2.ErrCodeEnhanceYourCalm: codes.ResourceExhausted, - http2.ErrCodeInadequateSecurity: codes.PermissionDenied, - http2.ErrCodeHTTP11Required: codes.FailedPrecondition, - } - statusCodeConvTab = map[codes.Code]http2.ErrCode{ - codes.Internal: http2.ErrCodeInternal, - codes.Canceled: http2.ErrCodeCancel, - codes.Unavailable: http2.ErrCodeRefusedStream, - codes.ResourceExhausted: http2.ErrCodeEnhanceYourCalm, - codes.PermissionDenied: http2.ErrCodeInadequateSecurity, - } -) - -// Records the states during HPACK decoding. Must be reset once the -// decoding of the entire headers are finished. -type decodeState struct { - err error // first error encountered decoding - - encoding string - // statusCode caches the stream status received from the trailer - // the server sent. Client side only. - statusCode codes.Code - statusDesc string - // Server side only fields. - timeoutSet bool - timeout time.Duration - method string - // key-value metadata map from the peer. - mdata map[string][]string -} - -// isReservedHeader checks whether hdr belongs to HTTP2 headers -// reserved by gRPC protocol. Any other headers are classified as the -// user-specified metadata. -func isReservedHeader(hdr string) bool { - if hdr != "" && hdr[0] == ':' { - return true - } - switch hdr { - case "content-type", - "grpc-message-type", - "grpc-encoding", - "grpc-message", - "grpc-status", - "grpc-timeout", - "te": - return true - default: - return false - } -} - -// isWhitelistedPseudoHeader checks whether hdr belongs to HTTP2 pseudoheaders -// that should be propagated into metadata visible to users. -func isWhitelistedPseudoHeader(hdr string) bool { - switch hdr { - case ":authority": - return true - default: - return false - } -} - -func (d *decodeState) setErr(err error) { - if d.err == nil { - d.err = err - } -} - -func validContentType(t string) bool { - e := "application/grpc" - if !strings.HasPrefix(t, e) { - return false - } - // Support variations on the content-type - // (e.g. "application/grpc+blah", "application/grpc;blah"). 
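The http2ErrConvTab above maps HTTP/2 error codes onto gRPC status codes; handleRSTStream earlier falls back to codes.Unknown when a code is missing from the table. A sketch of that lookup-with-fallback, assuming current golang.org/x/net/http2 and google.golang.org/grpc/codes packages:

package main

import (
	"fmt"

	"golang.org/x/net/http2"
	"google.golang.org/grpc/codes"
)

// A trimmed-down version of the conversion table, for illustration only.
var errConv = map[http2.ErrCode]codes.Code{
	http2.ErrCodeCancel:        codes.Canceled,
	http2.ErrCodeFlowControl:   codes.ResourceExhausted,
	http2.ErrCodeRefusedStream: codes.Unavailable,
}

func toGRPC(e http2.ErrCode) codes.Code {
	if c, ok := errConv[e]; ok {
		return c
	}
	return codes.Unknown // unmapped codes degrade gracefully
}

func main() {
	fmt.Println(toGRPC(http2.ErrCodeCancel), toGRPC(http2.ErrCode(0xff)))
}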
- if len(t) > len(e) && t[len(e)] != '+' && t[len(e)] != ';' { - return false - } - return true -} - -func (d *decodeState) processHeaderField(f hpack.HeaderField) { - switch f.Name { - case "content-type": - if !validContentType(f.Value) { - d.setErr(streamErrorf(codes.FailedPrecondition, "transport: received the unexpected content-type %q", f.Value)) - return - } - case "grpc-encoding": - d.encoding = f.Value - case "grpc-status": - code, err := strconv.Atoi(f.Value) - if err != nil { - d.setErr(streamErrorf(codes.Internal, "transport: malformed grpc-status: %v", err)) - return - } - d.statusCode = codes.Code(code) - case "grpc-message": - d.statusDesc = decodeGrpcMessage(f.Value) - case "grpc-timeout": - d.timeoutSet = true - var err error - d.timeout, err = decodeTimeout(f.Value) - if err != nil { - d.setErr(streamErrorf(codes.Internal, "transport: malformed time-out: %v", err)) - return - } - case ":path": - d.method = f.Value - default: - if !isReservedHeader(f.Name) || isWhitelistedPseudoHeader(f.Name) { - if f.Name == "user-agent" { - i := strings.LastIndex(f.Value, " ") - if i == -1 { - // There is no application user agent string being set. - return - } - // Extract the application user agent string. - f.Value = f.Value[:i] - } - if d.mdata == nil { - d.mdata = make(map[string][]string) - } - k, v, err := metadata.DecodeKeyValue(f.Name, f.Value) - if err != nil { - grpclog.Printf("Failed to decode (%q, %q): %v", f.Name, f.Value, err) - return - } - d.mdata[k] = append(d.mdata[k], v) - } - } -} - -type timeoutUnit uint8 - -const ( - hour timeoutUnit = 'H' - minute timeoutUnit = 'M' - second timeoutUnit = 'S' - millisecond timeoutUnit = 'm' - microsecond timeoutUnit = 'u' - nanosecond timeoutUnit = 'n' -) - -func timeoutUnitToDuration(u timeoutUnit) (d time.Duration, ok bool) { - switch u { - case hour: - return time.Hour, true - case minute: - return time.Minute, true - case second: - return time.Second, true - case millisecond: - return time.Millisecond, true - case microsecond: - return time.Microsecond, true - case nanosecond: - return time.Nanosecond, true - default: - } - return -} - -const maxTimeoutValue int64 = 100000000 - 1 - -// div does integer division and round-up the result. Note that this is -// equivalent to (d+r-1)/r but has less chance to overflow. -func div(d, r time.Duration) int64 { - if m := d % r; m > 0 { - return int64(d/r + 1) - } - return int64(d / r) -} - -// TODO(zhaoq): It is the simplistic and not bandwidth efficient. Improve it. -func encodeTimeout(t time.Duration) string { - if d := div(t, time.Nanosecond); d <= maxTimeoutValue { - return strconv.FormatInt(d, 10) + "n" - } - if d := div(t, time.Microsecond); d <= maxTimeoutValue { - return strconv.FormatInt(d, 10) + "u" - } - if d := div(t, time.Millisecond); d <= maxTimeoutValue { - return strconv.FormatInt(d, 10) + "m" - } - if d := div(t, time.Second); d <= maxTimeoutValue { - return strconv.FormatInt(d, 10) + "S" - } - if d := div(t, time.Minute); d <= maxTimeoutValue { - return strconv.FormatInt(d, 10) + "M" - } - // Note that maxTimeoutValue * time.Hour > MaxInt64. 
- return strconv.FormatInt(div(t, time.Hour), 10) + "H" -} - -func decodeTimeout(s string) (time.Duration, error) { - size := len(s) - if size < 2 { - return 0, fmt.Errorf("transport: timeout string is too short: %q", s) - } - unit := timeoutUnit(s[size-1]) - d, ok := timeoutUnitToDuration(unit) - if !ok { - return 0, fmt.Errorf("transport: timeout unit is not recognized: %q", s) - } - t, err := strconv.ParseInt(s[:size-1], 10, 64) - if err != nil { - return 0, err - } - return d * time.Duration(t), nil -} - -const ( - spaceByte = ' ' - tildaByte = '~' - percentByte = '%' -) - -// encodeGrpcMessage is used to encode status code in header field -// "grpc-message". -// It checks to see if each individual byte in msg is an -// allowable byte, and then either percent encoding or passing it through. -// When percent encoding, the byte is converted into hexadecimal notation -// with a '%' prepended. -func encodeGrpcMessage(msg string) string { - if msg == "" { - return "" - } - lenMsg := len(msg) - for i := 0; i < lenMsg; i++ { - c := msg[i] - if !(c >= spaceByte && c < tildaByte && c != percentByte) { - return encodeGrpcMessageUnchecked(msg) - } - } - return msg -} - -func encodeGrpcMessageUnchecked(msg string) string { - var buf bytes.Buffer - lenMsg := len(msg) - for i := 0; i < lenMsg; i++ { - c := msg[i] - if c >= spaceByte && c < tildaByte && c != percentByte { - buf.WriteByte(c) - } else { - buf.WriteString(fmt.Sprintf("%%%02X", c)) - } - } - return buf.String() -} - -// decodeGrpcMessage decodes the msg encoded by encodeGrpcMessage. -func decodeGrpcMessage(msg string) string { - if msg == "" { - return "" - } - lenMsg := len(msg) - for i := 0; i < lenMsg; i++ { - if msg[i] == percentByte && i+2 < lenMsg { - return decodeGrpcMessageUnchecked(msg) - } - } - return msg -} - -func decodeGrpcMessageUnchecked(msg string) string { - var buf bytes.Buffer - lenMsg := len(msg) - for i := 0; i < lenMsg; i++ { - c := msg[i] - if c == percentByte && i+2 < lenMsg { - parsed, err := strconv.ParseInt(msg[i+1:i+3], 16, 8) - if err != nil { - buf.WriteByte(c) - } else { - buf.WriteByte(byte(parsed)) - i += 2 - } - } else { - buf.WriteByte(c) - } - } - return buf.String() -} - -type framer struct { - numWriters int32 - reader io.Reader - writer *bufio.Writer - fr *http2.Framer -} - -func newFramer(conn net.Conn) *framer { - f := &framer{ - reader: bufio.NewReaderSize(conn, http2IOBufSize), - writer: bufio.NewWriterSize(conn, http2IOBufSize), - } - f.fr = http2.NewFramer(f.writer, f.reader) - f.fr.ReadMetaHeaders = hpack.NewDecoder(http2InitHeaderTableSize, nil) - return f -} - -func (f *framer) adjustNumWriters(i int32) int32 { - return atomic.AddInt32(&f.numWriters, i) -} - -// The following writeXXX functions can only be called when the caller gets -// unblocked from writableChan channel (i.e., owns the privilege to write). 
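encodeGrpcMessage above percent-encodes any byte outside the printable ASCII range (plus '%' itself) so the status message survives as an HTTP/2 header value; the fast path returns the string untouched when nothing needs escaping. A condensed sketch of the escaping rule:

package main

import (
	"fmt"
	"strings"
)

// escapeMsg applies the same byte test as encodeGrpcMessageUnchecked: keep
// bytes in [' ', '~') other than '%', percent-encode everything else as %XX.
func escapeMsg(msg string) string {
	var b strings.Builder
	for i := 0; i < len(msg); i++ {
		c := msg[i]
		if c >= ' ' && c < '~' && c != '%' {
			b.WriteByte(c)
		} else {
			fmt.Fprintf(&b, "%%%02X", c)
		}
	}
	return b.String()
}

func main() {
	fmt.Println(escapeMsg("café 100%")) // caf%C3%A9 100%25
}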
- -func (f *framer) writeContinuation(forceFlush bool, streamID uint32, endHeaders bool, headerBlockFragment []byte) error { - if err := f.fr.WriteContinuation(streamID, endHeaders, headerBlockFragment); err != nil { - return err - } - if forceFlush { - return f.writer.Flush() - } - return nil -} - -func (f *framer) writeData(forceFlush bool, streamID uint32, endStream bool, data []byte) error { - if err := f.fr.WriteData(streamID, endStream, data); err != nil { - return err - } - if forceFlush { - return f.writer.Flush() - } - return nil -} - -func (f *framer) writeGoAway(forceFlush bool, maxStreamID uint32, code http2.ErrCode, debugData []byte) error { - if err := f.fr.WriteGoAway(maxStreamID, code, debugData); err != nil { - return err - } - if forceFlush { - return f.writer.Flush() - } - return nil -} - -func (f *framer) writeHeaders(forceFlush bool, p http2.HeadersFrameParam) error { - if err := f.fr.WriteHeaders(p); err != nil { - return err - } - if forceFlush { - return f.writer.Flush() - } - return nil -} - -func (f *framer) writePing(forceFlush, ack bool, data [8]byte) error { - if err := f.fr.WritePing(ack, data); err != nil { - return err - } - if forceFlush { - return f.writer.Flush() - } - return nil -} - -func (f *framer) writePriority(forceFlush bool, streamID uint32, p http2.PriorityParam) error { - if err := f.fr.WritePriority(streamID, p); err != nil { - return err - } - if forceFlush { - return f.writer.Flush() - } - return nil -} - -func (f *framer) writePushPromise(forceFlush bool, p http2.PushPromiseParam) error { - if err := f.fr.WritePushPromise(p); err != nil { - return err - } - if forceFlush { - return f.writer.Flush() - } - return nil -} - -func (f *framer) writeRSTStream(forceFlush bool, streamID uint32, code http2.ErrCode) error { - if err := f.fr.WriteRSTStream(streamID, code); err != nil { - return err - } - if forceFlush { - return f.writer.Flush() - } - return nil -} - -func (f *framer) writeSettings(forceFlush bool, settings ...http2.Setting) error { - if err := f.fr.WriteSettings(settings...); err != nil { - return err - } - if forceFlush { - return f.writer.Flush() - } - return nil -} - -func (f *framer) writeSettingsAck(forceFlush bool) error { - if err := f.fr.WriteSettingsAck(); err != nil { - return err - } - if forceFlush { - return f.writer.Flush() - } - return nil -} - -func (f *framer) writeWindowUpdate(forceFlush bool, streamID, incr uint32) error { - if err := f.fr.WriteWindowUpdate(streamID, incr); err != nil { - return err - } - if forceFlush { - return f.writer.Flush() - } - return nil -} - -func (f *framer) flushWrite() error { - return f.writer.Flush() -} - -func (f *framer) readFrame() (http2.Frame, error) { - return f.fr.ReadFrame() -} - -func (f *framer) errorDetail() error { - return f.fr.ErrorDetail() -} diff --git a/vendor/google.golang.org/grpc/transport/pre_go16.go b/vendor/google.golang.org/grpc/transport/pre_go16.go deleted file mode 100644 index 33d91c17c4e1..000000000000 --- a/vendor/google.golang.org/grpc/transport/pre_go16.go +++ /dev/null @@ -1,51 +0,0 @@ -// +build !go1.6 - -/* - * Copyright 2016, Google Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -package transport - -import ( - "net" - "time" - - "golang.org/x/net/context" -) - -// dialContext connects to the address on the named network. -func dialContext(ctx context.Context, network, address string) (net.Conn, error) { - var dialer net.Dialer - if deadline, ok := ctx.Deadline(); ok { - dialer.Timeout = deadline.Sub(time.Now()) - } - return dialer.Dial(network, address) -} diff --git a/vendor/google.golang.org/grpc/transport/transport.go b/vendor/google.golang.org/grpc/transport/transport.go deleted file mode 100644 index 4201d7bd16b5..000000000000 --- a/vendor/google.golang.org/grpc/transport/transport.go +++ /dev/null @@ -1,579 +0,0 @@ -/* - * - * Copyright 2014, Google Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- *
- */
-
-/*
-Package transport defines and implements a message-oriented communication
-channel to complete various transactions (e.g., an RPC).
-*/
-package transport
-
-import (
-	"bytes"
-	"errors"
-	"fmt"
-	"io"
-	"net"
-	"sync"
-
-	"golang.org/x/net/context"
-	"golang.org/x/net/trace"
-	"google.golang.org/grpc/codes"
-	"google.golang.org/grpc/credentials"
-	"google.golang.org/grpc/metadata"
-)
-
-// recvMsg represents the received msg from the transport. All transport
-// protocol-specific info has been removed.
-type recvMsg struct {
-	data []byte
-	// nil: received some data
-	// io.EOF: stream is completed. data is nil.
-	// other non-nil error: transport failure. data is nil.
-	err error
-}
-
-func (*recvMsg) item() {}
-
-// All items in and out of a recvBuffer should be the same type.
-type item interface {
-	item()
-}
-
-// recvBuffer is an unbounded channel of item.
-type recvBuffer struct {
-	c       chan item
-	mu      sync.Mutex
-	backlog []item
-}
-
-func newRecvBuffer() *recvBuffer {
-	b := &recvBuffer{
-		c: make(chan item, 1),
-	}
-	return b
-}
-
-func (b *recvBuffer) put(r item) {
-	b.mu.Lock()
-	defer b.mu.Unlock()
-	if len(b.backlog) == 0 {
-		select {
-		case b.c <- r:
-			return
-		default:
-		}
-	}
-	b.backlog = append(b.backlog, r)
-}
-
-func (b *recvBuffer) load() {
-	b.mu.Lock()
-	defer b.mu.Unlock()
-	if len(b.backlog) > 0 {
-		select {
-		case b.c <- b.backlog[0]:
-			b.backlog = b.backlog[1:]
-		default:
-		}
-	}
-}
-
-// get returns the channel that receives an item in the buffer.
-//
-// Upon receipt of an item, the caller should call load to send another
-// item onto the channel if there is any.
-func (b *recvBuffer) get() <-chan item {
-	return b.c
-}
-
-// recvBufferReader implements the io.Reader interface to read the data from
-// recvBuffer.
-type recvBufferReader struct {
-	ctx    context.Context
-	goAway chan struct{}
-	recv   *recvBuffer
-	last   *bytes.Reader // Stores the remaining data in the previous calls.
-	err    error
-}
-
-// Read reads the next len(p) bytes from last. If last is drained, it tries to
-// read additional data from recv. It blocks if there is no additional data
-// available in recv. If Read returns any non-nil error, it will continue to
-// return that error.
-func (r *recvBufferReader) Read(p []byte) (n int, err error) {
-	if r.err != nil {
-		return 0, r.err
-	}
-	defer func() { r.err = err }()
-	if r.last != nil && r.last.Len() > 0 {
-		// Read remaining data left in last call.
-		return r.last.Read(p)
-	}
-	select {
-	case <-r.ctx.Done():
-		return 0, ContextErr(r.ctx.Err())
-	case <-r.goAway:
-		return 0, ErrStreamDrain
-	case i := <-r.recv.get():
-		r.recv.load()
-		m := i.(*recvMsg)
-		if m.err != nil {
-			return 0, m.err
-		}
-		r.last = bytes.NewReader(m.data)
-		return r.last.Read(p)
-	}
-}
-
-type streamState uint8
-
-const (
-	streamActive    streamState = iota
-	streamWriteDone // EndStream sent
-	streamReadDone  // EndStream received
-	streamDone      // the entire stream is finished.
-)
-
-// Stream represents an RPC in the transport layer.
-type Stream struct {
-	id uint32
-	// nil for client side Stream.
-	st ServerTransport
-	// ctx is the associated context of the stream.
-	ctx context.Context
-	// cancel is always nil for client side Stream.
-	cancel context.CancelFunc
-	// done is closed when the final status arrives.
-	done chan struct{}
-	// goAway is closed when the server sent a GOAWAY signal before this stream was initiated.
-	goAway chan struct{}
-	// method records the associated RPC method of the stream.
-	method       string
-	recvCompress string
-	sendCompress string
-	buf          *recvBuffer
-	dec          io.Reader
-	fc           *inFlow
-	recvQuota    uint32
-	// The accumulated inbound quota pending for window update.
-	updateQuota uint32
-	// The handler to control the window update procedure for both this
-	// particular stream and the associated transport.
-	windowHandler func(int)
-
-	sendQuotaPool *quotaPool
-	// Close headerChan to indicate the end of reception of header metadata.
-	headerChan chan struct{}
-	// header caches the received header metadata.
-	header metadata.MD
-	// The key-value map of trailer metadata.
-	trailer metadata.MD
-
-	mu sync.RWMutex // guard the following
-	// headerOk becomes true when the first header is about to be sent.
-	headerOk bool
-	state    streamState
-	// true iff headerChan is closed. Used to avoid closing headerChan
-	// multiple times.
-	headerDone bool
-	// the status received from the server.
-	statusCode codes.Code
-	statusDesc string
-}
-
-// RecvCompress returns the compression algorithm applied to the inbound
-// message. It is an empty string if there is no compression applied.
-func (s *Stream) RecvCompress() string {
-	return s.recvCompress
-}
-
-// SetSendCompress sets the compression algorithm for the stream.
-func (s *Stream) SetSendCompress(str string) {
-	s.sendCompress = str
-}
-
-// Done returns a channel which is closed when it receives the final status
-// from the server.
-func (s *Stream) Done() <-chan struct{} {
-	return s.done
-}
-
-// GoAway returns a channel which is closed when the server sent a GOAWAY signal
-// before this stream was initiated.
-func (s *Stream) GoAway() <-chan struct{} {
-	return s.goAway
-}
-
-// Header acquires the key-value pairs of header metadata once it
-// is available. It blocks until i) the metadata is ready or ii) there is no
-// header metadata or iii) the stream is cancelled/expired.
-func (s *Stream) Header() (metadata.MD, error) {
-	select {
-	case <-s.ctx.Done():
-		return nil, ContextErr(s.ctx.Err())
-	case <-s.goAway:
-		return nil, ErrStreamDrain
-	case <-s.headerChan:
-		return s.header.Copy(), nil
-	}
-}
-
-// Trailer returns the cached trailer metadata. Note that if it is not called
-// after the entire stream is done, it could return an empty MD. Client
-// side only.
-func (s *Stream) Trailer() metadata.MD {
-	s.mu.RLock()
-	defer s.mu.RUnlock()
-	return s.trailer.Copy()
-}
-
-// ServerTransport returns the underlying ServerTransport for the stream.
-// The client side stream always returns nil.
-func (s *Stream) ServerTransport() ServerTransport {
-	return s.st
-}
-
-// Context returns the context of the stream.
-func (s *Stream) Context() context.Context {
-	return s.ctx
-}
-
-// TraceContext recreates the context of s with a trace.Trace.
-func (s *Stream) TraceContext(tr trace.Trace) {
-	s.ctx = trace.NewContext(s.ctx, tr)
-}
-
-// Method returns the method for the stream.
-func (s *Stream) Method() string {
-	return s.method
-}
-
-// StatusCode returns statusCode received from the server.
-func (s *Stream) StatusCode() codes.Code {
-	return s.statusCode
-}
-
-// StatusDesc returns statusDesc received from the server.
-func (s *Stream) StatusDesc() string {
-	return s.statusDesc
-}
-
-// ErrIllegalTrailerSet indicates that the trailer has already been set or it
-// is too late to do so.
-var ErrIllegalTrailerSet = errors.New("transport: trailer has been set")
-
-// SetTrailer sets the trailer metadata which will be sent with the RPC status
-// by the server. This can only be called at most once. Server side only.
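-//
-// For illustration (the key and value are placeholders; metadata.Pairs is
-// from google.golang.org/grpc/metadata, imported above):
-//
-//	if err := stream.SetTrailer(metadata.Pairs("retry-after", "5")); err != nil {
-//		// ErrIllegalTrailerSet: the trailer was already set.
-//	}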
-func (s *Stream) SetTrailer(md metadata.MD) error { - s.mu.Lock() - defer s.mu.Unlock() - if s.trailer != nil { - return ErrIllegalTrailerSet - } - s.trailer = md.Copy() - return nil -} - -func (s *Stream) write(m recvMsg) { - s.buf.put(&m) -} - -// Read reads all the data available for this Stream from the transport and -// passes them into the decoder, which converts them into a gRPC message stream. -// The error is io.EOF when the stream is done or another non-nil error if -// the stream broke. -func (s *Stream) Read(p []byte) (n int, err error) { - n, err = s.dec.Read(p) - if err != nil { - return - } - s.windowHandler(n) - return -} - -// The key to save transport.Stream in the context. -type streamKey struct{} - -// newContextWithStream creates a new context from ctx and attaches stream -// to it. -func newContextWithStream(ctx context.Context, stream *Stream) context.Context { - return context.WithValue(ctx, streamKey{}, stream) -} - -// StreamFromContext returns the stream saved in ctx. -func StreamFromContext(ctx context.Context) (s *Stream, ok bool) { - s, ok = ctx.Value(streamKey{}).(*Stream) - return -} - -// state of transport -type transportState int - -const ( - reachable transportState = iota - unreachable - closing - draining -) - -// NewServerTransport creates a ServerTransport with conn or non-nil error -// if it fails. -func NewServerTransport(protocol string, conn net.Conn, maxStreams uint32, authInfo credentials.AuthInfo) (ServerTransport, error) { - return newHTTP2Server(conn, maxStreams, authInfo) -} - -// ConnectOptions covers all relevant options for dialing a server. -type ConnectOptions struct { - // UserAgent is the application user agent. - UserAgent string - // Dialer specifies how to dial a network address. - Dialer func(context.Context, string) (net.Conn, error) - // PerRPCCredentials stores the PerRPCCredentials required to issue RPCs. - PerRPCCredentials []credentials.PerRPCCredentials - // TransportCredentials stores the Authenticator required to setup a client connection. - TransportCredentials credentials.TransportCredentials -} - -// NewClientTransport establishes the transport with the required ConnectOptions -// and returns it to the caller. -func NewClientTransport(ctx context.Context, target string, opts ConnectOptions) (ClientTransport, error) { - return newHTTP2Client(ctx, target, opts) -} - -// Options provides additional hints and information for message -// transmission. -type Options struct { - // Last indicates whether this write is the last piece for - // this stream. - Last bool - - // Delay is a hint to the transport implementation for whether - // the data could be buffered for a batching write. The - // Transport implementation may ignore the hint. - Delay bool -} - -// CallHdr carries the information of a particular RPC. -type CallHdr struct { - // Host specifies the peer's host. - Host string - - // Method specifies the operation to perform. - Method string - - // RecvCompress specifies the compression algorithm applied on - // inbound messages. - RecvCompress string - - // SendCompress specifies the compression algorithm applied on - // outbound message. - SendCompress string - - // Flush indicates whether a new stream command should be sent - // to the peer without waiting for the first data. This is - // only a hint. The transport may modify the flush decision - // for performance purposes. - Flush bool -} - -// ClientTransport is the common interface for all gRPC client-side transport -// implementations. 
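-//
-// In sketch form, a unary RPC over a ClientTransport looks like the following
-// (error handling elided; host, method and payload are placeholders):
-//
-//	s, _ := ct.NewStream(ctx, &CallHdr{Host: host, Method: method})
-//	_ = ct.Write(s, payload, &Options{Last: true})
-//	<-s.Done()
-//	ct.CloseStream(s, nil)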
-type ClientTransport interface {
-	// Close tears down this transport. Once it returns, the transport
-	// should not be accessed any more. The caller must make sure this
-	// is called only once.
-	Close() error
-
-	// GracefulClose starts to tear down the transport. It stops accepting
-	// new RPCs and waits for the completion of the pending RPCs.
-	GracefulClose() error
-
-	// Write sends the data for the given stream. A nil stream indicates
-	// the write is to be performed on the transport as a whole.
-	Write(s *Stream, data []byte, opts *Options) error
-
-	// NewStream creates a Stream for an RPC.
-	NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error)
-
-	// CloseStream clears the footprint of a stream when the stream is
-	// not needed any more. The err indicates the error incurred when
-	// CloseStream is called. Must be called when a stream is finished
-	// unless the associated transport is closing.
-	CloseStream(stream *Stream, err error)
-
-	// Error returns a channel that is closed when some I/O error
-	// happens. Typically the caller should have a goroutine to monitor
-	// this in order to take action (e.g., close the current transport
-	// and create a new one) in the error case. It should not return nil
-	// once the transport is initiated.
-	Error() <-chan struct{}
-
-	// GoAway returns a channel that is closed when the ClientTransport
-	// receives the draining signal from the server (e.g., GOAWAY frame in
-	// HTTP/2).
-	GoAway() <-chan struct{}
-}
-
-// ServerTransport is the common interface for all gRPC server-side transport
-// implementations.
-//
-// Methods may be called concurrently from multiple goroutines, but
-// Write methods for a given Stream will be called serially.
-type ServerTransport interface {
-	// HandleStreams receives incoming streams using the given handler.
-	HandleStreams(func(*Stream))
-
-	// WriteHeader sends the header metadata for the given stream.
-	// WriteHeader may not be called on all streams.
-	WriteHeader(s *Stream, md metadata.MD) error
-
-	// Write sends the data for the given stream.
-	// Write may not be called on all streams.
-	Write(s *Stream, data []byte, opts *Options) error
-
-	// WriteStatus sends the status of a stream to the client.
-	// WriteStatus is the final call made on a stream and always
-	// occurs.
-	WriteStatus(s *Stream, statusCode codes.Code, statusDesc string) error
-
-	// Close tears down the transport. Once it is called, the transport
-	// should not be accessed any more. All the pending streams and their
-	// handlers will be terminated asynchronously.
-	Close() error
-
-	// RemoteAddr returns the remote network address.
-	RemoteAddr() net.Addr
-
-	// Drain notifies the client that this ServerTransport stops accepting new RPCs.
-	Drain()
-}
-
-// streamErrorf creates a StreamError with the specified error code and description.
-func streamErrorf(c codes.Code, format string, a ...interface{}) StreamError {
-	return StreamError{
-		Code: c,
-		Desc: fmt.Sprintf(format, a...),
-	}
-}
-
-// connectionErrorf creates a ConnectionError with the specified error description.
-func connectionErrorf(temp bool, e error, format string, a ...interface{}) ConnectionError {
-	return ConnectionError{
-		Desc: fmt.Sprintf(format, a...),
-		temp: temp,
-		err:  e,
-	}
-}
-
-// ConnectionError is an error that results in the termination of the
-// entire connection and the retry of all the active streams.
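-//
-// Callers can branch on Temporary (defined below) to decide whether to
-// redial, e.g.:
-//
-//	if ce, ok := err.(ConnectionError); ok && ce.Temporary() {
-//		// Re-establish the connection and retry the active streams.
-//	}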
-type ConnectionError struct {
-	Desc string
-	temp bool
-	err  error
-}
-
-func (e ConnectionError) Error() string {
-	return fmt.Sprintf("connection error: desc = %q", e.Desc)
-}
-
-// Temporary indicates if this connection error is temporary or fatal.
-func (e ConnectionError) Temporary() bool {
-	return e.temp
-}
-
-// Origin returns the original error of this connection error.
-func (e ConnectionError) Origin() error {
-	// Never return nil error here.
-	// If the original error is nil, return itself.
-	if e.err == nil {
-		return e
-	}
-	return e.err
-}
-
-var (
-	// ErrConnClosing indicates that the transport is closing.
-	ErrConnClosing = connectionErrorf(true, nil, "transport is closing")
-	// ErrStreamDrain indicates that the stream is rejected by the server because
-	// the server stops accepting new RPCs.
-	ErrStreamDrain = streamErrorf(codes.Unavailable, "the server stops accepting new RPCs")
-)
-
-// StreamError is an error that only affects one stream within a connection.
-type StreamError struct {
-	Code codes.Code
-	Desc string
-}
-
-func (e StreamError) Error() string {
-	return fmt.Sprintf("stream error: code = %d desc = %q", e.Code, e.Desc)
-}
-
-// ContextErr converts the error from the context package into a StreamError.
-func ContextErr(err error) StreamError {
-	switch err {
-	case context.DeadlineExceeded:
-		return streamErrorf(codes.DeadlineExceeded, "%v", err)
-	case context.Canceled:
-		return streamErrorf(codes.Canceled, "%v", err)
-	}
-	panic(fmt.Sprintf("Unexpected error from the context package: %v", err))
-}
-
-// wait blocks until it can receive from ctx.Done, done, goAway, closing, or
-// proceed.
-// If it receives from ctx.Done, it returns 0, the StreamError for ctx.Err.
-// If it receives from done, it returns 0, io.EOF if ctx is not done; otherwise
-// it returns the StreamError for ctx.Err.
-// If it receives from goAway, it returns 0, ErrStreamDrain.
-// If it receives from closing, it returns 0, ErrConnClosing.
-// If it receives from proceed, it returns the received integer, nil.
-func wait(ctx context.Context, done, goAway, closing <-chan struct{}, proceed <-chan int) (int, error) {
-	select {
-	case <-ctx.Done():
-		return 0, ContextErr(ctx.Err())
-	case <-done:
-		// User cancellation has precedence.
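-		// (ctx.Done is re-checked in the nested select below, so a
-		// cancelled RPC reports the context error rather than io.EOF.)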
- select { - case <-ctx.Done(): - return 0, ContextErr(ctx.Err()) - default: - } - return 0, io.EOF - case <-goAway: - return 0, ErrStreamDrain - case <-closing: - return 0, ErrConnClosing - case i := <-proceed: - return i, nil - } -} diff --git a/vendor/gopkg.in/yaml.v2/BUILD b/vendor/gopkg.in/yaml.v2/BUILD index ff1ecc8eb342..084a3980a705 100644 --- a/vendor/gopkg.in/yaml.v2/BUILD +++ b/vendor/gopkg.in/yaml.v2/BUILD @@ -1,9 +1,4 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "go_default_library", @@ -23,6 +18,21 @@ go_library( "yamlprivateh.go", ], importpath = "gopkg.in/yaml.v2", + visibility = ["//visibility:public"], +) + +go_test( + name = "go_default_xtest", + srcs = [ + "decode_test.go", + "encode_test.go", + "suite_test.go", + ], + importpath = "gopkg.in/yaml.v2_test", + deps = [ + ":go_default_library", + "//vendor/gopkg.in/check.v1:go_default_library", + ], ) filegroup( @@ -36,4 +46,5 @@ filegroup( name = "all-srcs", srcs = [":package-srcs"], tags = ["automanaged"], + visibility = ["//visibility:public"], ) diff --git a/vendor/gopkg.in/yaml.v2/decode_test.go b/vendor/gopkg.in/yaml.v2/decode_test.go new file mode 100644 index 000000000000..c159760b64f9 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/decode_test.go @@ -0,0 +1,988 @@ +package yaml_test + +import ( + "errors" + . "gopkg.in/check.v1" + "gopkg.in/yaml.v2" + "math" + "net" + "reflect" + "strings" + "time" +) + +var unmarshalIntTest = 123 + +var unmarshalTests = []struct { + data string + value interface{} +}{ + { + "", + &struct{}{}, + }, { + "{}", &struct{}{}, + }, { + "v: hi", + map[string]string{"v": "hi"}, + }, { + "v: hi", map[string]interface{}{"v": "hi"}, + }, { + "v: true", + map[string]string{"v": "true"}, + }, { + "v: true", + map[string]interface{}{"v": true}, + }, { + "v: 10", + map[string]interface{}{"v": 10}, + }, { + "v: 0b10", + map[string]interface{}{"v": 2}, + }, { + "v: 0xA", + map[string]interface{}{"v": 10}, + }, { + "v: 4294967296", + map[string]int64{"v": 4294967296}, + }, { + "v: 0.1", + map[string]interface{}{"v": 0.1}, + }, { + "v: .1", + map[string]interface{}{"v": 0.1}, + }, { + "v: .Inf", + map[string]interface{}{"v": math.Inf(+1)}, + }, { + "v: -.Inf", + map[string]interface{}{"v": math.Inf(-1)}, + }, { + "v: -10", + map[string]interface{}{"v": -10}, + }, { + "v: -.1", + map[string]interface{}{"v": -0.1}, + }, + + // Simple values. + { + "123", + &unmarshalIntTest, + }, + + // Floats from spec + { + "canonical: 6.8523e+5", + map[string]interface{}{"canonical": 6.8523e+5}, + }, { + "expo: 685.230_15e+03", + map[string]interface{}{"expo": 685.23015e+03}, + }, { + "fixed: 685_230.15", + map[string]interface{}{"fixed": 685230.15}, + }, { + "neginf: -.inf", + map[string]interface{}{"neginf": math.Inf(-1)}, + }, { + "fixed: 685_230.15", + map[string]float64{"fixed": 685230.15}, + }, + //{"sexa: 190:20:30.15", map[string]interface{}{"sexa": 0}}, // Unsupported + //{"notanum: .NaN", map[string]interface{}{"notanum": math.NaN()}}, // Equality of NaN fails. 
+ + // Bools from spec + { + "canonical: y", + map[string]interface{}{"canonical": true}, + }, { + "answer: NO", + map[string]interface{}{"answer": false}, + }, { + "logical: True", + map[string]interface{}{"logical": true}, + }, { + "option: on", + map[string]interface{}{"option": true}, + }, { + "option: on", + map[string]bool{"option": true}, + }, + // Ints from spec + { + "canonical: 685230", + map[string]interface{}{"canonical": 685230}, + }, { + "decimal: +685_230", + map[string]interface{}{"decimal": 685230}, + }, { + "octal: 02472256", + map[string]interface{}{"octal": 685230}, + }, { + "hexa: 0x_0A_74_AE", + map[string]interface{}{"hexa": 685230}, + }, { + "bin: 0b1010_0111_0100_1010_1110", + map[string]interface{}{"bin": 685230}, + }, { + "bin: -0b101010", + map[string]interface{}{"bin": -42}, + }, { + "decimal: +685_230", + map[string]int{"decimal": 685230}, + }, + + //{"sexa: 190:20:30", map[string]interface{}{"sexa": 0}}, // Unsupported + + // Nulls from spec + { + "empty:", + map[string]interface{}{"empty": nil}, + }, { + "canonical: ~", + map[string]interface{}{"canonical": nil}, + }, { + "english: null", + map[string]interface{}{"english": nil}, + }, { + "~: null key", + map[interface{}]string{nil: "null key"}, + }, { + "empty:", + map[string]*bool{"empty": nil}, + }, + + // Flow sequence + { + "seq: [A,B]", + map[string]interface{}{"seq": []interface{}{"A", "B"}}, + }, { + "seq: [A,B,C,]", + map[string][]string{"seq": []string{"A", "B", "C"}}, + }, { + "seq: [A,1,C]", + map[string][]string{"seq": []string{"A", "1", "C"}}, + }, { + "seq: [A,1,C]", + map[string][]int{"seq": []int{1}}, + }, { + "seq: [A,1,C]", + map[string]interface{}{"seq": []interface{}{"A", 1, "C"}}, + }, + // Block sequence + { + "seq:\n - A\n - B", + map[string]interface{}{"seq": []interface{}{"A", "B"}}, + }, { + "seq:\n - A\n - B\n - C", + map[string][]string{"seq": []string{"A", "B", "C"}}, + }, { + "seq:\n - A\n - 1\n - C", + map[string][]string{"seq": []string{"A", "1", "C"}}, + }, { + "seq:\n - A\n - 1\n - C", + map[string][]int{"seq": []int{1}}, + }, { + "seq:\n - A\n - 1\n - C", + map[string]interface{}{"seq": []interface{}{"A", 1, "C"}}, + }, + + // Literal block scalar + { + "scalar: | # Comment\n\n literal\n\n \ttext\n\n", + map[string]string{"scalar": "\nliteral\n\n\ttext\n"}, + }, + + // Folded block scalar + { + "scalar: > # Comment\n\n folded\n line\n \n next\n line\n * one\n * two\n\n last\n line\n\n", + map[string]string{"scalar": "\nfolded line\nnext line\n * one\n * two\n\nlast line\n"}, + }, + + // Map inside interface with no type hints. + { + "a: {b: c}", + map[interface{}]interface{}{"a": map[interface{}]interface{}{"b": "c"}}, + }, + + // Structs and type conversions. 
+ { + "hello: world", + &struct{ Hello string }{"world"}, + }, { + "a: {b: c}", + &struct{ A struct{ B string } }{struct{ B string }{"c"}}, + }, { + "a: {b: c}", + &struct{ A *struct{ B string } }{&struct{ B string }{"c"}}, + }, { + "a: {b: c}", + &struct{ A map[string]string }{map[string]string{"b": "c"}}, + }, { + "a: {b: c}", + &struct{ A *map[string]string }{&map[string]string{"b": "c"}}, + }, { + "a:", + &struct{ A map[string]string }{}, + }, { + "a: 1", + &struct{ A int }{1}, + }, { + "a: 1", + &struct{ A float64 }{1}, + }, { + "a: 1.0", + &struct{ A int }{1}, + }, { + "a: 1.0", + &struct{ A uint }{1}, + }, { + "a: [1, 2]", + &struct{ A []int }{[]int{1, 2}}, + }, { + "a: 1", + &struct{ B int }{0}, + }, { + "a: 1", + &struct { + B int "a" + }{1}, + }, { + "a: y", + &struct{ A bool }{true}, + }, + + // Some cross type conversions + { + "v: 42", + map[string]uint{"v": 42}, + }, { + "v: -42", + map[string]uint{}, + }, { + "v: 4294967296", + map[string]uint64{"v": 4294967296}, + }, { + "v: -4294967296", + map[string]uint64{}, + }, + + // int + { + "int_max: 2147483647", + map[string]int{"int_max": math.MaxInt32}, + }, + { + "int_min: -2147483648", + map[string]int{"int_min": math.MinInt32}, + }, + { + "int_overflow: 9223372036854775808", // math.MaxInt64 + 1 + map[string]int{}, + }, + + // int64 + { + "int64_max: 9223372036854775807", + map[string]int64{"int64_max": math.MaxInt64}, + }, + { + "int64_max_base2: 0b111111111111111111111111111111111111111111111111111111111111111", + map[string]int64{"int64_max_base2": math.MaxInt64}, + }, + { + "int64_min: -9223372036854775808", + map[string]int64{"int64_min": math.MinInt64}, + }, + { + "int64_neg_base2: -0b111111111111111111111111111111111111111111111111111111111111111", + map[string]int64{"int64_neg_base2": -math.MaxInt64}, + }, + { + "int64_overflow: 9223372036854775808", // math.MaxInt64 + 1 + map[string]int64{}, + }, + + // uint + { + "uint_min: 0", + map[string]uint{"uint_min": 0}, + }, + { + "uint_max: 4294967295", + map[string]uint{"uint_max": math.MaxUint32}, + }, + { + "uint_underflow: -1", + map[string]uint{}, + }, + + // uint64 + { + "uint64_min: 0", + map[string]uint{"uint64_min": 0}, + }, + { + "uint64_max: 18446744073709551615", + map[string]uint64{"uint64_max": math.MaxUint64}, + }, + { + "uint64_max_base2: 0b1111111111111111111111111111111111111111111111111111111111111111", + map[string]uint64{"uint64_max_base2": math.MaxUint64}, + }, + { + "uint64_maxint64: 9223372036854775807", + map[string]uint64{"uint64_maxint64": math.MaxInt64}, + }, + { + "uint64_underflow: -1", + map[string]uint64{}, + }, + + // float32 + { + "float32_max: 3.40282346638528859811704183484516925440e+38", + map[string]float32{"float32_max": math.MaxFloat32}, + }, + { + "float32_nonzero: 1.401298464324817070923729583289916131280e-45", + map[string]float32{"float32_nonzero": math.SmallestNonzeroFloat32}, + }, + { + "float32_maxuint64: 18446744073709551615", + map[string]float32{"float32_maxuint64": float32(math.MaxUint64)}, + }, + { + "float32_maxuint64+1: 18446744073709551616", + map[string]float32{"float32_maxuint64+1": float32(math.MaxUint64 + 1)}, + }, + + // float64 + { + "float64_max: 1.797693134862315708145274237317043567981e+308", + map[string]float64{"float64_max": math.MaxFloat64}, + }, + { + "float64_nonzero: 4.940656458412465441765687928682213723651e-324", + map[string]float64{"float64_nonzero": math.SmallestNonzeroFloat64}, + }, + { + "float64_maxuint64: 18446744073709551615", + map[string]float64{"float64_maxuint64": float64(math.MaxUint64)}, 
+ }, + { + "float64_maxuint64+1: 18446744073709551616", + map[string]float64{"float64_maxuint64+1": float64(math.MaxUint64 + 1)}, + }, + + // Overflow cases. + { + "v: 4294967297", + map[string]int32{}, + }, { + "v: 128", + map[string]int8{}, + }, + + // Quoted values. + { + "'1': '\"2\"'", + map[interface{}]interface{}{"1": "\"2\""}, + }, { + "v:\n- A\n- 'B\n\n C'\n", + map[string][]string{"v": []string{"A", "B\nC"}}, + }, + + // Explicit tags. + { + "v: !!float '1.1'", + map[string]interface{}{"v": 1.1}, + }, { + "v: !!null ''", + map[string]interface{}{"v": nil}, + }, { + "%TAG !y! tag:yaml.org,2002:\n---\nv: !y!int '1'", + map[string]interface{}{"v": 1}, + }, + + // Anchors and aliases. + { + "a: &x 1\nb: &y 2\nc: *x\nd: *y\n", + &struct{ A, B, C, D int }{1, 2, 1, 2}, + }, { + "a: &a {c: 1}\nb: *a", + &struct { + A, B struct { + C int + } + }{struct{ C int }{1}, struct{ C int }{1}}, + }, { + "a: &a [1, 2]\nb: *a", + &struct{ B []int }{[]int{1, 2}}, + }, { + "b: *a\na: &a {c: 1}", + &struct { + A, B struct { + C int + } + }{struct{ C int }{1}, struct{ C int }{1}}, + }, + + // Bug #1133337 + { + "foo: ''", + map[string]*string{"foo": new(string)}, + }, { + "foo: null", + map[string]string{"foo": ""}, + }, { + "foo: null", + map[string]interface{}{"foo": nil}, + }, + + // Ignored field + { + "a: 1\nb: 2\n", + &struct { + A int + B int "-" + }{1, 0}, + }, + + // Bug #1191981 + { + "" + + "%YAML 1.1\n" + + "--- !!str\n" + + `"Generic line break (no glyph)\n\` + "\n" + + ` Generic line break (glyphed)\n\` + "\n" + + ` Line separator\u2028\` + "\n" + + ` Paragraph separator\u2029"` + "\n", + "" + + "Generic line break (no glyph)\n" + + "Generic line break (glyphed)\n" + + "Line separator\u2028Paragraph separator\u2029", + }, + + // Struct inlining + { + "a: 1\nb: 2\nc: 3\n", + &struct { + A int + C inlineB `yaml:",inline"` + }{1, inlineB{2, inlineC{3}}}, + }, + + // Map inlining + { + "a: 1\nb: 2\nc: 3\n", + &struct { + A int + C map[string]int `yaml:",inline"` + }{1, map[string]int{"b": 2, "c": 3}}, + }, + + // bug 1243827 + { + "a: -b_c", + map[string]interface{}{"a": "-b_c"}, + }, + { + "a: +b_c", + map[string]interface{}{"a": "+b_c"}, + }, + { + "a: 50cent_of_dollar", + map[string]interface{}{"a": "50cent_of_dollar"}, + }, + + // Duration + { + "a: 3s", + map[string]time.Duration{"a": 3 * time.Second}, + }, + + // Issue #24. + { + "a: ", + map[string]string{"a": ""}, + }, + + // Base 60 floats are obsolete and unsupported. + { + "a: 1:1\n", + map[string]string{"a": "1:1"}, + }, + + // Binary data. + { + "a: !!binary gIGC\n", + map[string]string{"a": "\x80\x81\x82"}, + }, { + "a: !!binary |\n " + strings.Repeat("kJCQ", 17) + "kJ\n CQ\n", + map[string]string{"a": strings.Repeat("\x90", 54)}, + }, { + "a: !!binary |\n " + strings.Repeat("A", 70) + "\n ==\n", + map[string]string{"a": strings.Repeat("\x00", 52)}, + }, + + // Ordered maps. + { + "{b: 2, a: 1, d: 4, c: 3, sub: {e: 5}}", + &yaml.MapSlice{{"b", 2}, {"a", 1}, {"d", 4}, {"c", 3}, {"sub", yaml.MapSlice{{"e", 5}}}}, + }, + + // Issue #39. + { + "a:\n b:\n c: d\n", + map[string]struct{ B interface{} }{"a": {map[interface{}]interface{}{"c": "d"}}}, + }, + + // Custom map type. + { + "a: {b: c}", + M{"a": M{"b": "c"}}, + }, + + // Support encoding.TextUnmarshaler. + { + "a: 1.2.3.4\n", + map[string]net.IP{"a": net.IPv4(1, 2, 3, 4)}, + }, + { + "a: 2015-02-24T18:19:39Z\n", + map[string]time.Time{"a": time.Unix(1424801979, 0)}, + }, + + // Encode empty lists as zero-length slices. 
+ { + "a: []", + &struct{ A []int }{[]int{}}, + }, + + // UTF-16-LE + { + "\xff\xfe\xf1\x00o\x00\xf1\x00o\x00:\x00 \x00v\x00e\x00r\x00y\x00 \x00y\x00e\x00s\x00\n\x00", + M{"ñoño": "very yes"}, + }, + // UTF-16-LE with surrogate. + { + "\xff\xfe\xf1\x00o\x00\xf1\x00o\x00:\x00 \x00v\x00e\x00r\x00y\x00 \x00y\x00e\x00s\x00 \x00=\xd8\xd4\xdf\n\x00", + M{"ñoño": "very yes 🟔"}, + }, + + // UTF-16-BE + { + "\xfe\xff\x00\xf1\x00o\x00\xf1\x00o\x00:\x00 \x00v\x00e\x00r\x00y\x00 \x00y\x00e\x00s\x00\n", + M{"ñoño": "very yes"}, + }, + // UTF-16-BE with surrogate. + { + "\xfe\xff\x00\xf1\x00o\x00\xf1\x00o\x00:\x00 \x00v\x00e\x00r\x00y\x00 \x00y\x00e\x00s\x00 \xd8=\xdf\xd4\x00\n", + M{"ñoño": "very yes 🟔"}, + }, +} + +type M map[interface{}]interface{} + +type inlineB struct { + B int + inlineC `yaml:",inline"` +} + +type inlineC struct { + C int +} + +func (s *S) TestUnmarshal(c *C) { + for _, item := range unmarshalTests { + t := reflect.ValueOf(item.value).Type() + var value interface{} + switch t.Kind() { + case reflect.Map: + value = reflect.MakeMap(t).Interface() + case reflect.String: + value = reflect.New(t).Interface() + case reflect.Ptr: + value = reflect.New(t.Elem()).Interface() + default: + c.Fatalf("missing case for %s", t) + } + err := yaml.Unmarshal([]byte(item.data), value) + if _, ok := err.(*yaml.TypeError); !ok { + c.Assert(err, IsNil) + } + if t.Kind() == reflect.String { + c.Assert(*value.(*string), Equals, item.value) + } else { + c.Assert(value, DeepEquals, item.value) + } + } +} + +func (s *S) TestUnmarshalNaN(c *C) { + value := map[string]interface{}{} + err := yaml.Unmarshal([]byte("notanum: .NaN"), &value) + c.Assert(err, IsNil) + c.Assert(math.IsNaN(value["notanum"].(float64)), Equals, true) +} + +var unmarshalErrorTests = []struct { + data, error string +}{ + {"v: !!float 'error'", "yaml: cannot decode !!str `error` as a !!float"}, + {"v: [A,", "yaml: line 1: did not find expected node content"}, + {"v:\n- [A,", "yaml: line 2: did not find expected node content"}, + {"a: *b\n", "yaml: unknown anchor 'b' referenced"}, + {"a: &a\n b: *a\n", "yaml: anchor 'a' value contains itself"}, + {"value: -", "yaml: block sequence entries are not allowed in this context"}, + {"a: !!binary ==", "yaml: !!binary value contains invalid base64 data"}, + {"{[.]}", `yaml: invalid map key: \[\]interface \{\}\{"\."\}`}, + {"{{.}}", `yaml: invalid map key: map\[interface\ \{\}\]interface \{\}\{".":interface \{\}\(nil\)\}`}, +} + +func (s *S) TestUnmarshalErrors(c *C) { + for _, item := range unmarshalErrorTests { + var value interface{} + err := yaml.Unmarshal([]byte(item.data), &value) + c.Assert(err, ErrorMatches, item.error, Commentf("Partial unmarshal: %#v", value)) + } +} + +var unmarshalerTests = []struct { + data, tag string + value interface{} +}{ + {"_: {hi: there}", "!!map", map[interface{}]interface{}{"hi": "there"}}, + {"_: [1,A]", "!!seq", []interface{}{1, "A"}}, + {"_: 10", "!!int", 10}, + {"_: null", "!!null", nil}, + {`_: BAR!`, "!!str", "BAR!"}, + {`_: "BAR!"`, "!!str", "BAR!"}, + {"_: !!foo 'BAR!'", "!!foo", "BAR!"}, +} + +var unmarshalerResult = map[int]error{} + +type unmarshalerType struct { + value interface{} +} + +func (o *unmarshalerType) UnmarshalYAML(unmarshal func(v interface{}) error) error { + if err := unmarshal(&o.value); err != nil { + return err + } + if i, ok := o.value.(int); ok { + if result, ok := unmarshalerResult[i]; ok { + return result + } + } + return nil +} + +type unmarshalerPointer struct { + Field *unmarshalerType "_" +} + +type unmarshalerValue struct 
{ + Field unmarshalerType "_" +} + +func (s *S) TestUnmarshalerPointerField(c *C) { + for _, item := range unmarshalerTests { + obj := &unmarshalerPointer{} + err := yaml.Unmarshal([]byte(item.data), obj) + c.Assert(err, IsNil) + if item.value == nil { + c.Assert(obj.Field, IsNil) + } else { + c.Assert(obj.Field, NotNil, Commentf("Pointer not initialized (%#v)", item.value)) + c.Assert(obj.Field.value, DeepEquals, item.value) + } + } +} + +func (s *S) TestUnmarshalerValueField(c *C) { + for _, item := range unmarshalerTests { + obj := &unmarshalerValue{} + err := yaml.Unmarshal([]byte(item.data), obj) + c.Assert(err, IsNil) + c.Assert(obj.Field, NotNil, Commentf("Pointer not initialized (%#v)", item.value)) + c.Assert(obj.Field.value, DeepEquals, item.value) + } +} + +func (s *S) TestUnmarshalerWholeDocument(c *C) { + obj := &unmarshalerType{} + err := yaml.Unmarshal([]byte(unmarshalerTests[0].data), obj) + c.Assert(err, IsNil) + value, ok := obj.value.(map[interface{}]interface{}) + c.Assert(ok, Equals, true, Commentf("value: %#v", obj.value)) + c.Assert(value["_"], DeepEquals, unmarshalerTests[0].value) +} + +func (s *S) TestUnmarshalerTypeError(c *C) { + unmarshalerResult[2] = &yaml.TypeError{[]string{"foo"}} + unmarshalerResult[4] = &yaml.TypeError{[]string{"bar"}} + defer func() { + delete(unmarshalerResult, 2) + delete(unmarshalerResult, 4) + }() + + type T struct { + Before int + After int + M map[string]*unmarshalerType + } + var v T + data := `{before: A, m: {abc: 1, def: 2, ghi: 3, jkl: 4}, after: B}` + err := yaml.Unmarshal([]byte(data), &v) + c.Assert(err, ErrorMatches, ""+ + "yaml: unmarshal errors:\n"+ + " line 1: cannot unmarshal !!str `A` into int\n"+ + " foo\n"+ + " bar\n"+ + " line 1: cannot unmarshal !!str `B` into int") + c.Assert(v.M["abc"], NotNil) + c.Assert(v.M["def"], IsNil) + c.Assert(v.M["ghi"], NotNil) + c.Assert(v.M["jkl"], IsNil) + + c.Assert(v.M["abc"].value, Equals, 1) + c.Assert(v.M["ghi"].value, Equals, 3) +} + +type proxyTypeError struct{} + +func (v *proxyTypeError) UnmarshalYAML(unmarshal func(interface{}) error) error { + var s string + var a int32 + var b int64 + if err := unmarshal(&s); err != nil { + panic(err) + } + if s == "a" { + if err := unmarshal(&b); err == nil { + panic("should have failed") + } + return unmarshal(&a) + } + if err := unmarshal(&a); err == nil { + panic("should have failed") + } + return unmarshal(&b) +} + +func (s *S) TestUnmarshalerTypeErrorProxying(c *C) { + type T struct { + Before int + After int + M map[string]*proxyTypeError + } + var v T + data := `{before: A, m: {abc: a, def: b}, after: B}` + err := yaml.Unmarshal([]byte(data), &v) + c.Assert(err, ErrorMatches, ""+ + "yaml: unmarshal errors:\n"+ + " line 1: cannot unmarshal !!str `A` into int\n"+ + " line 1: cannot unmarshal !!str `a` into int32\n"+ + " line 1: cannot unmarshal !!str `b` into int64\n"+ + " line 1: cannot unmarshal !!str `B` into int") +} + +type failingUnmarshaler struct{} + +var failingErr = errors.New("failingErr") + +func (ft *failingUnmarshaler) UnmarshalYAML(unmarshal func(interface{}) error) error { + return failingErr +} + +func (s *S) TestUnmarshalerError(c *C) { + err := yaml.Unmarshal([]byte("a: b"), &failingUnmarshaler{}) + c.Assert(err, Equals, failingErr) +} + +type sliceUnmarshaler []int + +func (su *sliceUnmarshaler) UnmarshalYAML(unmarshal func(interface{}) error) error { + var slice []int + err := unmarshal(&slice) + if err == nil { + *su = slice + return nil + } + + var intVal int + err = unmarshal(&intVal) + if err == nil { + *su = 
[]int{intVal} + return nil + } + + return err +} + +func (s *S) TestUnmarshalerRetry(c *C) { + var su sliceUnmarshaler + err := yaml.Unmarshal([]byte("[1, 2, 3]"), &su) + c.Assert(err, IsNil) + c.Assert(su, DeepEquals, sliceUnmarshaler([]int{1, 2, 3})) + + err = yaml.Unmarshal([]byte("1"), &su) + c.Assert(err, IsNil) + c.Assert(su, DeepEquals, sliceUnmarshaler([]int{1})) +} + +// From http://yaml.org/type/merge.html +var mergeTests = ` +anchors: + list: + - &CENTER { "x": 1, "y": 2 } + - &LEFT { "x": 0, "y": 2 } + - &BIG { "r": 10 } + - &SMALL { "r": 1 } + +# All the following maps are equal: + +plain: + # Explicit keys + "x": 1 + "y": 2 + "r": 10 + label: center/big + +mergeOne: + # Merge one map + << : *CENTER + "r": 10 + label: center/big + +mergeMultiple: + # Merge multiple maps + << : [ *CENTER, *BIG ] + label: center/big + +override: + # Override + << : [ *BIG, *LEFT, *SMALL ] + "x": 1 + label: center/big + +shortTag: + # Explicit short merge tag + !!merge "<<" : [ *CENTER, *BIG ] + label: center/big + +longTag: + # Explicit merge long tag + ! "<<" : [ *CENTER, *BIG ] + label: center/big + +inlineMap: + # Inlined map + << : {"x": 1, "y": 2, "r": 10} + label: center/big + +inlineSequenceMap: + # Inlined map in sequence + << : [ *CENTER, {"r": 10} ] + label: center/big +` + +func (s *S) TestMerge(c *C) { + var want = map[interface{}]interface{}{ + "x": 1, + "y": 2, + "r": 10, + "label": "center/big", + } + + var m map[interface{}]interface{} + err := yaml.Unmarshal([]byte(mergeTests), &m) + c.Assert(err, IsNil) + for name, test := range m { + if name == "anchors" { + continue + } + c.Assert(test, DeepEquals, want, Commentf("test %q failed", name)) + } +} + +func (s *S) TestMergeStruct(c *C) { + type Data struct { + X, Y, R int + Label string + } + want := Data{1, 2, 10, "center/big"} + + var m map[string]Data + err := yaml.Unmarshal([]byte(mergeTests), &m) + c.Assert(err, IsNil) + for name, test := range m { + if name == "anchors" { + continue + } + c.Assert(test, Equals, want, Commentf("test %q failed", name)) + } +} + +var unmarshalNullTests = []func() interface{}{ + func() interface{} { var v interface{}; v = "v"; return &v }, + func() interface{} { var s = "s"; return &s }, + func() interface{} { var s = "s"; sptr := &s; return &sptr }, + func() interface{} { var i = 1; return &i }, + func() interface{} { var i = 1; iptr := &i; return &iptr }, + func() interface{} { m := map[string]int{"s": 1}; return &m }, + func() interface{} { m := map[string]int{"s": 1}; return m }, +} + +func (s *S) TestUnmarshalNull(c *C) { + for _, test := range unmarshalNullTests { + item := test() + zero := reflect.Zero(reflect.TypeOf(item).Elem()).Interface() + err := yaml.Unmarshal([]byte("null"), item) + c.Assert(err, IsNil) + if reflect.TypeOf(item).Kind() == reflect.Map { + c.Assert(reflect.ValueOf(item).Interface(), DeepEquals, reflect.MakeMap(reflect.TypeOf(item)).Interface()) + } else { + c.Assert(reflect.ValueOf(item).Elem().Interface(), DeepEquals, zero) + } + } +} + +func (s *S) TestUnmarshalSliceOnPreset(c *C) { + // Issue #48. 
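+	// (A preset slice should be replaced by the decoded value, not
+	// appended to, so the expected result is [2], not [1 2].)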
+ v := struct{ A []int }{[]int{1}} + yaml.Unmarshal([]byte("a: [2]"), &v) + c.Assert(v.A, DeepEquals, []int{2}) +} + +//var data []byte +//func init() { +// var err error +// data, err = ioutil.ReadFile("/tmp/file.yaml") +// if err != nil { +// panic(err) +// } +//} +// +//func (s *S) BenchmarkUnmarshal(c *C) { +// var err error +// for i := 0; i < c.N; i++ { +// var v map[string]interface{} +// err = yaml.Unmarshal(data, &v) +// } +// if err != nil { +// panic(err) +// } +//} +// +//func (s *S) BenchmarkMarshal(c *C) { +// var v map[string]interface{} +// yaml.Unmarshal(data, &v) +// c.ResetTimer() +// for i := 0; i < c.N; i++ { +// yaml.Marshal(&v) +// } +//} diff --git a/vendor/gopkg.in/yaml.v2/encode_test.go b/vendor/gopkg.in/yaml.v2/encode_test.go new file mode 100644 index 000000000000..84099bd3850a --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/encode_test.go @@ -0,0 +1,501 @@ +package yaml_test + +import ( + "fmt" + "math" + "strconv" + "strings" + "time" + + . "gopkg.in/check.v1" + "gopkg.in/yaml.v2" + "net" + "os" +) + +var marshalIntTest = 123 + +var marshalTests = []struct { + value interface{} + data string +}{ + { + nil, + "null\n", + }, { + &struct{}{}, + "{}\n", + }, { + map[string]string{"v": "hi"}, + "v: hi\n", + }, { + map[string]interface{}{"v": "hi"}, + "v: hi\n", + }, { + map[string]string{"v": "true"}, + "v: \"true\"\n", + }, { + map[string]string{"v": "false"}, + "v: \"false\"\n", + }, { + map[string]interface{}{"v": true}, + "v: true\n", + }, { + map[string]interface{}{"v": false}, + "v: false\n", + }, { + map[string]interface{}{"v": 10}, + "v: 10\n", + }, { + map[string]interface{}{"v": -10}, + "v: -10\n", + }, { + map[string]uint{"v": 42}, + "v: 42\n", + }, { + map[string]interface{}{"v": int64(4294967296)}, + "v: 4294967296\n", + }, { + map[string]int64{"v": int64(4294967296)}, + "v: 4294967296\n", + }, { + map[string]uint64{"v": 4294967296}, + "v: 4294967296\n", + }, { + map[string]interface{}{"v": "10"}, + "v: \"10\"\n", + }, { + map[string]interface{}{"v": 0.1}, + "v: 0.1\n", + }, { + map[string]interface{}{"v": float64(0.1)}, + "v: 0.1\n", + }, { + map[string]interface{}{"v": -0.1}, + "v: -0.1\n", + }, { + map[string]interface{}{"v": math.Inf(+1)}, + "v: .inf\n", + }, { + map[string]interface{}{"v": math.Inf(-1)}, + "v: -.inf\n", + }, { + map[string]interface{}{"v": math.NaN()}, + "v: .nan\n", + }, { + map[string]interface{}{"v": nil}, + "v: null\n", + }, { + map[string]interface{}{"v": ""}, + "v: \"\"\n", + }, { + map[string][]string{"v": []string{"A", "B"}}, + "v:\n- A\n- B\n", + }, { + map[string][]string{"v": []string{"A", "B\nC"}}, + "v:\n- A\n- |-\n B\n C\n", + }, { + map[string][]interface{}{"v": []interface{}{"A", 1, map[string][]int{"B": []int{2, 3}}}}, + "v:\n- A\n- 1\n- B:\n - 2\n - 3\n", + }, { + map[string]interface{}{"a": map[interface{}]interface{}{"b": "c"}}, + "a:\n b: c\n", + }, { + map[string]interface{}{"a": "-"}, + "a: '-'\n", + }, + + // Simple values. 
+ { + &marshalIntTest, + "123\n", + }, + + // Structures + { + &struct{ Hello string }{"world"}, + "hello: world\n", + }, { + &struct { + A struct { + B string + } + }{struct{ B string }{"c"}}, + "a:\n b: c\n", + }, { + &struct { + A *struct { + B string + } + }{&struct{ B string }{"c"}}, + "a:\n b: c\n", + }, { + &struct { + A *struct { + B string + } + }{}, + "a: null\n", + }, { + &struct{ A int }{1}, + "a: 1\n", + }, { + &struct{ A []int }{[]int{1, 2}}, + "a:\n- 1\n- 2\n", + }, { + &struct { + B int "a" + }{1}, + "a: 1\n", + }, { + &struct{ A bool }{true}, + "a: true\n", + }, + + // Conditional flag + { + &struct { + A int "a,omitempty" + B int "b,omitempty" + }{1, 0}, + "a: 1\n", + }, { + &struct { + A int "a,omitempty" + B int "b,omitempty" + }{0, 0}, + "{}\n", + }, { + &struct { + A *struct{ X, y int } "a,omitempty,flow" + }{&struct{ X, y int }{1, 2}}, + "a: {x: 1}\n", + }, { + &struct { + A *struct{ X, y int } "a,omitempty,flow" + }{nil}, + "{}\n", + }, { + &struct { + A *struct{ X, y int } "a,omitempty,flow" + }{&struct{ X, y int }{}}, + "a: {x: 0}\n", + }, { + &struct { + A struct{ X, y int } "a,omitempty,flow" + }{struct{ X, y int }{1, 2}}, + "a: {x: 1}\n", + }, { + &struct { + A struct{ X, y int } "a,omitempty,flow" + }{struct{ X, y int }{0, 1}}, + "{}\n", + }, { + &struct { + A float64 "a,omitempty" + B float64 "b,omitempty" + }{1, 0}, + "a: 1\n", + }, + + // Flow flag + { + &struct { + A []int "a,flow" + }{[]int{1, 2}}, + "a: [1, 2]\n", + }, { + &struct { + A map[string]string "a,flow" + }{map[string]string{"b": "c", "d": "e"}}, + "a: {b: c, d: e}\n", + }, { + &struct { + A struct { + B, D string + } "a,flow" + }{struct{ B, D string }{"c", "e"}}, + "a: {b: c, d: e}\n", + }, + + // Unexported field + { + &struct { + u int + A int + }{0, 1}, + "a: 1\n", + }, + + // Ignored field + { + &struct { + A int + B int "-" + }{1, 2}, + "a: 1\n", + }, + + // Struct inlining + { + &struct { + A int + C inlineB `yaml:",inline"` + }{1, inlineB{2, inlineC{3}}}, + "a: 1\nb: 2\nc: 3\n", + }, + + // Map inlining + { + &struct { + A int + C map[string]int `yaml:",inline"` + }{1, map[string]int{"b": 2, "c": 3}}, + "a: 1\nb: 2\nc: 3\n", + }, + + // Duration + { + map[string]time.Duration{"a": 3 * time.Second}, + "a: 3s\n", + }, + + // Issue #24: bug in map merging logic. + { + map[string]string{"a": ""}, + "a: \n", + }, + + // Issue #34: marshal unsupported base 60 floats quoted for compatibility + // with old YAML 1.1 parsers. + { + map[string]string{"a": "1:1"}, + "a: \"1:1\"\n", + }, + + // Binary data. + { + map[string]string{"a": "\x00"}, + "a: \"\\0\"\n", + }, { + map[string]string{"a": "\x80\x81\x82"}, + "a: !!binary gIGC\n", + }, { + map[string]string{"a": strings.Repeat("\x90", 54)}, + "a: !!binary |\n " + strings.Repeat("kJCQ", 17) + "kJ\n CQ\n", + }, + + // Ordered maps. + { + &yaml.MapSlice{{"b", 2}, {"a", 1}, {"d", 4}, {"c", 3}, {"sub", yaml.MapSlice{{"e", 5}}}}, + "b: 2\na: 1\nd: 4\nc: 3\nsub:\n e: 5\n", + }, + + // Encode unicode as utf-8 rather than in escaped form. + { + map[string]string{"a": "你好"}, + "a: 你好\n", + }, + + // Support encoding.TextMarshaler. + { + map[string]net.IP{"a": net.IPv4(1, 2, 3, 4)}, + "a: 1.2.3.4\n", + }, + { + map[string]time.Time{"a": time.Unix(1424801979, 0)}, + "a: 2015-02-24T18:19:39Z\n", + }, + + // Ensure strings containing ": " are quoted (reported as PR #43, but not reproducible). 
+ { + map[string]string{"a": "b: c"}, + "a: 'b: c'\n", + }, + + // Containing hash mark ('#') in string should be quoted + { + map[string]string{"a": "Hello #comment"}, + "a: 'Hello #comment'\n", + }, + { + map[string]string{"a": "你好 #comment"}, + "a: '你好 #comment'\n", + }, +} + +func (s *S) TestMarshal(c *C) { + defer os.Setenv("TZ", os.Getenv("TZ")) + os.Setenv("TZ", "UTC") + for _, item := range marshalTests { + data, err := yaml.Marshal(item.value) + c.Assert(err, IsNil) + c.Assert(string(data), Equals, item.data) + } +} + +var marshalErrorTests = []struct { + value interface{} + error string + panic string +}{{ + value: &struct { + B int + inlineB ",inline" + }{1, inlineB{2, inlineC{3}}}, + panic: `Duplicated key 'b' in struct struct \{ B int; .*`, +}, { + value: &struct { + A int + B map[string]int ",inline" + }{1, map[string]int{"a": 2}}, + panic: `Can't have key "a" in inlined map; conflicts with struct field`, +}} + +func (s *S) TestMarshalErrors(c *C) { + for _, item := range marshalErrorTests { + if item.panic != "" { + c.Assert(func() { yaml.Marshal(item.value) }, PanicMatches, item.panic) + } else { + _, err := yaml.Marshal(item.value) + c.Assert(err, ErrorMatches, item.error) + } + } +} + +func (s *S) TestMarshalTypeCache(c *C) { + var data []byte + var err error + func() { + type T struct{ A int } + data, err = yaml.Marshal(&T{}) + c.Assert(err, IsNil) + }() + func() { + type T struct{ B int } + data, err = yaml.Marshal(&T{}) + c.Assert(err, IsNil) + }() + c.Assert(string(data), Equals, "b: 0\n") +} + +var marshalerTests = []struct { + data string + value interface{} +}{ + {"_:\n hi: there\n", map[interface{}]interface{}{"hi": "there"}}, + {"_:\n- 1\n- A\n", []interface{}{1, "A"}}, + {"_: 10\n", 10}, + {"_: null\n", nil}, + {"_: BAR!\n", "BAR!"}, +} + +type marshalerType struct { + value interface{} +} + +func (o marshalerType) MarshalText() ([]byte, error) { + panic("MarshalText called on type with MarshalYAML") +} + +func (o marshalerType) MarshalYAML() (interface{}, error) { + return o.value, nil +} + +type marshalerValue struct { + Field marshalerType "_" +} + +func (s *S) TestMarshaler(c *C) { + for _, item := range marshalerTests { + obj := &marshalerValue{} + obj.Field.value = item.value + data, err := yaml.Marshal(obj) + c.Assert(err, IsNil) + c.Assert(string(data), Equals, string(item.data)) + } +} + +func (s *S) TestMarshalerWholeDocument(c *C) { + obj := &marshalerType{} + obj.value = map[string]string{"hello": "world!"} + data, err := yaml.Marshal(obj) + c.Assert(err, IsNil) + c.Assert(string(data), Equals, "hello: world!\n") +} + +type failingMarshaler struct{} + +func (ft *failingMarshaler) MarshalYAML() (interface{}, error) { + return nil, failingErr +} + +func (s *S) TestMarshalerError(c *C) { + _, err := yaml.Marshal(&failingMarshaler{}) + c.Assert(err, Equals, failingErr) +} + +func (s *S) TestSortedOutput(c *C) { + order := []interface{}{ + false, + true, + 1, + uint(1), + 1.0, + 1.1, + 1.2, + 2, + uint(2), + 2.0, + 2.1, + "", + ".1", + ".2", + ".a", + "1", + "2", + "a!10", + "a/2", + "a/10", + "a~10", + "ab/1", + "b/1", + "b/01", + "b/2", + "b/02", + "b/3", + "b/03", + "b1", + "b01", + "b3", + "c2.10", + "c10.2", + "d1", + "d12", + "d12a", + } + m := make(map[interface{}]int) + for _, k := range order { + m[k] = 1 + } + data, err := yaml.Marshal(m) + c.Assert(err, IsNil) + out := "\n" + string(data) + last := 0 + for i, k := range order { + repr := fmt.Sprint(k) + if s, ok := k.(string); ok { + if _, err = strconv.ParseFloat(repr, 32); s == "" || err == 
nil {
+			repr = `"` + repr + `"`
+		}
+	}
+	index := strings.Index(out, "\n"+repr+":")
+	if index == -1 {
+		c.Fatalf("%#v is not in the output: %#v", k, out)
+	}
+	if index < last {
+		c.Fatalf("%#v was generated before %#v: %q", k, order[i-1], out)
+	}
+	last = index
+}
+}
diff --git a/vendor/gopkg.in/yaml.v2/suite_test.go b/vendor/gopkg.in/yaml.v2/suite_test.go
new file mode 100644
index 000000000000..c5cf1ed4f6e6
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/suite_test.go
@@ -0,0 +1,12 @@
+package yaml_test
+
+import (
+	. "gopkg.in/check.v1"
+	"testing"
+)
+
+func Test(t *testing.T) { TestingT(t) }
+
+type S struct{}
+
+var _ = Suite(&S{})
diff --git a/vendor/k8s.io/contrib/.gitignore b/vendor/k8s.io/contrib/.gitignore
new file mode 100644
index 000000000000..f7ea4d25dc53
--- /dev/null
+++ b/vendor/k8s.io/contrib/.gitignore
@@ -0,0 +1,23 @@
+# OSX leaves these everywhere on SMB shares
+._*
+
+# OSX trash
+.DS_Store
+
+# Eclipse files
+.classpath
+.project
+.settings/**
+
+# Emacs save files
+*~
+\#*\#
+.\#*
+
+# Vim-related files
+[._]*.s[a-w][a-z]
+[._]s[a-w][a-z]
+*.un~
+Session.vim
+.netrwhist
+
diff --git a/vendor/k8s.io/contrib/.travis.yml b/vendor/k8s.io/contrib/.travis.yml
new file mode 100644
index 000000000000..bbd58e0b0b85
--- /dev/null
+++ b/vendor/k8s.io/contrib/.travis.yml
@@ -0,0 +1,24 @@
+sudo: required
+
+services:
+  - docker
+
+language: go
+
+matrix:
+  include:
+    - go: 1.6
+
+install:
+  - mkdir -p $HOME/gopath/src/k8s.io
+  - mv $TRAVIS_BUILD_DIR $HOME/gopath/src/k8s.io/contrib
+  - hack/install-verify-tools.sh
+  - export PATH=$GOPATH/bin:$PATH
+  - hack/for-go-proj.sh install
+
+script:
+  - hack/verify-all.sh -v
+  - hack/for-go-proj.sh test
+
+notifications:
+  irc: "chat.freenode.net#kubernetes-dev"
diff --git a/vendor/k8s.io/contrib/CONTRIBUTING.md b/vendor/k8s.io/contrib/CONTRIBUTING.md
new file mode 100644
index 000000000000..f95eae7c4a7a
--- /dev/null
+++ b/vendor/k8s.io/contrib/CONTRIBUTING.md
@@ -0,0 +1,3 @@
+# How to become a contributor and submit your own code
+
+For instructions on how to contribute to the Kubernetes project please refer to the official [contributing guide](https://releases.k8s.io/HEAD/CONTRIBUTING.md)
diff --git a/vendor/k8s.io/contrib/OWNERS b/vendor/k8s.io/contrib/OWNERS
new file mode 100644
index 000000000000..1ed7b8b6e9db
--- /dev/null
+++ b/vendor/k8s.io/contrib/OWNERS
@@ -0,0 +1,4 @@
+assignees:
+- bprashanth
+- eparis
+- lavalamp
diff --git a/vendor/k8s.io/contrib/README.md b/vendor/k8s.io/contrib/README.md
new file mode 100644
index 000000000000..340768dc84d3
--- /dev/null
+++ b/vendor/k8s.io/contrib/README.md
@@ -0,0 +1,88 @@
+# Kubernetes Contrib
+
+[![Build Status](https://travis-ci.org/kubernetes/contrib.svg)](https://travis-ci.org/kubernetes/contrib)
+
+This is a place for various components in the Kubernetes ecosystem
+that aren't part of the Kubernetes core.
+
+## Getting the Code
+
+The code must be checked out as a subdirectory of `k8s.io`, and not `github.com`.
+
+```shell
+mkdir -p $GOPATH/src/k8s.io
+cd $GOPATH/src/k8s.io
+# Replace "$YOUR_GITHUB_USERNAME" below with your github username
+git clone https://github.com/$YOUR_GITHUB_USERNAME/contrib.git
+cd contrib
+```
+
+## Updating Godeps
+
+Godeps in contrib/ has a different layout than in kubernetes/ proper. This is because
+contrib contains multiple tiny projects, each with their own dependencies. Each
+project in contrib/ has its own Godeps.json. For example, the Godeps.json for Ingress
+is Ingress/Godeps/Godeps.json. 
This means that godep commands like `godep restore`
+or `godep test` will not work in the root directory. They should be run from inside the
+subproject directory you want to test.
+
+## Prerequisites for updating Godeps
+
+Since we vendor godeps through `/vendor` vs the old style `Godeps/_workspace`, you either need a more recent install of go and godep, or you need to set `GO15VENDOREXPERIMENT=1`. E.g.:
+```shell
+$ godep version
+godep v74 (linux/amd64/go1.6.1)
+$ go version
+go version go1.6.1 linux/amd64
+$ godep save ./...
+```
+
+This will automatically save godeps to `vendor/` instead of `_workspace/`.
+If you have an older version of go, you must run:
+```shell
+$ GO15VENDOREXPERIMENT=1 godep save ./...
+```
+
+If you have an older version of godep, you must update it:
+```shell
+$ go get github.com/tools/godep
+$ cd $GOPATH/src/github.com/tools/godep
+$ go build -o godep *.go
+```
+
+## Updating Godeps in a subproject
+
+The most common dep to update is obviously going to be Kubernetes proper. Updating
+Kubernetes and its dependencies in the Ingress subproject, for example, can be done
+as follows (the example assumes your Kubernetes repo is rooted at `$GOPATH/src/github.com/kubernetes`, `s/github.com\/kubernetes/k8s.io/` as required):
+```shell
+cd $GOPATH/src/github.com/kubernetes/contrib/ingress
+godep restore
+go get -u github.com/kubernetes/kubernetes
+cd $GOPATH/src/github.com/kubernetes/kubernetes
+godep restore
+cd $GOPATH/src/github.com/kubernetes/contrib/ingress
+rm -rf Godeps
+godep save ./...
+git [add/remove] as needed
+git commit
+```
+
+Other deps are similar, although if the dep you wish to update is included from
+kubernetes we probably want to stay in sync using the above method. If the dep is not in kubernetes proper, something like the following should get you a nice clean result:
+```shell
+cd $GOPATH/src/github.com/kubernetes/contrib/ingress
+godep restore
+go get -u $SOME_DEP
+rm -rf Godeps
+godep save ./...
+git [add/remove] as needed +git commit +``` + +## Running all tests + +To run all Go tests in all projects, run: +```shell +./hack/for-go-proj.sh test +``` diff --git a/vendor/k8s.io/contrib/labels.yaml b/vendor/k8s.io/contrib/labels.yaml new file mode 100644 index 000000000000..3c9ae8883d70 --- /dev/null +++ b/vendor/k8s.io/contrib/labels.yaml @@ -0,0 +1,277 @@ +labels: + - name: 0 - Backlog + color: CCCCCC + - name: 0 - Triage + color: AAAAAA + - name: 1 - Ready + color: CCCCCC + - name: 2 - Working <= 5 + color: CCCCCC + - name: 3 - Review + color: CCCCCC + - name: 4 - Done + color: CCCCCC + - name: approved + color: 0ffa16 + - name: area/HA + color: bfd4f2 + - name: area/admin + color: "009800" + - name: area/api + color: e1ed21 + - name: area/app-config-deployment + color: fb6420 + - name: area/auth + color: 00cc00 + - name: area/autoscaling + color: "5319e7" + - name: area/batch + color: c7def8 + - name: area/build-release + color: bfe5bf + - name: area/cadvisor + color: fef2c0 + - name: area/cloudprovider + color: f1fd21 + - name: area/cluster-federation + color: fad8c7 + - name: area/cluster-lifecycle + color: d4c5f9 + - name: area/devexp + color: "5319e7" + - name: area/downward-api + color: 8fffcf + - name: area/example/cassandra + color: d4c5f9 + - name: area/extensibility + color: ffaa04 + - name: area/images-registry + color: eba420 + - name: area/introspection + color: bfe5bf + - name: area/isolation + color: aba420 + - name: area/kubelet-api + color: fbca04 + - name: area/logging + color: fef2c0 + - name: area/monitoring + color: fef2c0 + - name: area/networking + color: 006b75 + - name: area/os/coreos + color: d4c5f9 + - name: area/os/fedora + color: d4c5f9 + - name: area/os/gci + color: e99695 + - name: area/os/ubuntu + color: d4c5f9 + - name: area/performance + color: d4c5f9 + - name: area/platform/aws + color: d4c5f9 + - name: area/platform/azure + color: d4c5f9 + - name: area/platform/gce + color: d4c5f9 + - name: area/platform/gke + color: d4c5f9 + - name: area/platform/mesos + color: d4c5f9 + - name: area/platform/openstack + color: d4c5f9 + - name: area/platform/vagrant + color: d4c5f9 + - name: area/platform/vsphere + color: d4c5f9 + - name: area/reliability + color: a10d01 + - name: area/scheduling + color: ab6420 + - name: area/security + color: d4c5f9 + - name: area/stateful-apps + color: ff6060 + - name: area/storage + color: 006b75 + - name: area/swagger + color: d4c5f9 + - name: area/system-requirement + color: bfdadc + - name: area/test + color: fad8c7 + - name: area/upgrade + color: bfd4f2 + - name: area/upward-api + color: a0c280 + - name: area/usability + color: bfe5bf + - name: area/volumes + color: bfe5bf + - name: cherrypick-approved + color: fef2c0 + - name: cherrypick-candidate + color: fef2c0 + - name: 'cla: human-approved' + color: bfe5bf + - name: 'cncf-cla: no' + color: e11d21 + - name: 'cncf-cla: yes' + color: bfe5bf + - name: component/apiserver + color: 0052cc + - name: component/clientlib + color: 0052cc + - name: component/controller-manager + color: 0052cc + - name: component/daemonset + color: 0052cc + - name: component/deployment + color: 0052cc + - name: component/dns + color: 1d76db + - name: component/kube-proxy + color: 0052cc + - name: component/kubectl + color: 0052cc + - name: component/kubelet + color: 0052cc + - name: component/node-e2e + color: 0052cc + - name: component/nodecontroller + color: 0052cc + - name: component/ui + color: 0052cc + - name: dependency/docker + color: fbfa04 + - name: dependency/etcd + color: fbfa04 +
- name: dependency/rkt + color: fbfa04 + - name: do-not-merge + color: e11d21 + - name: documentation/better-examples + color: f9d0c4 + - name: documentation/confusing + color: f9d0c4 + - name: documentation/content-gap + color: f9d0c4 + - name: documentation/out-of-date + color: f9d0c4 + - name: help-wanted + color: 006b75 + - name: keep-open + color: fbca04 + - name: kind/api-change + color: c7def8 + - name: kind/bug + color: e11d21 + - name: kind/cleanup + color: c7def8 + - name: kind/design + color: c7def8 + - name: kind/documentation + color: c7def8 + - name: kind/ecosystem-enablement + color: c7def8 + - name: kind/enhancement + color: c7def8 + - name: kind/example + color: c7def8 + - name: kind/feature + color: c7def8 + - name: kind/flake + color: f7c6c7 + - name: kind/friction + color: c7def8 + - name: kind/mesos-flake + color: f7c6c7 + - name: kind/new-api + color: c7def8 + - name: kind/postmortem + color: bfe5bf + - name: kind/support + color: eb6420 + - name: kind/technical-debt + color: c7def8 + - name: kind/velocity-improvement + color: c7def8 + - name: kind/workaround-removal + color: c7def8 + - name: lgtm + color: 15dd18 + - name: needs-ok-to-merge + color: ededed + - name: needs-rebase + color: BDBDBD + - name: ok-to-merge + color: fbca04 + - name: priority/P0 + color: e11d21 + - name: priority/P1 + color: eb6420 + - name: priority/P2 + color: fbca04 + - name: priority/P3 + color: fef2c0 + - name: release-note + color: c2e0c6 + - name: release-note-action-required + color: c2e0c6 + - name: release-note-experimental + color: c2e0c6 + - name: release-note-label-needed + color: db5a64 + - name: release-note-none + color: c2e0c6 + - name: retest-not-required + color: eb6420 + - name: retest-not-required-docs-only + color: fbca04 + - name: size/L + color: ee9900 + - name: size/M + color: eebb00 + - name: size/S + color: 77bb00 + - name: size/XL + color: ee5500 + - name: size/XS + color: "009900" + - name: size/XXL + color: ee0000 + - name: stale + color: "795548" + - name: team/CSI-API Machinery SIG + color: d2b48c + - name: team/api + color: d2b48c + - name: team/cluster + color: d2b48c + - name: team/control-plane + color: d2b48c + - name: team/gke + color: d2b48c + - name: team/huawei + color: d2b48c + - name: team/mesosphere + color: d2b48c + - name: team/node + color: d2b48c + - name: team/none + color: d2b48c + - name: team/redhat + color: d2b48c + - name: team/release-infra + color: d2b48c + - name: team/rktnetes + color: d2b48c + - name: team/sig-aws + color: d2b48c + - name: team/test-infra + color: d2b48c + - name: team/ux + color: d2b48c + - name: team/workloads + color: d2b48c diff --git a/vendor/k8s.io/contrib/test-utils/.gitignore b/vendor/k8s.io/contrib/test-utils/.gitignore new file mode 100644 index 000000000000..4c42cfe0f3aa --- /dev/null +++ b/vendor/k8s.io/contrib/test-utils/.gitignore @@ -0,0 +1,24 @@ +build + +# OSX leaves these everywhere on SMB shares +._* + +# OSX trash +.DS_Store + +# Eclipse files +.classpath +.project +.settings/** + +# Emacs save files +*~ +\#*\# +.\#* + +# Vim-related files +[._]*.s[a-w][a-z] +[._]s[a-w][a-z] +*.un~ +Session.vim +.netrwhist diff --git a/vendor/k8s.io/contrib/test-utils/OWNERS b/vendor/k8s.io/contrib/test-utils/OWNERS new file mode 100644 index 000000000000..57b3b2b047c1 --- /dev/null +++ b/vendor/k8s.io/contrib/test-utils/OWNERS @@ -0,0 +1,5 @@ +assignees: +- freehan +- gmarek +- lavalamp + diff --git a/vendor/k8s.io/contrib/test-utils/README.md b/vendor/k8s.io/contrib/test-utils/README.md new file mode 100644 index 
000000000000..0bf38d9f8622 --- /dev/null +++ b/vendor/k8s.io/contrib/test-utils/README.md @@ -0,0 +1,3 @@ +# GCS test buckets utilities + +Various utilities for getting test data from public GCS buckets \ No newline at end of file diff --git a/vendor/k8s.io/contrib/test-utils/utils/BUILD b/vendor/k8s.io/contrib/test-utils/utils/BUILD index 5916f8beaf7d..c5968224bed3 100644 --- a/vendor/k8s.io/contrib/test-utils/utils/BUILD +++ b/vendor/k8s.io/contrib/test-utils/utils/BUILD @@ -1,9 +1,4 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "go_default_library", @@ -12,9 +7,20 @@ go_library( "utils.go", ], importpath = "k8s.io/contrib/test-utils/utils", + visibility = ["//visibility:public"], deps = ["//vendor/github.com/golang/glog:go_default_library"], ) +go_test( + name = "go_default_test", + srcs = [ + "bucket_test.go", + "utils_test.go", + ], + importpath = "k8s.io/contrib/test-utils/utils", + library = ":go_default_library", +) + filegroup( name = "package-srcs", srcs = glob(["**"]), @@ -26,4 +32,5 @@ filegroup( name = "all-srcs", srcs = [":package-srcs"], tags = ["automanaged"], + visibility = ["//visibility:public"], ) diff --git a/vendor/k8s.io/contrib/test-utils/utils/bucket_test.go b/vendor/k8s.io/contrib/test-utils/utils/bucket_test.go new file mode 100644 index 000000000000..9072cdf14ca5 --- /dev/null +++ b/vendor/k8s.io/contrib/test-utils/utils/bucket_test.go @@ -0,0 +1,43 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package utils + +import ( + "testing" +) + +func TestExpandListURL(t *testing.T) { + table := []struct { + bucket string + pathParts []interface{} + expect string + }{ + { + bucket: "kubernetes-jenkins", + pathParts: []interface{}{"logs", "kubernetes-gce-e2e", 1458, "artifacts", "junit"}, + expect: "https://www.googleapis.com/storage/v1/b/kubernetes-jenkins/o?prefix=logs%2Fkubernetes-gce-e2e%2F1458%2Fartifacts%2Fjunit", + }, + } + + for _, tt := range table { + b := NewBucket(tt.bucket) + out := b.ExpandListURL(tt.pathParts...).String() + if out != tt.expect { + t.Errorf("Expected %v but got %v", tt.expect, out) + } + } +} diff --git a/vendor/k8s.io/contrib/test-utils/utils/utils_test.go b/vendor/k8s.io/contrib/test-utils/utils/utils_test.go new file mode 100644 index 000000000000..a43d6f3a8275 --- /dev/null +++ b/vendor/k8s.io/contrib/test-utils/utils/utils_test.go @@ -0,0 +1,76 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package utils + +import ( + "fmt" + "net/http" + "net/http/httptest" + "testing" +) + +func TestGetPathToJenkinsGoogleBucket(t *testing.T) { + const ( + bucket = "kubernetes-jenkins" + dir = "logs" + pullDir = "pr-logs" + pullKey = "pull" + ) + table := []struct { + job string + build int + expect string + }{ + { + job: "kubernetes-gce-e2e", + build: 1458, + expect: "/kubernetes-jenkins/logs/kubernetes-gce-e2e/1458/", + }, + { + job: "kubernetes-pull-build-test-e2e-gce", + build: 46924, + expect: "/kubernetes-jenkins/pr-logs/pull/27898/kubernetes-pull-build-test-e2e-gce/46924/", + }, + } + + m := http.NewServeMux() + m.HandleFunc( + "/kubernetes-jenkins/pr-logs/directory/kubernetes-pull-build-test-e2e-gce/46924.txt", + func(w http.ResponseWriter, req *http.Request) { + w.WriteHeader(http.StatusOK) + fmt.Fprint(w, "gs://kubernetes-jenkins/pr-logs/pull/27898/kubernetes-pull-build-test-e2e-gce/46924\n") + }, + ) + m.HandleFunc( + "/", + func(w http.ResponseWriter, req *http.Request) { + t.Errorf("Unexpected request to %v", req.URL.String()) + http.NotFound(w, req) + }, + ) + server := httptest.NewServer(m) + defer server.Close() + + for _, tt := range table { + u := NewWithPresubmitDetection(bucket, dir, pullKey, pullDir) + u.bucket = NewTestBucket(bucket, server.URL) + out := u.GetPathToJenkinsGoogleBucket(tt.job, tt.build) + if out != tt.expect { + t.Errorf("Expected %v but got %v", tt.expect, out) + } + } +} diff --git a/vendor/k8s.io/kubernetes/.generated_docs b/vendor/k8s.io/kubernetes/.generated_docs new file mode 100644 index 000000000000..b926c676e7ba --- /dev/null +++ b/vendor/k8s.io/kubernetes/.generated_docs @@ -0,0 +1,189 @@ +.generated_docs +docs/admin/federation-apiserver.md +docs/admin/federation-controller-manager.md +docs/admin/kube-apiserver.md +docs/admin/kube-controller-manager.md +docs/admin/kube-proxy.md +docs/admin/kube-scheduler.md +docs/admin/kubelet.md +docs/man/man1/kubectl-annotate.1 +docs/man/man1/kubectl-api-versions.1 +docs/man/man1/kubectl-apply.1 +docs/man/man1/kubectl-attach.1 +docs/man/man1/kubectl-autoscale.1 +docs/man/man1/kubectl-cluster-info-dump.1 +docs/man/man1/kubectl-cluster-info.1 +docs/man/man1/kubectl-completion.1 +docs/man/man1/kubectl-config-current-context.1 +docs/man/man1/kubectl-config-delete-cluster.1 +docs/man/man1/kubectl-config-delete-context.1 +docs/man/man1/kubectl-config-get-clusters.1 +docs/man/man1/kubectl-config-get-contexts.1 +docs/man/man1/kubectl-config-set-cluster.1 +docs/man/man1/kubectl-config-set-context.1 +docs/man/man1/kubectl-config-set-credentials.1 +docs/man/man1/kubectl-config-set.1 +docs/man/man1/kubectl-config-unset.1 +docs/man/man1/kubectl-config-use-context.1 +docs/man/man1/kubectl-config-view.1 +docs/man/man1/kubectl-config.1 +docs/man/man1/kubectl-convert.1 +docs/man/man1/kubectl-cordon.1 +docs/man/man1/kubectl-create-configmap.1 +docs/man/man1/kubectl-create-deployment.1 +docs/man/man1/kubectl-create-namespace.1 +docs/man/man1/kubectl-create-quota.1 +docs/man/man1/kubectl-create-secret-docker-registry.1 +docs/man/man1/kubectl-create-secret-generic.1 +docs/man/man1/kubectl-create-secret-tls.1 +docs/man/man1/kubectl-create-secret.1 +docs/man/man1/kubectl-create-service-clusterip.1 +docs/man/man1/kubectl-create-service-loadbalancer.1 +docs/man/man1/kubectl-create-service-nodeport.1 +docs/man/man1/kubectl-create-service.1 +docs/man/man1/kubectl-create-serviceaccount.1 +docs/man/man1/kubectl-create.1 
+docs/man/man1/kubectl-delete.1 +docs/man/man1/kubectl-describe.1 +docs/man/man1/kubectl-drain.1 +docs/man/man1/kubectl-edit.1 +docs/man/man1/kubectl-exec.1 +docs/man/man1/kubectl-explain.1 +docs/man/man1/kubectl-expose.1 +docs/man/man1/kubectl-get.1 +docs/man/man1/kubectl-label.1 +docs/man/man1/kubectl-logs.1 +docs/man/man1/kubectl-namespace.1 +docs/man/man1/kubectl-options.1 +docs/man/man1/kubectl-patch.1 +docs/man/man1/kubectl-port-forward.1 +docs/man/man1/kubectl-proxy.1 +docs/man/man1/kubectl-replace.1 +docs/man/man1/kubectl-rolling-update.1 +docs/man/man1/kubectl-rollout-history.1 +docs/man/man1/kubectl-rollout-pause.1 +docs/man/man1/kubectl-rollout-resume.1 +docs/man/man1/kubectl-rollout-status.1 +docs/man/man1/kubectl-rollout-undo.1 +docs/man/man1/kubectl-rollout.1 +docs/man/man1/kubectl-run.1 +docs/man/man1/kubectl-scale.1 +docs/man/man1/kubectl-set-image.1 +docs/man/man1/kubectl-set.1 +docs/man/man1/kubectl-stop.1 +docs/man/man1/kubectl-taint.1 +docs/man/man1/kubectl-top-node.1 +docs/man/man1/kubectl-top-pod.1 +docs/man/man1/kubectl-top.1 +docs/man/man1/kubectl-uncordon.1 +docs/man/man1/kubectl-version.1 +docs/man/man1/kubectl.1 +docs/user-guide/kubectl/kubectl.md +docs/user-guide/kubectl/kubectl_annotate.md +docs/user-guide/kubectl/kubectl_api-versions.md +docs/user-guide/kubectl/kubectl_apply.md +docs/user-guide/kubectl/kubectl_attach.md +docs/user-guide/kubectl/kubectl_autoscale.md +docs/user-guide/kubectl/kubectl_cluster-info.md +docs/user-guide/kubectl/kubectl_cluster-info_dump.md +docs/user-guide/kubectl/kubectl_completion.md +docs/user-guide/kubectl/kubectl_config.md +docs/user-guide/kubectl/kubectl_config_current-context.md +docs/user-guide/kubectl/kubectl_config_delete-cluster.md +docs/user-guide/kubectl/kubectl_config_delete-context.md +docs/user-guide/kubectl/kubectl_config_get-clusters.md +docs/user-guide/kubectl/kubectl_config_get-contexts.md +docs/user-guide/kubectl/kubectl_config_set-cluster.md +docs/user-guide/kubectl/kubectl_config_set-context.md +docs/user-guide/kubectl/kubectl_config_set-credentials.md +docs/user-guide/kubectl/kubectl_config_set.md +docs/user-guide/kubectl/kubectl_config_unset.md +docs/user-guide/kubectl/kubectl_config_use-context.md +docs/user-guide/kubectl/kubectl_config_view.md +docs/user-guide/kubectl/kubectl_convert.md +docs/user-guide/kubectl/kubectl_cordon.md +docs/user-guide/kubectl/kubectl_create.md +docs/user-guide/kubectl/kubectl_create_configmap.md +docs/user-guide/kubectl/kubectl_create_deployment.md +docs/user-guide/kubectl/kubectl_create_namespace.md +docs/user-guide/kubectl/kubectl_create_quota.md +docs/user-guide/kubectl/kubectl_create_secret.md +docs/user-guide/kubectl/kubectl_create_secret_docker-registry.md +docs/user-guide/kubectl/kubectl_create_secret_generic.md +docs/user-guide/kubectl/kubectl_create_secret_tls.md +docs/user-guide/kubectl/kubectl_create_service.md +docs/user-guide/kubectl/kubectl_create_service_clusterip.md +docs/user-guide/kubectl/kubectl_create_service_loadbalancer.md +docs/user-guide/kubectl/kubectl_create_service_nodeport.md +docs/user-guide/kubectl/kubectl_create_serviceaccount.md +docs/user-guide/kubectl/kubectl_delete.md +docs/user-guide/kubectl/kubectl_describe.md +docs/user-guide/kubectl/kubectl_drain.md +docs/user-guide/kubectl/kubectl_edit.md +docs/user-guide/kubectl/kubectl_exec.md +docs/user-guide/kubectl/kubectl_explain.md +docs/user-guide/kubectl/kubectl_expose.md +docs/user-guide/kubectl/kubectl_get.md +docs/user-guide/kubectl/kubectl_label.md +docs/user-guide/kubectl/kubectl_logs.md 
+docs/user-guide/kubectl/kubectl_namespace.md +docs/user-guide/kubectl/kubectl_options.md +docs/user-guide/kubectl/kubectl_patch.md +docs/user-guide/kubectl/kubectl_port-forward.md +docs/user-guide/kubectl/kubectl_proxy.md +docs/user-guide/kubectl/kubectl_replace.md +docs/user-guide/kubectl/kubectl_rolling-update.md +docs/user-guide/kubectl/kubectl_rollout.md +docs/user-guide/kubectl/kubectl_rollout_history.md +docs/user-guide/kubectl/kubectl_rollout_pause.md +docs/user-guide/kubectl/kubectl_rollout_resume.md +docs/user-guide/kubectl/kubectl_rollout_status.md +docs/user-guide/kubectl/kubectl_rollout_undo.md +docs/user-guide/kubectl/kubectl_run.md +docs/user-guide/kubectl/kubectl_scale.md +docs/user-guide/kubectl/kubectl_set.md +docs/user-guide/kubectl/kubectl_set_image.md +docs/user-guide/kubectl/kubectl_taint.md +docs/user-guide/kubectl/kubectl_top.md +docs/user-guide/kubectl/kubectl_top_node.md +docs/user-guide/kubectl/kubectl_top_pod.md +docs/user-guide/kubectl/kubectl_uncordon.md +docs/user-guide/kubectl/kubectl_version.md +docs/yaml/kubectl/kubectl.yaml +docs/yaml/kubectl/kubectl_annotate.yaml +docs/yaml/kubectl/kubectl_api-versions.yaml +docs/yaml/kubectl/kubectl_apply.yaml +docs/yaml/kubectl/kubectl_attach.yaml +docs/yaml/kubectl/kubectl_autoscale.yaml +docs/yaml/kubectl/kubectl_cluster-info.yaml +docs/yaml/kubectl/kubectl_completion.yaml +docs/yaml/kubectl/kubectl_config.yaml +docs/yaml/kubectl/kubectl_convert.yaml +docs/yaml/kubectl/kubectl_cordon.yaml +docs/yaml/kubectl/kubectl_create.yaml +docs/yaml/kubectl/kubectl_delete.yaml +docs/yaml/kubectl/kubectl_describe.yaml +docs/yaml/kubectl/kubectl_drain.yaml +docs/yaml/kubectl/kubectl_edit.yaml +docs/yaml/kubectl/kubectl_exec.yaml +docs/yaml/kubectl/kubectl_explain.yaml +docs/yaml/kubectl/kubectl_expose.yaml +docs/yaml/kubectl/kubectl_get.yaml +docs/yaml/kubectl/kubectl_label.yaml +docs/yaml/kubectl/kubectl_logs.yaml +docs/yaml/kubectl/kubectl_namespace.yaml +docs/yaml/kubectl/kubectl_options.yaml +docs/yaml/kubectl/kubectl_patch.yaml +docs/yaml/kubectl/kubectl_port-forward.yaml +docs/yaml/kubectl/kubectl_proxy.yaml +docs/yaml/kubectl/kubectl_replace.yaml +docs/yaml/kubectl/kubectl_rolling-update.yaml +docs/yaml/kubectl/kubectl_rollout.yaml +docs/yaml/kubectl/kubectl_run.yaml +docs/yaml/kubectl/kubectl_scale.yaml +docs/yaml/kubectl/kubectl_set.yaml +docs/yaml/kubectl/kubectl_stop.yaml +docs/yaml/kubectl/kubectl_taint.yaml +docs/yaml/kubectl/kubectl_top.yaml +docs/yaml/kubectl/kubectl_uncordon.yaml +docs/yaml/kubectl/kubectl_version.yaml diff --git a/vendor/k8s.io/kubernetes/.gitignore b/vendor/k8s.io/kubernetes/.gitignore new file mode 100644 index 000000000000..1ac0361d039d --- /dev/null +++ b/vendor/k8s.io/kubernetes/.gitignore @@ -0,0 +1,112 @@ +# OSX leaves these everywhere on SMB shares +._* + +# OSX trash +.DS_Store + +# Eclipse files +.classpath +.project +.settings/** + +# Files generated by JetBrains IDEs, e.g. 
IntelliJ IDEA +.idea/ +*.iml + +# Vscode files +.vscode + +# This is where the result of the go build goes +/output*/ +/_output*/ + +# Emacs save files +*~ +\#*\# +.\#* + +# Vim-related files +[._]*.s[a-w][a-z] +[._]s[a-w][a-z] +*.un~ +Session.vim +.netrwhist + +# Go test binaries +*.test +/hack/.test-cmd-auth + +# JUnit test output from ginkgo e2e tests +/junit*.xml + +# Mercurial files +**/.hg +**/.hg* + +# Vagrant +.vagrant +network_closure.sh + +# Local cluster env variables +/cluster/env.sh + +# Compiled binaries in third_party +/third_party/pkg + +# Also ignore etcd installed by hack/install-etcd.sh +/third_party/etcd* + +# User cluster configs +.kubeconfig + +.tags* + +# Web UI +/www/master/node_modules/ +/www/master/npm-debug.log +/www/master/shared/config/development.json + +# Karma output +/www/test_out + +# precommit temporary directories created by ./hack/verify-generated-docs.sh and ./hack/lib/util.sh +/_tmp/ +/doc_tmp/ + +# Test artifacts produced by Jenkins jobs +/_artifacts/ + +# Go dependencies installed on Jenkins +/_gopath/ + +# Config directories created by gcloud and gsutil on Jenkins +/.config/gcloud*/ +/.gsutil/ + +# CoreOS stuff +/cluster/libvirt-coreos/coreos_*.img + +# Juju Stuff +/cluster/juju/charms/* +/cluster/juju/bundles/local.yaml + +# Downloaded Kubernetes binary release +/kubernetes/ + +# direnv .envrc files +.envrc + +# Downloaded kubernetes binary release tar ball +kubernetes.tar.gz + +# generated files in any directory +# TODO(thockin): uncomment this when we stop committing the generated files. +#zz_generated.* + +# make-related metadata +/.make/ +# Just in time generated data in the source, should never be committed +/test/e2e/generated/bindata.go + +# This file is used by some vendor repos (e.g. github.com/go-openapi/...)
to store secret variables and should not be ignored +!\.drone\.sec diff --git a/vendor/k8s.io/kubernetes/CHANGELOG.md b/vendor/k8s.io/kubernetes/CHANGELOG.md new file mode 100644 index 000000000000..cb997db7aaf3 --- /dev/null +++ b/vendor/k8s.io/kubernetes/CHANGELOG.md @@ -0,0 +1,1947 @@ + + +- [v1.4.0-beta.8](#v140-beta8) + - [Downloads](#downloads) + - [Changelog since v1.4.0-beta.7](#changelog-since-v140-beta7) +- [v1.4.0-beta.7](#v140-beta7) + - [Downloads](#downloads-1) + - [Changelog since v1.4.0-beta.6](#changelog-since-v140-beta6) + - [Other notable changes](#other-notable-changes) +- [v1.4.0-beta.6](#v140-beta6) + - [Downloads](#downloads-2) + - [Changelog since v1.4.0-beta.5](#changelog-since-v140-beta5) + - [Other notable changes](#other-notable-changes-1) +- [v1.4.0-beta.5](#v140-beta5) + - [Downloads](#downloads-3) + - [Changelog since v1.4.0-beta.3](#changelog-since-v140-beta3) + - [Other notable changes](#other-notable-changes-2) +- [v1.3.7](#v137) + - [Downloads](#downloads-4) + - [Changelog since v1.3.6](#changelog-since-v136) + - [Other notable changes](#other-notable-changes-3) +- [v1.4.0-beta.3](#v140-beta3) + - [Downloads](#downloads-5) + - [Changelog since v1.4.0-beta.2](#changelog-since-v140-beta2) + - [Behavior changes caused by enabling the garbage collector](#behavior-changes-caused-by-enabling-the-garbage-collector) + - [kubectl rolling-update](#kubectl-rolling-update) + - [kubectl delete](#kubectl-delete) +- [v1.4.0-beta.2](#v140-beta2) + - [Downloads](#downloads-6) + - [Changelog since v1.4.0-beta.1](#changelog-since-v140-beta1) + - [Other notable changes](#other-notable-changes-4) +- [v1.4.0-beta.1](#v140-beta1) + - [Downloads](#downloads-7) + - [Changelog since v1.4.0-alpha.3](#changelog-since-v140-alpha3) + - [Action Required](#action-required) + - [Other notable changes](#other-notable-changes-5) +- [v1.3.6](#v136) + - [Downloads](#downloads-8) + - [Changelog since v1.3.5](#changelog-since-v135) + - [Other notable changes](#other-notable-changes-6) +- [v1.4.0-alpha.3](#v140-alpha3) + - [Downloads](#downloads-9) + - [Changelog since v1.4.0-alpha.2](#changelog-since-v140-alpha2) + - [Action Required](#action-required-1) + - [Other notable changes](#other-notable-changes-7) +- [v1.3.5](#v135) + - [Downloads](#downloads-10) + - [Changelog since v1.3.4](#changelog-since-v134) + - [Other notable changes](#other-notable-changes-8) +- [v1.3.4](#v134) + - [Downloads](#downloads-11) + - [Changelog since v1.3.3](#changelog-since-v133) + - [Other notable changes](#other-notable-changes-9) +- [v1.4.0-alpha.2](#v140-alpha2) + - [Downloads](#downloads-12) + - [Changelog since v1.4.0-alpha.1](#changelog-since-v140-alpha1) + - [Action Required](#action-required-2) + - [Other notable changes](#other-notable-changes-10) +- [v1.3.3](#v133) + - [Downloads](#downloads-13) + - [Changelog since v1.3.2](#changelog-since-v132) + - [Other notable changes](#other-notable-changes-11) + - [Known Issues](#known-issues) +- [v1.3.2](#v132) + - [Downloads](#downloads-14) + - [Changelog since v1.3.1](#changelog-since-v131) + - [Other notable changes](#other-notable-changes-12) +- [v1.3.1](#v131) + - [Downloads](#downloads-15) + - [Changelog since v1.3.0](#changelog-since-v130) + - [Other notable changes](#other-notable-changes-13) +- [v1.2.6](#v126) + - [Downloads](#downloads-16) + - [Changelog since v1.2.5](#changelog-since-v125) + - [Other notable changes](#other-notable-changes-14) +- [v1.4.0-alpha.1](#v140-alpha1) + - [Downloads](#downloads-17) + - [Changelog since 
v1.3.0](#changelog-since-v130-1) + - [Experimental Features](#experimental-features) + - [Action Required](#action-required-3) + - [Other notable changes](#other-notable-changes-15) +- [v1.3.0](#v130) + - [Downloads](#downloads-18) + - [Highlights](#highlights) + - [Known Issues and Important Steps before Upgrading](#known-issues-and-important-steps-before-upgrading) + - [ThirdPartyResource](#thirdpartyresource) + - [kubectl](#kubectl) + - [kubernetes Core Known Issues](#kubernetes-core-known-issues) + - [Docker runtime Known Issues](#docker-runtime-known-issues) + - [Rkt runtime Known Issues](#rkt-runtime-known-issues) + - [Provider-specific Notes](#provider-specific-notes) + - [Previous Releases Included in v1.3.0](#previous-releases-included-in-v130) +- [v1.3.0-beta.3](#v130-beta3) + - [Downloads](#downloads-19) + - [Changelog since v1.3.0-beta.2](#changelog-since-v130-beta2) + - [Action Required](#action-required-4) + - [Other notable changes](#other-notable-changes-16) +- [v1.2.5](#v125) + - [Downloads](#downloads-20) + - [Changes since v1.2.4](#changes-since-v124) + - [Other notable changes](#other-notable-changes-17) +- [v1.3.0-beta.2](#v130-beta2) + - [Downloads](#downloads-21) + - [Changes since v1.3.0-beta.1](#changes-since-v130-beta1) + - [Experimental Features](#experimental-features-1) + - [Other notable changes](#other-notable-changes-18) +- [v1.3.0-beta.1](#v130-beta1) + - [Downloads](#downloads-22) + - [Changes since v1.3.0-alpha.5](#changes-since-v130-alpha5) + - [Action Required](#action-required-5) + - [Other notable changes](#other-notable-changes-19) +- [v1.3.0-alpha.5](#v130-alpha5) + - [Downloads](#downloads-23) + - [Changes since v1.3.0-alpha.4](#changes-since-v130-alpha4) + - [Action Required](#action-required-6) + - [Other notable changes](#other-notable-changes-20) +- [v1.3.0-alpha.4](#v130-alpha4) + - [Downloads](#downloads-24) + - [Changes since v1.3.0-alpha.3](#changes-since-v130-alpha3) + - [Action Required](#action-required-7) + - [Other notable changes](#other-notable-changes-21) +- [v1.2.4](#v124) + - [Downloads](#downloads-25) + - [Changes since v1.2.3](#changes-since-v123) + - [Other notable changes](#other-notable-changes-22) +- [v1.3.0-alpha.3](#v130-alpha3) + - [Downloads](#downloads-26) + - [Changes since v1.3.0-alpha.2](#changes-since-v130-alpha2) + - [Action Required](#action-required-8) + - [Other notable changes](#other-notable-changes-23) +- [v1.2.3](#v123) + - [Downloads](#downloads-27) + - [Changes since v1.2.2](#changes-since-v122) + - [Action Required](#action-required-9) + - [Other notable changes](#other-notable-changes-24) +- [v1.3.0-alpha.2](#v130-alpha2) + - [Downloads](#downloads-28) + - [Changes since v1.3.0-alpha.1](#changes-since-v130-alpha1) + - [Other notable changes](#other-notable-changes-25) +- [v1.2.2](#v122) + - [Downloads](#downloads-29) + - [Changes since v1.2.1](#changes-since-v121) + - [Other notable changes](#other-notable-changes-26) +- [v1.2.1](#v121) + - [Downloads](#downloads-30) + - [Changes since v1.2.0](#changes-since-v120) + - [Other notable changes](#other-notable-changes-27) +- [v1.3.0-alpha.1](#v130-alpha1) + - [Downloads](#downloads-31) + - [Changes since v1.2.0](#changes-since-v120-1) + - [Action Required](#action-required-10) + - [Other notable changes](#other-notable-changes-28) +- [v1.2.0](#v120) + - [Downloads](#downloads-32) + - [Changes since v1.1.1](#changes-since-v111) + - [Major Themes](#major-themes) + - [Other notable improvements](#other-notable-improvements) + - [Experimental 
Features](#experimental-features-2) + - [Action required](#action-required-11) + - [Known Issues](#known-issues-1) + - [Docker Known Issues](#docker-known-issues) + - [1.9.1](#191) + - [Provider-specific Notes](#provider-specific-notes-1) + - [Various](#various) + - [AWS](#aws) + - [GCE](#gce) + + + + + + +# v1.4.0-beta.8 + +[Documentation](http://kubernetes.github.io) & [Examples](http://releases.k8s.io/release-1.4/examples) + +## Downloads + +binary | sha256 hash +------ | ----------- +[kubernetes.tar.gz](https://storage.googleapis.com/kubernetes-release/release/v1.4.0-beta.8/kubernetes.tar.gz) | `31701c5c675c137887b58d7914e39b4c8a9c03767c0c3d89198a52f4476278ca` + +## Changelog since v1.4.0-beta.7 + +**No notable changes for this release** + + + +# v1.4.0-beta.7 + +[Documentation](http://kubernetes.github.io) & [Examples](http://releases.k8s.io/release-1.4/examples) + +## Downloads + +binary | sha256 hash +------ | ----------- +[kubernetes.tar.gz](https://storage.googleapis.com/kubernetes-release/release/v1.4.0-beta.7/kubernetes.tar.gz) | `51e8f3ebe55cfcfbe582dd6e5ea60ae125d89373477571c0faee70eff51bab31` + +## Changelog since v1.4.0-beta.6 + +### Other notable changes + +* Use a patched go1.7.1 for building linux/arm ([#32517](https://github.com/kubernetes/kubernetes/pull/32517), [@luxas](https://github.com/luxas)) +* Specific error message on failed rolling update issued by older kubectl against 1.4 master ([#32751](https://github.com/kubernetes/kubernetes/pull/32751), [@caesarxuchao](https://github.com/caesarxuchao)) + + + +# v1.4.0-beta.6 + +[Documentation](http://kubernetes.github.io) & [Examples](http://releases.k8s.io/release-1.4/examples) + +## Downloads + +binary | sha256 hash +------ | ----------- +[kubernetes.tar.gz](https://storage.googleapis.com/kubernetes-release/release/v1.4.0-beta.6/kubernetes.tar.gz) | `0b0158e4745663b48c55527247d3e64cc3649f875fa7611fc7b38fa5c3b736bd` + +## Changelog since v1.4.0-beta.5 + +### Other notable changes + +* Set Dashboard UI to final 1.4 version ([#32666](https://github.com/kubernetes/kubernetes/pull/32666), [@bryk](https://github.com/bryk)) + + + +# v1.4.0-beta.5 + +[Documentation](http://kubernetes.github.io) & [Examples](http://releases.k8s.io/release-1.4/examples) + +## Downloads + +binary | sha256 hash +------ | ----------- +[kubernetes.tar.gz](https://storage.googleapis.com/kubernetes-release/release/v1.4.0-beta.5/kubernetes.tar.gz) | `ec6b233b0448472e05e6820b8ea1644119ae4f9fe3a1516cf978117c19bad0a9` + +## Changelog since v1.4.0-beta.3 + +### Other notable changes + +* Bumped Heapster to v1.2.0. ([#32649](https://github.com/kubernetes/kubernetes/pull/32649), [@piosz](https://github.com/piosz)) + * More details about the release https://github.com/kubernetes/heapster/releases/tag/v1.2.0 +* Docker digest validation is too strict ([#32627](https://github.com/kubernetes/kubernetes/pull/32627), [@smarterclayton](https://github.com/smarterclayton)) +* Added new kubelet flags `--cni-bin-dir` and `--cni-conf-dir` to specify where CNI files are located. ([#32151](https://github.com/kubernetes/kubernetes/pull/32151), [@bboreham](https://github.com/bboreham)) + * Fixed CNI configuration on GCI platform when using CNI. 
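  As a sketch of how the two new flags combine on a kubelet command line (the directory paths shown are the conventional CNI locations, assumed here for illustration; they are not taken from the PR):

  ```shell
  kubelet --network-plugin=cni --cni-bin-dir=/opt/cni/bin --cni-conf-dir=/etc/cni/net.d
  ```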
+* make --runtime-config=api/all=true|false work ([#32582](https://github.com/kubernetes/kubernetes/pull/32582), [@jlowdermilk](https://github.com/jlowdermilk)) +* AWS: Change default networking for kube-up to kubenet ([#32239](https://github.com/kubernetes/kubernetes/pull/32239), [@zmerlynn](https://github.com/zmerlynn)) + + + +# v1.3.7 + +[Documentation](http://kubernetes.github.io) & [Examples](http://releases.k8s.io/release-1.3/examples) + +## Downloads + +binary | sha256 hash +------ | ----------- +[kubernetes.tar.gz](https://storage.googleapis.com/kubernetes-release/release/v1.3.7/kubernetes.tar.gz) | `ad18566a09ff87b36107c2ea238fa5e20988d7a62c85df9c8598920679fec4a1` + +## Changelog since v1.3.6 + +### Other notable changes + +* AWS: Add ap-south-1 to list of known AWS regions ([#28428](https://github.com/kubernetes/kubernetes/pull/28428), [@justinsb](https://github.com/justinsb)) +* Back porting critical vSphere bug fixes to release 1.3 ([#31993](https://github.com/kubernetes/kubernetes/pull/31993), [@dagnello](https://github.com/dagnello)) +* Back port - Openstack provider allowing more than one service port for lbaas v2 ([#32001](https://github.com/kubernetes/kubernetes/pull/32001), [@dagnello](https://github.com/dagnello)) +* Fix a bug in kubelet hostport logic which flushes KUBE-MARK-MASQ iptables chain ([#32413](https://github.com/kubernetes/kubernetes/pull/32413), [@freehan](https://github.com/freehan)) +* Fixes the panic that occurs in the federation controller manager when registering a GKE cluster to the federation. Fixes issue [#30790](https://github.com/kubernetes/kubernetes/pull/30790). ([#30940](https://github.com/kubernetes/kubernetes/pull/30940), [@madhusudancs](https://github.com/madhusudancs)) + + + +# v1.4.0-beta.3 + +[Documentation](http://kubernetes.github.io) & [Examples](http://releases.k8s.io/release-1.4/examples) + +## Downloads + +binary | sha256 hash +------ | ----------- +[kubernetes.tar.gz](https://storage.googleapis.com/kubernetes-release/release/v1.4.0-beta.3/kubernetes.tar.gz) | `5a6802703c6b0b652e72166a4347fee7899c46205463f6797dc78f8086876465` + +## Changelog since v1.4.0-beta.2 + +**No notable changes for this release** + +## Behavior changes caused by enabling the garbage collector + +### kubectl rolling-update + +An old version of kubectl's rolling-update command is compatible with Kubernetes 1.4 and higher **only if** you specify a new replication controller name. You will need to update to kubectl 1.4 or higher to use the rolling-update command against a 1.4 cluster if you want to keep the original name, or you'll have to do two rolling updates. + +If you do happen to use an old version of kubectl's rolling update against a 1.4 cluster, it will fail, usually with an error message that will direct you here. If you saw that error, don't worry: the operation succeeded except for the part where the new replication controller is renamed back to the old name. You can just do another rolling update using kubectl 1.4 or higher to change the name back: look for a replication controller that has the original name plus a random suffix. + +Unfortunately, there is a much rarer second possible failure mode: the replication controller gets renamed to the old name, but there is a duplicated set of pods in the cluster. kubectl will not report an error since it thinks its job is done. + +If this happens to you, you can wait at most 10 minutes for the replication controller to start a resync; the extra pods will then be deleted. Or, you can manually trigger a resync by changing the replicas in the spec of the replication controller.
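A minimal sketch of that manual trigger (the controller name `my-rc` and the replica counts are hypothetical; any edit to `spec.replicas` prompts the controller manager to reconcile the controller and delete the surplus pods):

```shell
# Nudge spec.replicas up and back down to force the replication controller to resync.
kubectl scale rc my-rc --replicas=4
kubectl scale rc my-rc --replicas=3
```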
+ +### kubectl delete + +If you use an old version of kubectl to delete a replication controller or replicaset, then after the delete command has returned, the replication controller or the replicaset will continue to exist in the key-value store for a short period of time (<1s). You probably will not notice any difference if you use kubectl manually, but you might notice it if you are using kubectl in a script.
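If a script needs to know when the object is really gone, it can poll until the GET fails. A rough sketch (`my-rc` is a hypothetical name):

```shell
kubectl delete rc my-rc
# The object may linger in the key-value store briefly; wait until it is gone.
while kubectl get rc my-rc >/dev/null 2>&1; do
  sleep 1
done
```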
+ + +# v1.4.0-beta.2 + +[Documentation](http://kubernetes.github.io) & [Examples](http://releases.k8s.io/release-1.4/examples) + +## Downloads + +binary | sha256 hash +------ | ----------- +[kubernetes.tar.gz](https://storage.googleapis.com/kubernetes-release/release/v1.4.0-beta.2/kubernetes.tar.gz) | `0c6f54eb9059090c88f10a448ed5bcb6ef663abbd76c79281fd8dcb72faa6315` + +## Changelog since v1.4.0-beta.1 + +### Other notable changes + +* Fix a bug in kubelet hostport logic which flushes KUBE-MARK-MASQ iptables chain ([#32413](https://github.com/kubernetes/kubernetes/pull/32413), [@freehan](https://github.com/freehan)) +* Stick to 2.2.1 etcd ([#32404](https://github.com/kubernetes/kubernetes/pull/32404), [@caesarxuchao](https://github.com/caesarxuchao)) +* Use etcd 2.3.7 ([#32359](https://github.com/kubernetes/kubernetes/pull/32359), [@wojtek-t](https://github.com/wojtek-t)) +* AWS: Change default networking for kube-up to kubenet ([#32239](https://github.com/kubernetes/kubernetes/pull/32239), [@zmerlynn](https://github.com/zmerlynn)) +* Make sure finalizers prevent deletion on storage that supports graceful deletion ([#32351](https://github.com/kubernetes/kubernetes/pull/32351), [@caesarxuchao](https://github.com/caesarxuchao)) +* Some components like kube-dns and kube-proxy could fail to load the service account token when started within a pod. Properly handle empty configurations to try loading the service account config. ([#31947](https://github.com/kubernetes/kubernetes/pull/31947), [@smarterclayton](https://github.com/smarterclayton)) +* Use federated namespace instead of the bootstrap cluster's namespace in Ingress e2e tests. ([#32105](https://github.com/kubernetes/kubernetes/pull/32105), [@madhusudancs](https://github.com/madhusudancs)) + + + +# v1.4.0-beta.1 + +[Documentation](http://kubernetes.github.io) & [Examples](http://releases.k8s.io/release-1.4/examples) + +## Downloads + +binary | sha256 hash +------ | ----------- +[kubernetes.tar.gz](https://storage.googleapis.com/kubernetes-release/release/v1.4.0-beta.1/kubernetes.tar.gz) | `837296455933629b6792a8954f2c5b17d55c1149c12b644101f2f02549d06d25` + +## Changelog since v1.4.0-alpha.3 + +### Action Required + +* The NamespaceExists and NamespaceAutoProvision admission controllers have been removed. ([#31250](https://github.com/kubernetes/kubernetes/pull/31250), [@derekwaynecarr](https://github.com/derekwaynecarr)) + * All cluster operators should use NamespaceLifecycle. +* Federation binaries and their corresponding docker images - `federation-apiserver` and `federation-controller-manager` are now folded into the `hyperkube` binary. If you were using one of these binaries or docker images, please switch to using the `hyperkube` version. Please refer to the federation manifests - `federation/manifests/federation-apiserver.yaml` and `federation/manifests/federation-controller-manager-deployment.yaml` for examples. ([#29929](https://github.com/kubernetes/kubernetes/pull/29929), [@madhusudancs](https://github.com/madhusudancs)) +* Use upgraded container-vm by default on worker nodes for GCE k8s clusters ([#31023](https://github.com/kubernetes/kubernetes/pull/31023), [@vishh](https://github.com/vishh)) + +### Other notable changes + +* Enable kubelet eviction whenever inodes free is < 5% on GCE ([#31545](https://github.com/kubernetes/kubernetes/pull/31545), [@vishh](https://github.com/vishh)) +* Move StorageClass to a storage group ([#31886](https://github.com/kubernetes/kubernetes/pull/31886), [@deads2k](https://github.com/deads2k)) +* Some components like kube-dns and kube-proxy could fail to load the service account token when started within a pod. Properly handle empty configurations to try loading the service account config. ([#31947](https://github.com/kubernetes/kubernetes/pull/31947), [@smarterclayton](https://github.com/smarterclayton)) +* Removed comments in json config when using kubectl edit with -o json ([#31685](https://github.com/kubernetes/kubernetes/pull/31685), [@jellonek](https://github.com/jellonek)) +* fixes invalid null selector issue in sysdig example yaml ([#31393](https://github.com/kubernetes/kubernetes/pull/31393), [@baldwinSPC](https://github.com/baldwinSPC)) +* Rescheduler, which ensures that critical pods are always scheduled, is enabled by default in GCE. ([#31974](https://github.com/kubernetes/kubernetes/pull/31974), [@piosz](https://github.com/piosz)) +* retry oauth token fetch in gce cloudprovider ([#32021](https://github.com/kubernetes/kubernetes/pull/32021), [@mikedanese](https://github.com/mikedanese)) +* Deprecate the old cbr0 and flannel networking modes ([#31197](https://github.com/kubernetes/kubernetes/pull/31197), [@freehan](https://github.com/freehan)) +* AWS: fix volume device assignment race condition ([#31090](https://github.com/kubernetes/kubernetes/pull/31090), [@justinsb](https://github.com/justinsb)) +* The certificates API group has been renamed to certificates.k8s.io ([#31887](https://github.com/kubernetes/kubernetes/pull/31887), [@liggitt](https://github.com/liggitt)) +* Increase Dashboard UI version to v1.4.0-beta2 ([#31518](https://github.com/kubernetes/kubernetes/pull/31518), [@bryk](https://github.com/bryk)) +* Fixed incomplete kubectl bash completion. ([#31333](https://github.com/kubernetes/kubernetes/pull/31333), [@xingzhou](https://github.com/xingzhou)) +* Added liveness probe to Heapster service. ([#31878](https://github.com/kubernetes/kubernetes/pull/31878), [@mksalawa](https://github.com/mksalawa)) +* Adding clusters to the list of valid resources printed by kubectl help ([#31719](https://github.com/kubernetes/kubernetes/pull/31719), [@nikhiljindal](https://github.com/nikhiljindal)) +* Kubernetes server components using `kubeconfig` files no longer default to `http://localhost:8080`. Administrators must specify a server value in their kubeconfig files. ([#30808](https://github.com/kubernetes/kubernetes/pull/30808), [@smarterclayton](https://github.com/smarterclayton)) +* Update influxdb to 0.12 ([#31519](https://github.com/kubernetes/kubernetes/pull/31519), [@piosz](https://github.com/piosz)) +* Include security options in the container created event ([#31557](https://github.com/kubernetes/kubernetes/pull/31557), [@timstclair](https://github.com/timstclair)) +* Federation can now be deployed using the `federation/deploy/deploy.sh` script. This script does not depend on any of the development environment shell library/scripts. 
This is an alternative to the current `federation-up.sh`/`federation-down.sh` scripts. Both the scripts are going to co-exist in this release, but the `federation-up.sh`/`federation-down.sh` scripts might be removed in a future release in favor of the `federation/deploy/deploy.sh` script. ([#30744](https://github.com/kubernetes/kubernetes/pull/30744), [@madhusudancs](https://github.com/madhusudancs)) +* Add get/delete cluster, delete context to kubectl config ([#29821](https://github.com/kubernetes/kubernetes/pull/29821), [@alexbrand](https://github.com/alexbrand)) +* rkt: Force `rkt fetch` to fetch from remote to conform to the image pull policy. ([#31378](https://github.com/kubernetes/kubernetes/pull/31378), [@yifan-gu](https://github.com/yifan-gu)) +* Allow services which use same port, different protocol to use the same nodePort for both ([#30253](https://github.com/kubernetes/kubernetes/pull/30253), [@AdoHe](https://github.com/AdoHe)) +* Handle overlapping deployments gracefully ([#30730](https://github.com/kubernetes/kubernetes/pull/30730), [@janetkuo](https://github.com/janetkuo)) +* Remove environment variables and internal Kubernetes Docker labels from cAdvisor Prometheus metric labels. ([#31064](https://github.com/kubernetes/kubernetes/pull/31064), [@grobie](https://github.com/grobie)) + * Old behavior: + * - environment variables explicitly whitelisted via --docker-env-metadata-whitelist were exported as `container_env_*=*`. Default is zero so by default none were exported + * - all docker labels were exported as `container_label_*=*` + * New behavior: + * - Only `container_name`, `pod_name`, `namespace`, `id`, `image`, and `name` labels are exposed + * - no environment variables will be exposed ever via /metrics, even if whitelisted +* Filter duplicate network packets in promiscuous bridge mode (with ebtables) ([#28717](https://github.com/kubernetes/kubernetes/pull/28717), [@freehan](https://github.com/freehan)) +* Refactor to simplify the hard-traveled path of the KubeletConfiguration object ([#29216](https://github.com/kubernetes/kubernetes/pull/29216), [@mtaufen](https://github.com/mtaufen)) +* Fix overflow issue in controller-manager rate limiter ([#31396](https://github.com/kubernetes/kubernetes/pull/31396), [@foxish](https://github.com/foxish)) + + + +# v1.3.6 + +[Documentation](http://kubernetes.github.io) & [Examples](http://releases.k8s.io/release-1.3/examples) + +## Downloads + +binary | sha256 hash +------ | ----------- +[kubernetes.tar.gz](https://storage.googleapis.com/kubernetes-release/release/v1.3.6/kubernetes.tar.gz) | `2db7ace2f72a2e162329a6dc969a5a158bb8c5d0f8054c5b1b2b1063aa22020d` + +## Changelog since v1.3.5 + +### Other notable changes + +* Addresses vSphere Volume Attach limits ([#29881](https://github.com/kubernetes/kubernetes/pull/29881), [@dagnello](https://github.com/dagnello)) +* Increase request timeout based on termination grace period ([#31275](https://github.com/kubernetes/kubernetes/pull/31275), [@dims](https://github.com/dims)) +* Skip safe to detach check if node API object no longer exists ([#30737](https://github.com/kubernetes/kubernetes/pull/30737), [@saad-ali](https://github.com/saad-ali)) +* Nodecontroller doesn't flip readiness on pods if kubeletVersion < 1.2.0 ([#30828](https://github.com/kubernetes/kubernetes/pull/30828), [@bprashanth](https://github.com/bprashanth)) +* Update cadvisor to v0.23.9 to fix a problem where attempting to gather container filesystem usage statistics could result in corrupted devicemapper thin pool storage for
Docker. ([#30307](https://github.com/kubernetes/kubernetes/pull/30307), [@sjenning](https://github.com/sjenning)) + + + +# v1.4.0-alpha.3 + +[Documentation](http://kubernetes.github.io) & [Examples](http://releases.k8s.io/master/examples) + +## Downloads + +binary | sha256 hash +------ | ----------- +[kubernetes.tar.gz](https://storage.googleapis.com/kubernetes-release/release/v1.4.0-alpha.3/kubernetes.tar.gz) | `8055f0373e3b6bdee865749ef9bcfc765396a40f39ec2fa3cd31b675d1bbf5d9` + +## Changelog since v1.4.0-alpha.2 + +### Action Required + +* Moved init-container feature from alpha to beta. ([#31026](https://github.com/kubernetes/kubernetes/pull/31026), [@erictune](https://github.com/erictune)) + * Security Action Required: + * This only applies to you if you use the PodSecurityPolicy feature. You are using that feature if `kubectl get podsecuritypolicy` returns one or more objects. If it returns an error, you are not using it. + * If there are any pods with the key `pods.beta.kubernetes.io/init-containers`, then that pod may not have been filtered by the PodSecurityPolicy. You should find such pods and either delete them or audit them to ensure they do not use features that you intend to be blocked by PodSecurityPolicy. + * Explanation of Feature + * In 1.3, an init container is specified with this annotation key + * on the pod or pod template: `pods.alpha.kubernetes.io/init-containers`. + * In 1.4, either that key or this key: `pods.beta.kubernetes.io/init-containers`, + * can be used. + * When you GET an object, you will see both annotation keys with the same values. + * You can safely roll back from 1.4 to 1.3, and things with init-containers + * will still work (pods, deployments, etc). + * If you are running 1.3, only use the alpha annotation, or it may be lost when + * rolling forward. + * The status has moved from annotation key + * `pods.alpha.kubernetes.io/init-container-statuses` to + * `pods.beta.kubernetes.io/init-container-statuses`. + * Any code that inspects this annotation should be changed to use the new key. + * State of Initialization will continue to be reported in both pods.alpha.kubernetes.io/initialized + * and in `podStatus.conditions.{status: "True", type: Initialized}` +* Action required: federation-only: Please update your cluster name to be a valid DNS label. ([#30956](https://github.com/kubernetes/kubernetes/pull/30956), [@nikhiljindal](https://github.com/nikhiljindal)) + * Updating federation.v1beta1.Cluster API to disallow subdomains as valid cluster names. Only DNS labels are allowed as valid cluster names now. +* [Kubelet] Rename `--config` to `--pod-manifest-path`. `--config` is deprecated. ([#29999](https://github.com/kubernetes/kubernetes/pull/29999), [@mtaufen](https://github.com/mtaufen)) + +### Other notable changes + +* rkt: Improve support for privileged pod (pod whose all containers are privileged) ([#31286](https://github.com/kubernetes/kubernetes/pull/31286), [@yifan-gu](https://github.com/yifan-gu)) +* The pod annotation `security.alpha.kubernetes.io/sysctls` now allows customization of namespaced and well isolated kernel parameters (sysctls), starting with `kernel.shm_rmid_forced`, `net.ipv4.ip_local_port_range` and `net.ipv4.tcp_syncookies` for Kubernetes 1.4. ([#27180](https://github.com/kubernetes/kubernetes/pull/27180), [@sttts](https://github.com/sttts)) + * The pod annotation `security.alpha.kubernetes.io/unsafe-sysctls` allows customization of namespaced sysctls where isolation is unclear. 
Unsafe sysctls must be enabled at-your-own-risk on the kubelet with the `--experimental-allowed-unsafe-sysctls` flag. Future versions will improve on resource isolation and more sysctls will be considered safe. +* Increase request timeout based on termination grace period ([#31275](https://github.com/kubernetes/kubernetes/pull/31275), [@dims](https://github.com/dims)) +* Fixed two issues of kubectl bash completion. ([#31135](https://github.com/kubernetes/kubernetes/pull/31135), [@xingzhou](https://github.com/xingzhou)) +* Reduced size of fluentd images. ([#31239](https://github.com/kubernetes/kubernetes/pull/31239), [@aledbf](https://github.com/aledbf)) +* support Azure data disk volume ([#29836](https://github.com/kubernetes/kubernetes/pull/29836), [@rootfs](https://github.com/rootfs)) +* fix Openstack provider to allow more than one service port for lbaas v2 ([#30649](https://github.com/kubernetes/kubernetes/pull/30649), [@dagnello](https://github.com/dagnello)) +* Add kubelet --network-plugin-mtu flag for MTU selection ([#30376](https://github.com/kubernetes/kubernetes/pull/30376), [@justinsb](https://github.com/justinsb)) +* Let Services preserve client IPs and not double-hop from external LBs (alpha) ([#29409](https://github.com/kubernetes/kubernetes/pull/29409), [@girishkalele](https://github.com/girishkalele)) +* [Kubelet] Optionally consume configuration from named config maps ([#30090](https://github.com/kubernetes/kubernetes/pull/30090), [@mtaufen](https://github.com/mtaufen)) +* [GarbageCollector] Allow per-resource default garbage collection behavior ([#30838](https://github.com/kubernetes/kubernetes/pull/30838), [@caesarxuchao](https://github.com/caesarxuchao)) +* Action required: If you have a running federation control plane, you will have to ensure that for all federation resources, the corresponding namespace exists in federation control plane. ([#31139](https://github.com/kubernetes/kubernetes/pull/31139), [@nikhiljindal](https://github.com/nikhiljindal)) + * federation-apiserver now supports NamespaceLifecycle admission control, which is enabled by default. Set the --admission-control flag on the server to change that. +* Configure webhook ([#30923](https://github.com/kubernetes/kubernetes/pull/30923), [@Q-Lee](https://github.com/Q-Lee)) +* Federated Ingress Controller ([#30419](https://github.com/kubernetes/kubernetes/pull/30419), [@quinton-hoole](https://github.com/quinton-hoole)) +* Federation replicaset controller ([#29741](https://github.com/kubernetes/kubernetes/pull/29741), [@jianhuiz](https://github.com/jianhuiz)) +* AWS: More ELB attributes via service annotations ([#30695](https://github.com/kubernetes/kubernetes/pull/30695), [@krancour](https://github.com/krancour)) +* Impersonate user extra ([#30881](https://github.com/kubernetes/kubernetes/pull/30881), [@deads2k](https://github.com/deads2k)) +* DNS, Heapster and UI are critical addons ([#30995](https://github.com/kubernetes/kubernetes/pull/30995), [@piosz](https://github.com/piosz)) +* AWS: Support HTTP->HTTP mode for ELB ([#30563](https://github.com/kubernetes/kubernetes/pull/30563), [@knarz](https://github.com/knarz)) +* kube-up: Allow IP restrictions for SSH and HTTPS API access on AWS. 
([#27061](https://github.com/kubernetes/kubernetes/pull/27061), [@Naddiseo](https://github.com/Naddiseo)) +* Add readyReplicas to replica sets ([#29481](https://github.com/kubernetes/kubernetes/pull/29481), [@kargakis](https://github.com/kargakis)) +* The implicit registration of Prometheus metrics for request count and latency has been removed, and a plug-able interface was added. If you were using our client libraries in your own binaries and want these metrics, add the following to your imports in the main package: "k8s.io/pkg/client/metrics/prometheus". ([#30638](https://github.com/kubernetes/kubernetes/pull/30638), [@krousey](https://github.com/krousey)) +* Add support for --image-pull-policy to 'kubectl run' ([#30614](https://github.com/kubernetes/kubernetes/pull/30614), [@AdoHe](https://github.com/AdoHe)) +* x509 authenticator: get groups from subject's organization field ([#30392](https://github.com/kubernetes/kubernetes/pull/30392), [@ericchiang](https://github.com/ericchiang)) +* Add initial support for TokenFile to the client config file. ([#29696](https://github.com/kubernetes/kubernetes/pull/29696), [@brendandburns](https://github.com/brendandburns)) +* update kubectl help output for better organization ([#25524](https://github.com/kubernetes/kubernetes/pull/25524), [@AdoHe](https://github.com/AdoHe)) +* daemonset controller should respect taints ([#31020](https://github.com/kubernetes/kubernetes/pull/31020), [@mikedanese](https://github.com/mikedanese)) +* Implement TLS bootstrap for kubelet using `--experimental-bootstrap-kubeconfig` (2nd take) ([#30922](https://github.com/kubernetes/kubernetes/pull/30922), [@yifan-gu](https://github.com/yifan-gu)) +* rkt: Support subPath volume mounts feature ([#30934](https://github.com/kubernetes/kubernetes/pull/30934), [@yifan-gu](https://github.com/yifan-gu)) +* Return container command exit codes in kubectl run/exec ([#26541](https://github.com/kubernetes/kubernetes/pull/26541), [@sttts](https://github.com/sttts)) +* Fix kubectl describe to display a container's resource limit env vars as node allocatable when the limits are not set ([#29849](https://github.com/kubernetes/kubernetes/pull/29849), [@aveshagarwal](https://github.com/aveshagarwal)) +* The `valueFrom.fieldRef.name` field on environment variables in pods and objects with pod templates now allows two additional fields to be used: ([#27880](https://github.com/kubernetes/kubernetes/pull/27880), [@smarterclayton](https://github.com/smarterclayton)) + * `spec.nodeName` will return the name of the node this pod is running on + * `spec.serviceAccountName` will return the name of the service account this pod is running under +* Adding ImagePolicyWebhook admission controller. 
([#30631](https://github.com/kubernetes/kubernetes/pull/30631), [@ecordell](https://github.com/ecordell)) +* Validate involvedObject.Namespace matches event.Namespace ([#30533](https://github.com/kubernetes/kubernetes/pull/30533), [@liggitt](https://github.com/liggitt)) +* allow group impersonation ([#30803](https://github.com/kubernetes/kubernetes/pull/30803), [@deads2k](https://github.com/deads2k)) +* Always return command output for exec probes and kubelet RunInContainer ([#30731](https://github.com/kubernetes/kubernetes/pull/30731), [@ncdc](https://github.com/ncdc)) +* Enable the garbage collector by default ([#30480](https://github.com/kubernetes/kubernetes/pull/30480), [@caesarxuchao](https://github.com/caesarxuchao)) +* NONE ([#30599](https://github.com/kubernetes/kubernetes/pull/30599), [@therc](https://github.com/therc)) +* use valid_resources to replace kubectl.PossibleResourceTypes ([#30955](https://github.com/kubernetes/kubernetes/pull/30955), [@lojies](https://github.com/lojies)) +* oidc auth provider: don't trim issuer URL ([#30944](https://github.com/kubernetes/kubernetes/pull/30944), [@ericchiang](https://github.com/ericchiang)) +* Add a short `-n` for `kubectl --namespace` ([#30630](https://github.com/kubernetes/kubernetes/pull/30630), [@silasbw](https://github.com/silasbw)) +* Federated secret controller ([#30669](https://github.com/kubernetes/kubernetes/pull/30669), [@kshafiee](https://github.com/kshafiee)) +* Add Events for operation_executor to show status of mounts, failed/successful to show in describe events ([#27778](https://github.com/kubernetes/kubernetes/pull/27778), [@screeley44](https://github.com/screeley44)) +* Alpha support for OpenAPI (aka. Swagger 2.0) specification served on /swagger.json (enabled by default) ([#30233](https://github.com/kubernetes/kubernetes/pull/30233), [@mbohlool](https://github.com/mbohlool)) +* Disable linux/ppc64le compilation by default ([#30659](https://github.com/kubernetes/kubernetes/pull/30659), [@ixdy](https://github.com/ixdy)) +* Implement dynamic provisioning (beta) of PersistentVolumes via StorageClass ([#29006](https://github.com/kubernetes/kubernetes/pull/29006), [@jsafrane](https://github.com/jsafrane)) +* Allow setting permission mode bits on secrets, configmaps and downwardAPI files ([#28936](https://github.com/kubernetes/kubernetes/pull/28936), [@rata](https://github.com/rata)) +* Skip safe to detach check if node API object no longer exists ([#30737](https://github.com/kubernetes/kubernetes/pull/30737), [@saad-ali](https://github.com/saad-ali)) +* The Kubelet now supports the `--require-kubeconfig` option which reads all client config from the provided `--kubeconfig` file and will cause the Kubelet to exit with error code 1 on error. It also forces the Kubelet to use the server URL from the kubeconfig file rather than the `--api-servers` flag. Without this flag set, a failure to read the kubeconfig file would only result in a warning message. ([#30798](https://github.com/kubernetes/kubernetes/pull/30798), [@smarterclayton](https://github.com/smarterclayton)) + * In a future release, the value of this flag will be defaulted to `true`. +* Adding container image verification webhook API. 
+* NodeController doesn't flip readiness on pods if kubeletVersion < 1.2.0 ([#30828](https://github.com/kubernetes/kubernetes/pull/30828), [@bprashanth](https://github.com/bprashanth))
+* AWS: Handle kube-down case where the LaunchConfig is dangling ([#30816](https://github.com/kubernetes/kubernetes/pull/30816), [@zmerlynn](https://github.com/zmerlynn))
+* kubectl will no longer do client-side defaulting on create and replace. ([#30250](https://github.com/kubernetes/kubernetes/pull/30250), [@krousey](https://github.com/krousey))
+* Added warning message for `kubectl get` ([#28352](https://github.com/kubernetes/kubernetes/pull/28352), [@vefimova](https://github.com/vefimova))
+* Removed support for HPA in extensions client. ([#30504](https://github.com/kubernetes/kubernetes/pull/30504), [@piosz](https://github.com/piosz))
+* Implement DisruptionController. ([#25921](https://github.com/kubernetes/kubernetes/pull/25921), [@mml](https://github.com/mml))
+* [Kubelet] Check if kubelet is running as uid 0 ([#30466](https://github.com/kubernetes/kubernetes/pull/30466), [@vishh](https://github.com/vishh))
+* Fix third party APIResource reporting ([#29724](https://github.com/kubernetes/kubernetes/pull/29724), [@brendandburns](https://github.com/brendandburns))
+* speed up RC scaler ([#30383](https://github.com/kubernetes/kubernetes/pull/30383), [@deads2k](https://github.com/deads2k))
+* Set pod state as "unknown" when CNI plugin fails ([#30137](https://github.com/kubernetes/kubernetes/pull/30137), [@nhlfr](https://github.com/nhlfr))
+* Cluster Federation components can now be built and deployed using the make command. Please see federation/README.md for details. ([#29515](https://github.com/kubernetes/kubernetes/pull/29515), [@madhusudancs](https://github.com/madhusudancs))
+* Adding events to federation control plane ([#30421](https://github.com/kubernetes/kubernetes/pull/30421), [@nikhiljindal](https://github.com/nikhiljindal))
+* [kubelet] Introduce --protect-kernel-defaults flag to make the tunable behaviour configurable ([#27874](https://github.com/kubernetes/kubernetes/pull/27874), [@ingvagabund](https://github.com/ingvagabund))
+* Add support for kube-up.sh to deploy Calico network policy to GCI masters ([#29037](https://github.com/kubernetes/kubernetes/pull/29037), [@matthewdupre](https://github.com/matthewdupre))
+* Added 'kubectl top' command showing the resource usage metrics (see the usage sketch at the end of this list). ([#28844](https://github.com/kubernetes/kubernetes/pull/28844), [@mksalawa](https://github.com/mksalawa))
+* Add basic audit logging ([#27087](https://github.com/kubernetes/kubernetes/pull/27087), [@soltysh](https://github.com/soltysh))
+* Marked NodePhase deprecated. ([#30005](https://github.com/kubernetes/kubernetes/pull/30005), [@dchen1107](https://github.com/dchen1107))
+* Name the job created by scheduledjob (sj) deterministically with sj's name and a hash of job's scheduled time. ([#30420](https://github.com/kubernetes/kubernetes/pull/30420), [@janetkuo](https://github.com/janetkuo))
+* add metrics for workqueues ([#30296](https://github.com/kubernetes/kubernetes/pull/30296), [@deads2k](https://github.com/deads2k))
+* Adding ingress resource to federation apiserver ([#30112](https://github.com/kubernetes/kubernetes/pull/30112), [@nikhiljindal](https://github.com/nikhiljindal))
+* Update Dashboard UI to version v1.1.1 ([#30273](https://github.com/kubernetes/kubernetes/pull/30273), [@bryk](https://github.com/bryk))
+* Update etcd 2.2 references to use 3.0.x ([#29399](https://github.com/kubernetes/kubernetes/pull/29399), [@timothysc](https://github.com/timothysc))
+* HPA: ignore scale targets whose replica count is 0 ([#29212](https://github.com/kubernetes/kubernetes/pull/29212), [@sjenning](https://github.com/sjenning))
+* Add total inodes to kubelet summary api ([#30231](https://github.com/kubernetes/kubernetes/pull/30231), [@derekwaynecarr](https://github.com/derekwaynecarr))
+* Updates required for juju kubernetes to use the tls-terminated etcd charm. ([#30104](https://github.com/kubernetes/kubernetes/pull/30104), [@mbruzek](https://github.com/mbruzek))
+* Fix PVC.Status.Capacity and AccessModes after binding ([#29982](https://github.com/kubernetes/kubernetes/pull/29982), [@jsafrane](https://github.com/jsafrane))
+* allow a read-only rbd image to be mounted by multiple pods ([#29622](https://github.com/kubernetes/kubernetes/pull/29622), [@rootfs](https://github.com/rootfs))
+* [kubelet] Auto-discover node IP if no cloud provider exists and IP is not explicitly specified ([#29907](https://github.com/kubernetes/kubernetes/pull/29907), [@luxas](https://github.com/luxas))
+* kubectl config set-credentials: add arguments for auth providers ([#30007](https://github.com/kubernetes/kubernetes/pull/30007), [@ericchiang](https://github.com/ericchiang))
+* Scheduledjob controller ([#29137](https://github.com/kubernetes/kubernetes/pull/29137), [@janetkuo](https://github.com/janetkuo))
+* add subjectaccessreviews resource ([#20573](https://github.com/kubernetes/kubernetes/pull/20573), [@deads2k](https://github.com/deads2k))
+* AWS/GCE: Rework use of master name ([#30047](https://github.com/kubernetes/kubernetes/pull/30047), [@zmerlynn](https://github.com/zmerlynn))
+* Add density (batch pods creation latency and resource) and resource performance tests to `test-e2e-node`, built for Linux only ([#30026](https://github.com/kubernetes/kubernetes/pull/30026), [@coufon](https://github.com/coufon))
+* Clean up items from moving local cluster setup guides ([#30035](https://github.com/kubernetes/kubernetes/pull/30035), [@pwittrock](https://github.com/pwittrock))
+* federation: Adding secret API ([#29138](https://github.com/kubernetes/kubernetes/pull/29138), [@kshafiee](https://github.com/kshafiee))
+* Introducing ScheduledJobs as described in [the proposal](docs/proposals/scheduledjob.md) as part of `batch/v2alpha1` version (experimental feature). ([#25816](https://github.com/kubernetes/kubernetes/pull/25816), [@soltysh](https://github.com/soltysh))
+* Node disk pressure should induce image gc ([#29880](https://github.com/kubernetes/kubernetes/pull/29880), [@derekwaynecarr](https://github.com/derekwaynecarr))
+* oidc authentication plugin: don't trim issuer URLs with trailing slashes ([#29860](https://github.com/kubernetes/kubernetes/pull/29860), [@ericchiang](https://github.com/ericchiang))
+* Allow leading * in ingress hostname ([#29204](https://github.com/kubernetes/kubernetes/pull/29204), [@aledbf](https://github.com/aledbf))
+* Rewrite service controller to apply best controller pattern ([#25189](https://github.com/kubernetes/kubernetes/pull/25189), [@mfanjie](https://github.com/mfanjie))
+* Fix issue with kubectl annotate when --resource-version is provided. ([#29319](https://github.com/kubernetes/kubernetes/pull/29319), [@juanvallejo](https://github.com/juanvallejo))
+* Reverted conversion of influx-db to Pet Set; it is now a Replication Controller. ([#30080](https://github.com/kubernetes/kubernetes/pull/30080), [@jszczepkowski](https://github.com/jszczepkowski))
+* rbac validation: rules can't combine non-resource URLs and regular resources ([#29930](https://github.com/kubernetes/kubernetes/pull/29930), [@ericchiang](https://github.com/ericchiang))
+* VSAN support for VSphere Volume Plugin ([#29172](https://github.com/kubernetes/kubernetes/pull/29172), [@abrarshivani](https://github.com/abrarshivani))
+* Addresses vSphere Volume Attach limits ([#29881](https://github.com/kubernetes/kubernetes/pull/29881), [@dagnello](https://github.com/dagnello))
+* allow restricting subresource access ([#29988](https://github.com/kubernetes/kubernetes/pull/29988), [@deads2k](https://github.com/deads2k))
+* Add density (batch pods creation latency and resource) and resource performance tests to `test-e2e-node` ([#29764](https://github.com/kubernetes/kubernetes/pull/29764), [@coufon](https://github.com/coufon))
+* Allow Secret & ConfigMap keys to contain caps, dots, and underscores ([#25458](https://github.com/kubernetes/kubernetes/pull/25458), [@errm](https://github.com/errm))
+* allow watching old resources with kubectl ([#27392](https://github.com/kubernetes/kubernetes/pull/27392), [@sjenning](https://github.com/sjenning))
+* azure: kube-up respects AZURE_RESOURCE_GROUP ([#28700](https://github.com/kubernetes/kubernetes/pull/28700), [@colemickens](https://github.com/colemickens))
+* Modified influxdb petset to provision persistent volume. ([#28840](https://github.com/kubernetes/kubernetes/pull/28840), [@jszczepkowski](https://github.com/jszczepkowski))
+* Allow service names up to 63 characters (RFC 1035) ([#29523](https://github.com/kubernetes/kubernetes/pull/29523), [@fraenkel](https://github.com/fraenkel))
+* Change eviction policies in NodeController: ([#28897](https://github.com/kubernetes/kubernetes/pull/28897), [@gmarek](https://github.com/gmarek))
+  * - add a "partialDisruption" mode, when more than 33% of Nodes in the zone are not Ready
+  * - add "fullDisruption" mode, when all Nodes in the zone are not Ready
+  * Eviction behavior depends on the mode in which NodeController is operating:
+  * - if the new state is "partialDisruption" or "fullDisruption" we call a user-defined function that returns a new QPS to use (default 1/10 of the default rate, and the default rate respectively),
+  * - if the new state is "normal" we resume normal operation (go back to default limiter settings),
+  * - if all zones in the cluster are in "fullDisruption" state we stop all evictions.
+* Add a flag for `kubectl expose` to set ClusterIP and allow headless services ([#28239](https://github.com/kubernetes/kubernetes/pull/28239), [@ApsOps](https://github.com/ApsOps))
+* Add support to quota pvc storage requests ([#28636](https://github.com/kubernetes/kubernetes/pull/28636), [@derekwaynecarr](https://github.com/derekwaynecarr))
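+
+A minimal usage sketch for the new `kubectl top` command noted in the list above; it assumes Heapster is running in the cluster, and the namespace is illustrative:
+
+```sh
+# Show resource usage across nodes, then for pods in one namespace.
+kubectl top node
+kubectl top pod --namespace=kube-system
+```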
+
+
+
+# v1.3.5
+
+[Documentation](http://kubernetes.github.io) & [Examples](http://releases.k8s.io/release-1.3/examples)
+
+## Downloads
+
+binary | sha256 hash
+------ | -----------
+[kubernetes.tar.gz](https://storage.googleapis.com/kubernetes-release/release/v1.3.5/kubernetes.tar.gz) | `46be88ce927124f7cef7e280720b42c63051086880b7ebdba298b561dbe19f82`
+
+## Changelog since v1.3.4
+
+### Other notable changes
+
+* Update Dashboard UI to version v1.1.1 ([#30273](https://github.com/kubernetes/kubernetes/pull/30273), [@bryk](https://github.com/bryk))
+* allow restricting subresource access ([#30001](https://github.com/kubernetes/kubernetes/pull/30001), [@deads2k](https://github.com/deads2k))
+* Fix PVC.Status.Capacity and AccessModes after binding ([#29982](https://github.com/kubernetes/kubernetes/pull/29982), [@jsafrane](https://github.com/jsafrane))
+* oidc authentication plugin: don't trim issuer URLs with trailing slashes ([#29860](https://github.com/kubernetes/kubernetes/pull/29860), [@ericchiang](https://github.com/ericchiang))
+* network/cni: Bring up the `lo` interface for rkt ([#29310](https://github.com/kubernetes/kubernetes/pull/29310), [@euank](https://github.com/euank))
+* Fixing kube-up for CVM masters. ([#29140](https://github.com/kubernetes/kubernetes/pull/29140), [@maisem](https://github.com/maisem))
+
+
+
+# v1.3.4
+
+[Documentation](http://kubernetes.github.io) & [Examples](http://releases.k8s.io/release-1.3/examples)
+
+## Downloads
+
+binary | sha256 hash
+------ | -----------
+[kubernetes.tar.gz](https://storage.googleapis.com/kubernetes-release/release/v1.3.4/kubernetes.tar.gz) | `818acc1a8ba61cff434d4c0c5aa3d342d06e6907b565cfd8651b8cfcf3f0a1e6`
+
+## Changelog since v1.3.3
+
+### Other notable changes
+
+* NetworkPolicy cherry-pick 1.3 ([#29556](https://github.com/kubernetes/kubernetes/pull/29556), [@caseydavenport](https://github.com/caseydavenport))
+* Allow mounts to run in parallel for non-attachable volumes ([#28939](https://github.com/kubernetes/kubernetes/pull/28939), [@saad-ali](https://github.com/saad-ali))
+* add enhanced volume and mount logging for block devices ([#24797](https://github.com/kubernetes/kubernetes/pull/24797), [@screeley44](https://github.com/screeley44))
+* kube-up: increase download timeout for kubernetes.tar.gz ([#29426](https://github.com/kubernetes/kubernetes/pull/29426), [@justinsb](https://github.com/justinsb))
+* Fix RBAC authorizer of ServiceAccount ([#29071](https://github.com/kubernetes/kubernetes/pull/29071), [@albatross0](https://github.com/albatross0))
+* Update docker engine-api to dea108d3aa ([#29144](https://github.com/kubernetes/kubernetes/pull/29144), [@ronnielai](https://github.com/ronnielai))
+* Assume volume is detached if node doesn't exist ([#29485](https://github.com/kubernetes/kubernetes/pull/29485), [@saad-ali](https://github.com/saad-ali))
+* Make PD E2E Tests Wait for Detach to Prevent Kernel Errors ([#29031](https://github.com/kubernetes/kubernetes/pull/29031), [@saad-ali](https://github.com/saad-ali))
+* Fix "PVC Volume not detached if pod deleted via namespace deletion" issue ([#29077](https://github.com/kubernetes/kubernetes/pull/29077), [@saad-ali](https://github.com/saad-ali))
+* append an abac rule for $KUBE_USER. ([#29164](https://github.com/kubernetes/kubernetes/pull/29164), [@cjcullen](https://github.com/cjcullen))
+
+
+
+# v1.4.0-alpha.2
+
+[Documentation](http://kubernetes.github.io) & [Examples](http://releases.k8s.io/master/examples)
+
+## Downloads
+
+binary | sha256 hash
+------ | -----------
+[kubernetes.tar.gz](https://storage.googleapis.com/kubernetes-release/release/v1.4.0-alpha.2/kubernetes.tar.gz) | `787ce63a5149a1cb47d14c55450172e3a045d85349682d2e17ff492de9e415b9`
+
+## Changelog since v1.4.0-alpha.1
+
+### Action Required
+
+* Federation API server kubeconfig secret consumed by federation-controller-manager has a new name. ([#28938](https://github.com/kubernetes/kubernetes/pull/28938), [@madhusudancs](https://github.com/madhusudancs))
+  * If you are upgrading your Cluster Federation components from v1.3.x, please run this command to migrate the federation-apiserver-secret to the federation-apiserver-kubeconfig secret:
+  * $ kubectl --namespace=federation get secret federation-apiserver-secret -o json | sed 's/federation-apiserver-secret/federation-apiserver-kubeconfig/g' | kubectl create -f -
+  * You might also want to delete the old secret using this command:
+  * $ kubectl delete secret --namespace=federation federation-apiserver-secret
+* Stop eating panics ([#28800](https://github.com/kubernetes/kubernetes/pull/28800), [@lavalamp](https://github.com/lavalamp))
+
+### Other notable changes
+
+* Add API for StorageClasses ([#29694](https://github.com/kubernetes/kubernetes/pull/29694), [@childsb](https://github.com/childsb))
+* Fix kubectl help command ([#29737](https://github.com/kubernetes/kubernetes/pull/29737), [@andreykurilin](https://github.com/andreykurilin))
+* add shorthand cm for configmaps ([#29652](https://github.com/kubernetes/kubernetes/pull/29652), [@lojies](https://github.com/lojies))
+* Bump cadvisor dependencies to latest head. ([#29492](https://github.com/kubernetes/kubernetes/pull/29492), [@Random-Liu](https://github.com/Random-Liu))
+* If a service of type node port declares multiple ports, quota on "services.nodeports" will charge for each port in the service. ([#29457](https://github.com/kubernetes/kubernetes/pull/29457), [@derekwaynecarr](https://github.com/derekwaynecarr))
+* Add an Azure CloudProvider Implementation ([#28821](https://github.com/kubernetes/kubernetes/pull/28821), [@colemickens](https://github.com/colemickens))
+* Add support for kubectl create quota command ([#28351](https://github.com/kubernetes/kubernetes/pull/28351), [@sttts](https://github.com/sttts))
+* Assume volume is detached if node doesn't exist ([#29485](https://github.com/kubernetes/kubernetes/pull/29485), [@saad-ali](https://github.com/saad-ali))
+* kube-up: increase download timeout for kubernetes.tar.gz ([#29426](https://github.com/kubernetes/kubernetes/pull/29426), [@justinsb](https://github.com/justinsb))
+* Allow multiple APIs to register for the same API Group ([#28414](https://github.com/kubernetes/kubernetes/pull/28414), [@brendandburns](https://github.com/brendandburns))
+* Fix a problem with multiple APIs clobbering each other in registration. ([#28431](https://github.com/kubernetes/kubernetes/pull/28431), [@brendandburns](https://github.com/brendandburns))
+* Removing images with multiple tags ([#29316](https://github.com/kubernetes/kubernetes/pull/29316), [@ronnielai](https://github.com/ronnielai))
+* add enhanced volume and mount logging for block devices ([#24797](https://github.com/kubernetes/kubernetes/pull/24797), [@screeley44](https://github.com/screeley44))
+* append an abac rule for $KUBE_USER. ([#29164](https://github.com/kubernetes/kubernetes/pull/29164), [@cjcullen](https://github.com/cjcullen))
+* add tokenreviews endpoint to implement webhook ([#28788](https://github.com/kubernetes/kubernetes/pull/28788), [@deads2k](https://github.com/deads2k))
+* Fix "PVC Volume not detached if pod deleted via namespace deletion" issue ([#29077](https://github.com/kubernetes/kubernetes/pull/29077), [@saad-ali](https://github.com/saad-ali))
+* Allow mounts to run in parallel for non-attachable volumes ([#28939](https://github.com/kubernetes/kubernetes/pull/28939), [@saad-ali](https://github.com/saad-ali))
+* Fix working_set calculation in kubelet ([#29153](https://github.com/kubernetes/kubernetes/pull/29153), [@vishh](https://github.com/vishh))
+* Fix RBAC authorizer of ServiceAccount ([#29071](https://github.com/kubernetes/kubernetes/pull/29071), [@albatross0](https://github.com/albatross0))
+* kubectl proxy now allows URLs to pods with "attach" or "exec" in the pod name ([#28765](https://github.com/kubernetes/kubernetes/pull/28765), [@nhlfr](https://github.com/nhlfr))
+* AWS: Added experimental option to skip zone check ([#28417](https://github.com/kubernetes/kubernetes/pull/28417), [@kevensen](https://github.com/kevensen))
+* Ubuntu: Enable ssh compression when downloading binaries during cluster creation ([#26746](https://github.com/kubernetes/kubernetes/pull/26746), [@MHBauer](https://github.com/MHBauer))
+* Add extensions/replicaset to federation-apiserver ([#24764](https://github.com/kubernetes/kubernetes/pull/24764), [@jianhuiz](https://github.com/jianhuiz))
+* federation: Adding namespaces API ([#26298](https://github.com/kubernetes/kubernetes/pull/26298), [@nikhiljindal](https://github.com/nikhiljindal))
+* Improve quota controller performance by eliminating unneeded list calls ([#29134](https://github.com/kubernetes/kubernetes/pull/29134), [@derekwaynecarr](https://github.com/derekwaynecarr))
+* Make Daemonset use GeneralPredicates ([#28803](https://github.com/kubernetes/kubernetes/pull/28803), [@lukaszo](https://github.com/lukaszo))
+* Update docker engine-api to dea108d3aa ([#29144](https://github.com/kubernetes/kubernetes/pull/29144), [@ronnielai](https://github.com/ronnielai))
+* Fixing kube-up for CVM masters. ([#29140](https://github.com/kubernetes/kubernetes/pull/29140), [@maisem](https://github.com/maisem))
+* Fix logrotate config on GCI ([#29139](https://github.com/kubernetes/kubernetes/pull/29139), [@adityakali](https://github.com/adityakali))
+* GCE bring-up: Differentiate NODE_TAGS from NODE_INSTANCE_PREFIX ([#29141](https://github.com/kubernetes/kubernetes/pull/29141), [@zmerlynn](https://github.com/zmerlynn))
+* hyperkube: fix build for 3rd party registry (again) ([#28489](https://github.com/kubernetes/kubernetes/pull/28489), [@liyimeng](https://github.com/liyimeng))
+* Detect flakes in PR builder e2e runs ([#27898](https://github.com/kubernetes/kubernetes/pull/27898), [@lavalamp](https://github.com/lavalamp))
+* Remove examples moved to docs site ([#23513](https://github.com/kubernetes/kubernetes/pull/23513), [@erictune](https://github.com/erictune))
+* Do not query the metadata server to find out if running on GCE. Retry metadata server query for GCR if running on GCE. ([#28871](https://github.com/kubernetes/kubernetes/pull/28871), [@vishh](https://github.com/vishh))
+* Change maxsize to size in logrotate. ([#29128](https://github.com/kubernetes/kubernetes/pull/29128), [@bprashanth](https://github.com/bprashanth))
+* Change setting "kubectl --record=false" to stop updating the change-cause when a previous change-cause is found. ([#28234](https://github.com/kubernetes/kubernetes/pull/28234), [@damemi](https://github.com/damemi))
+* Add "kubectl --overwrite" flag to automatically resolve conflicts between the modified and live configuration using values from the modified configuration. ([#26136](https://github.com/kubernetes/kubernetes/pull/26136), [@AdoHe](https://github.com/AdoHe))
+* Make discovery summarizer call servers in parallel ([#26705](https://github.com/kubernetes/kubernetes/pull/26705), [@nebril](https://github.com/nebril))
+* Don't recreate lb cloud resources on kcm restart ([#29082](https://github.com/kubernetes/kubernetes/pull/29082), [@bprashanth](https://github.com/bprashanth))
+* List all nodes and occupy cidr map before starting allocations ([#29062](https://github.com/kubernetes/kubernetes/pull/29062), [@bprashanth](https://github.com/bprashanth))
+* Fix GPU resource validation ([#28743](https://github.com/kubernetes/kubernetes/pull/28743), [@therc](https://github.com/therc))
+* Make PD E2E Tests Wait for Detach to Prevent Kernel Errors ([#29031](https://github.com/kubernetes/kubernetes/pull/29031), [@saad-ali](https://github.com/saad-ali))
+* Scale kube-proxy conntrack limits by cores (new default behavior) ([#28876](https://github.com/kubernetes/kubernetes/pull/28876), [@thockin](https://github.com/thockin))
+* [Kubelet] Improving QoS in kubelet by introducing QoS level Cgroups - `--cgroups-per-qos` ([#27853](https://github.com/kubernetes/kubernetes/pull/27853), [@dubstack](https://github.com/dubstack))
+* AWS: Add ap-south-1 to list of known AWS regions ([#28428](https://github.com/kubernetes/kubernetes/pull/28428), [@justinsb](https://github.com/justinsb))
+* Add RELEASE_INFRA_PUSH related code to support pushes from kubernetes/release. ([#28922](https://github.com/kubernetes/kubernetes/pull/28922), [@david-mcmahon](https://github.com/david-mcmahon))
+* Fix watch cache filtering ([#28966](https://github.com/kubernetes/kubernetes/pull/28966), [@liggitt](https://github.com/liggitt))
+* Deprecate deleting-pods-burst ControllerManager flag ([#28882](https://github.com/kubernetes/kubernetes/pull/28882), [@gmarek](https://github.com/gmarek))
+* Add support for terminal resizing for exec, attach, and run. Note that for Docker, exec sessions ([#25273](https://github.com/kubernetes/kubernetes/pull/25273), [@ncdc](https://github.com/ncdc))
+  * inherit the environment from the primary process, so if the container was created with tty=false,
+  * that means the exec session's TERM variable will default to "dumb". Users can override this by
+  * setting TERM=xterm (or whatever is appropriate) to get the correct "smart" terminal behavior (see the sketch after this list).
+* Implement alpha version of PreferAvoidPods ([#20699](https://github.com/kubernetes/kubernetes/pull/20699), [@jiangyaoguo](https://github.com/jiangyaoguo))
+* Retry when apiserver fails to listen on insecure port ([#28797](https://github.com/kubernetes/kubernetes/pull/28797), [@aaronlevy](https://github.com/aaronlevy))
+* Add SSH_OPTS to config ssh and scp port ([#28872](https://github.com/kubernetes/kubernetes/pull/28872), [@lojies](https://github.com/lojies))
+* kube-up: install new Docker prerequisite (libltdl7) when not in image ([#28745](https://github.com/kubernetes/kubernetes/pull/28745), [@justinsb](https://github.com/justinsb))
+* Separate rate limiters for Pod evictions for different zones in NodeController ([#28843](https://github.com/kubernetes/kubernetes/pull/28843), [@gmarek](https://github.com/gmarek))
+* Add --quiet to hide the 'waiting for pods to be running' message in kubectl run ([#28801](https://github.com/kubernetes/kubernetes/pull/28801), [@janetkuo](https://github.com/janetkuo))
+* Controllers don't take any actions when being deleted. ([#27438](https://github.com/kubernetes/kubernetes/pull/27438), [@gmarek](https://github.com/gmarek))
+* Add "deploy" abbrev for deployments to kubectl ([#24087](https://github.com/kubernetes/kubernetes/pull/24087), [@Frostman](https://github.com/Frostman))
+* `--no-headers` is now available for `custom-columns` output ([#26696](https://github.com/kubernetes/kubernetes/pull/26696), [@gitfred](https://github.com/gitfred))
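+
+A minimal sketch of combining the new `--no-headers` support with `custom-columns` output from the entry just above; the column spec is illustrative:
+
+```sh
+# Print bare pod names, one per line, with no header row.
+kubectl get pods -o custom-columns=NAME:.metadata.name --no-headers
+```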
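+
+And a sketch for the terminal-resizing note earlier in this list, overriding the exec session's TERM when the container was created with tty=false; the pod name and shell path are placeholders:
+
+```sh
+# Ask for a "smart" terminal inside an exec session that would
+# otherwise default to TERM=dumb.
+kubectl exec -it mypod -- env TERM=xterm /bin/bash
+```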
+
+
+
+# v1.3.3
+
+[Documentation](http://kubernetes.github.io) & [Examples](http://releases.k8s.io/release-1.3/examples)
+
+## Downloads
+
+binary | sha256 hash
+------ | -----------
+[kubernetes.tar.gz](https://storage.googleapis.com/kubernetes-release/release/v1.3.3/kubernetes.tar.gz) | `a92a74a0d3f7d02d01ac2c8dfb5ee2e97b0485819e77b2110eb7c6b7c782478c`
+
+## Changelog since v1.3.2
+
+### Other notable changes
+
+* Removing images with multiple tags ([#29316](https://github.com/kubernetes/kubernetes/pull/29316), [@ronnielai](https://github.com/ronnielai))
+* kubectl: don't display an empty list when trying to get a single resource that isn't found ([#28294](https://github.com/kubernetes/kubernetes/pull/28294), [@ncdc](https://github.com/ncdc))
+* Fix working_set calculation in kubelet ([#29154](https://github.com/kubernetes/kubernetes/pull/29154), [@vishh](https://github.com/vishh))
+* Don't delete affinity when endpoints are empty ([#28655](https://github.com/kubernetes/kubernetes/pull/28655), [@freehan](https://github.com/freehan))
+* GCE bring-up: Differentiate NODE_TAGS from NODE_INSTANCE_PREFIX ([#29141](https://github.com/kubernetes/kubernetes/pull/29141), [@zmerlynn](https://github.com/zmerlynn))
+* Fix logrotate config on GCI ([#29139](https://github.com/kubernetes/kubernetes/pull/29139), [@adityakali](https://github.com/adityakali))
+* Do not query the metadata server to find out if running on GCE. Retry metadata server query for GCR if running on GCE. ([#28871](https://github.com/kubernetes/kubernetes/pull/28871), [@vishh](https://github.com/vishh))
+* Fix GPU resource validation ([#28743](https://github.com/kubernetes/kubernetes/pull/28743), [@therc](https://github.com/therc))
+* Scale kube-proxy conntrack limits by cores (new default behavior) ([#28876](https://github.com/kubernetes/kubernetes/pull/28876), [@thockin](https://github.com/thockin))
+* Don't recreate lb cloud resources on kcm restart ([#29082](https://github.com/kubernetes/kubernetes/pull/29082), [@bprashanth](https://github.com/bprashanth))
+
+## Known Issues
+
+There are a number of known issues that have been found and are being worked on.
+Please be aware of them as you test your workloads.
+
+* PVC Volume not detached if pod deleted via namespace deletion ([29051](https://github.com/kubernetes/kubernetes/issues/29051))
+* Google Compute Engine PD Detach fails if node no longer exists ([29358](https://github.com/kubernetes/kubernetes/issues/29358))
+* Mounting (only 'default-token') volume takes a long time when creating a batch of pods (parallelization issue) ([28616](https://github.com/kubernetes/kubernetes/issues/28616))
+* Error while tearing down pod, "device or resource busy" on service account secret ([28750](https://github.com/kubernetes/kubernetes/issues/28750))
+
+
+
+# v1.3.2
+
+[Documentation](http://kubernetes.github.io) & [Examples](http://releases.k8s.io/release-1.3/examples)
+
+## Downloads
+
+binary | sha1 hash | md5 hash
+------ | --------- | --------
+[kubernetes.tar.gz](https://storage.googleapis.com/kubernetes-release/release/v1.3.2/kubernetes.tar.gz) | `f46664d04dc2966c77d8727bba57f57b5f917572` | `1a5b0639941054585d0432dd5ce3abc7`
+
+## Changelog since v1.3.1
+
+### Other notable changes
+
+* List all nodes and occupy cidr map before starting allocations ([#29062](https://github.com/kubernetes/kubernetes/pull/29062), [@bprashanth](https://github.com/bprashanth))
+* Fix watch cache filtering ([#28968](https://github.com/kubernetes/kubernetes/pull/28968), [@liggitt](https://github.com/liggitt))
+* Lock all possible kubecfg files at the beginning of ModifyConfig. ([#28232](https://github.com/kubernetes/kubernetes/pull/28232), [@cjcullen](https://github.com/cjcullen))
+
+
+
+# v1.3.1
+
+[Documentation](http://kubernetes.github.io) & [Examples](http://releases.k8s.io/release-1.3.0/examples)
+
+## Downloads
+
+binary | sha1 hash | md5 hash
+------ | --------- | --------
+[kubernetes.tar.gz](https://storage.googleapis.com/kubernetes-release/release/v1.3.1/kubernetes.tar.gz) | `5645b12beda22137204439de8260c62c9925f89b` | `ae6e9902ec70c1322d9a0a29ef385190`
+
+## Changelog since v1.3.0
+
+### Other notable changes
+
+* Fix watch cache filtering ([#29046](https://github.com/kubernetes/kubernetes/pull/29046), [@liggitt](https://github.com/liggitt))
+
+
+
+# v1.2.6
+
+[Documentation](http://kubernetes.github.io) & [Examples](http://releases.k8s.io/release-1.2/examples)
+
+## Downloads
+
+binary | sha1 hash | md5 hash
+------ | --------- | --------
+[kubernetes.tar.gz](https://storage.googleapis.com/kubernetes-release/release/v1.2.6/kubernetes.tar.gz) | `50023455d00af52c41a7158b4bd117b2dfd4a100` | `cf0411bcb620eb13b08b93578efffc43`
+
+## Changelog since v1.2.5
+
+### Other notable changes
+
+* Fix watch cache filtering ([#28967](https://github.com/kubernetes/kubernetes/pull/28967), [@liggitt](https://github.com/liggitt))
+* Fix problems with container restarts and flocker ([#25874](https://github.com/kubernetes/kubernetes/pull/25874), [@simonswine](https://github.com/simonswine))
+
+
+
+# v1.4.0-alpha.1
+
+[Documentation](http://kubernetes.github.io) & [Examples](http://releases.k8s.io/master/examples)
+
+## Downloads
+
+binary | sha1 hash | md5 hash
+------ | --------- | --------
+[kubernetes.tar.gz](https://storage.googleapis.com/kubernetes-release/release/v1.4.0-alpha.1/kubernetes.tar.gz) | `11a199208c5164a291c1767a1b9e64e45fdea747` | `334f349daf9268d8ac091d7fcc8e4626`
+
+## Changelog since v1.3.0
+
+### Experimental Features
+
+* An alpha implementation of the TLS bootstrap API described in docs/proposals/kubelet-tls-bootstrap.md. ([#25562](https://github.com/kubernetes/kubernetes/pull/25562), [@gtank](https://github.com/gtank))
+
+### Action Required
+
+* [kubelet] Allow opting out of automatic cloud provider detection in kubelet. By default kubelet will auto-detect cloud providers ([#28258](https://github.com/kubernetes/kubernetes/pull/28258), [@vishh](https://github.com/vishh))
+* If you use one of the kube-dns replication controller manifests in `cluster/saltbase/salt/kube-dns`, i.e. `cluster/saltbase/salt/kube-dns/{skydns-rc.yaml.base,skydns-rc.yaml.in}`, either substitute one of `__PILLAR__FEDERATIONS__DOMAIN__MAP__` or `{{ pillar['federations_domain_map'] }}` with the corresponding federation name to domain name value or remove them if you do not support cluster federation at this time. If you plan to substitute the parameter with its value, here is an example for `{{ pillar['federations_domain_map'] }}` ([#28132](https://github.com/kubernetes/kubernetes/pull/28132), [@madhusudancs](https://github.com/madhusudancs))
+  * pillar['federations_domain_map'] = "- --federations=myfederation=federation.test"
+  * where `myfederation` is the name of the federation and `federation.test` is the domain name registered for the federation (a substitution sketch follows this list).
+* Proportionally scale paused and rolling deployments ([#20273](https://github.com/kubernetes/kubernetes/pull/20273), [@kargakis](https://github.com/kargakis))
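+
+A sketch of the substitution described in the kube-dns item above, assuming the `skydns-rc.yaml.base` variant and the example federation name; the exact placeholder layout in the manifest may differ:
+
+```sh
+# Replace the placeholder with a concrete federation-to-domain mapping,
+# or delete the placeholder line if cluster federation is not used.
+sed -i 's/__PILLAR__FEDERATIONS__DOMAIN__MAP__/- --federations=myfederation=federation.test/' \
+  cluster/saltbase/salt/kube-dns/skydns-rc.yaml.base
+```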
+
+### Other notable changes
+
+* Support --all-namespaces in kubectl describe ([#26315](https://github.com/kubernetes/kubernetes/pull/26315), [@dims](https://github.com/dims))
+* Add checks in Create and Update Cgroup methods ([#28566](https://github.com/kubernetes/kubernetes/pull/28566), [@dubstack](https://github.com/dubstack))
+* Update coreos node e2e image to a version that uses cgroupfs ([#28661](https://github.com/kubernetes/kubernetes/pull/28661), [@dubstack](https://github.com/dubstack))
+* Don't delete affinity when endpoints are empty ([#28655](https://github.com/kubernetes/kubernetes/pull/28655), [@freehan](https://github.com/freehan))
+* Enable memory based pod evictions by default on the kubelet. ([#28607](https://github.com/kubernetes/kubernetes/pull/28607), [@derekwaynecarr](https://github.com/derekwaynecarr))
+  * Trigger pod eviction when available memory falls below 100Mi.
+* Enable extensions/v1beta1/NetworkPolicy by default ([#28549](https://github.com/kubernetes/kubernetes/pull/28549), [@caseydavenport](https://github.com/caseydavenport))
+* MESOS: Support a pre-installed km binary at a well known, agent-local path ([#28447](https://github.com/kubernetes/kubernetes/pull/28447), [@k82cn](https://github.com/k82cn))
+* kubectl should print usage at the bottom ([#25640](https://github.com/kubernetes/kubernetes/pull/25640), [@dims](https://github.com/dims))
+* A new command "kubectl config get-contexts" has been added. ([#25463](https://github.com/kubernetes/kubernetes/pull/25463), [@asalkeld](https://github.com/asalkeld))
+* kubectl: ignore only update conflicts in the scaler ([#27048](https://github.com/kubernetes/kubernetes/pull/27048), [@kargakis](https://github.com/kargakis))
+* Declare out of disk when there are no free inodes ([#28176](https://github.com/kubernetes/kubernetes/pull/28176), [@ronnielai](https://github.com/ronnielai))
+* Includes the number of free inodes in stat summary ([#28173](https://github.com/kubernetes/kubernetes/pull/28173), [@ronnielai](https://github.com/ronnielai))
+* kubectl: don't display an empty list when trying to get a single resource that isn't found ([#28294](https://github.com/kubernetes/kubernetes/pull/28294), [@ncdc](https://github.com/ncdc))
+* Graceful deletion bumps object's generation ([#27269](https://github.com/kubernetes/kubernetes/pull/27269), [@gmarek](https://github.com/gmarek))
+* Allow specifying secret data using strings ([#28263](https://github.com/kubernetes/kubernetes/pull/28263), [@liggitt](https://github.com/liggitt))
+* kubectl help now provides "Did you mean this?" suggestions for typo/invalid command names. ([#27049](https://github.com/kubernetes/kubernetes/pull/27049), [@andreykurilin](https://github.com/andreykurilin))
+* Lock all possible kubecfg files at the beginning of ModifyConfig. ([#28232](https://github.com/kubernetes/kubernetes/pull/28232), [@cjcullen](https://github.com/cjcullen))
+* Enable HTTP2 by default ([#28114](https://github.com/kubernetes/kubernetes/pull/28114), [@timothysc](https://github.com/timothysc))
+* Influxdb migrated to PetSet and PersistentVolumes.
([#28109](https://github.com/kubernetes/kubernetes/pull/28109), [@jszczepkowski](https://github.com/jszczepkowski)) +* Change references to gs://kubernetes-release/ci ([#28193](https://github.com/kubernetes/kubernetes/pull/28193), [@zmerlynn](https://github.com/zmerlynn)) +* Build: Add KUBE_GCS_RELEASE_BUCKET_MIRROR option to push-ci-build.sh ([#28172](https://github.com/kubernetes/kubernetes/pull/28172), [@zmerlynn](https://github.com/zmerlynn)) +* Skip multi-zone e2e tests unless provider is GCE, GKE or AWS ([#27871](https://github.com/kubernetes/kubernetes/pull/27871), [@lukaszo](https://github.com/lukaszo)) +* Making DHCP_OPTION_SET_ID creation optional ([#27278](https://github.com/kubernetes/kubernetes/pull/27278), [@activars](https://github.com/activars)) +* Convert service account token controller to use a work queue ([#23858](https://github.com/kubernetes/kubernetes/pull/23858), [@liggitt](https://github.com/liggitt)) +* Adding OWNERS for federation ([#28042](https://github.com/kubernetes/kubernetes/pull/28042), [@nikhiljindal](https://github.com/nikhiljindal)) +* Modifying the default container GC policy parameters ([#27881](https://github.com/kubernetes/kubernetes/pull/27881), [@ronnielai](https://github.com/ronnielai)) +* Kubelet can retrieve host IP even when apiserver has not been contacted ([#27508](https://github.com/kubernetes/kubernetes/pull/27508), [@aaronlevy](https://github.com/aaronlevy)) +* Add the Patch method to the generated clientset. ([#27293](https://github.com/kubernetes/kubernetes/pull/27293), [@caesarxuchao](https://github.com/caesarxuchao)) +* let patch use --local flag like `kubectl set image` ([#26722](https://github.com/kubernetes/kubernetes/pull/26722), [@deads2k](https://github.com/deads2k)) +* enable recursive processing in kubectl edit ([#25085](https://github.com/kubernetes/kubernetes/pull/25085), [@metral](https://github.com/metral)) +* Image GC logic should compensate for reserved blocks ([#27996](https://github.com/kubernetes/kubernetes/pull/27996), [@ronnielai](https://github.com/ronnielai)) +* Bump minimum API version for docker to 1.21 ([#27208](https://github.com/kubernetes/kubernetes/pull/27208), [@yujuhong](https://github.com/yujuhong)) +* Adding lock files for kubeconfig updating ([#28034](https://github.com/kubernetes/kubernetes/pull/28034), [@krousey](https://github.com/krousey)) + + +# v1.3.0 + +[Documentation](http://kubernetes.github.io) & [Examples](http://releases.k8s.io/release-1.3/examples) + +## Downloads + +binary | sha1 hash | md5 hash +------ | --------- | -------- +[kubernetes.tar.gz](https://storage.googleapis.com/kubernetes-release/release/v1.3.0/kubernetes.tar.gz) | `88249c443d438666928379aa7fe865b389ed72ea` | `9270f001aef8c03ff5db63456ca9eecc` + +## Highlights + +* Authorization: + * **Alpha** RBAC authorization API group +* Federation + * federation api group is now **beta** + * Services from all federated clusters are now registered in Cloud DNS (AWS and GCP). +* Stateful Apps: + * **alpha** PetSets manage stateful apps + * **alpha** Init containers provide one-time setup for stateful containers +* Updating: + * Retry Pod/RC updates in kubectl rolling-update. + * Stop 'kubectl drain' deleting pods with local storage. + * Add `kubectl rollout status` +* Security/Auth + * L7 LB controller and disk attach controllers run on master, so nodes do not need those privileges. 
+  * Setting TLS1.2 minimum
+  * `kubectl create secret tls` command
+  * Webhook Token Authenticator
+  * **beta** PodSecurityPolicy objects limit use of security-sensitive features by pods.
+* Kubectl
+  * Display line number on JSON errors
+  * Add flag -t as shorthand for --tty
+* Resources
+  * Improved node stability by *optionally* evicting pods upon memory pressure - [Design Doc](https://github.com/kubernetes/kubernetes/blob/release-1.3/docs/proposals/kubelet-eviction.md)
+  * **alpha**: NVIDIA GPU support ([#24836](https://github.com/kubernetes/kubernetes/pull/24836), [@therc](https://github.com/therc))
+  * Adding loadBalancer services and nodeports services to quota system
+
+## Known Issues and Important Steps before Upgrading
+
+The following versions of Docker Engine are supported - *[v1.10](https://github.com/kubernetes/kubernetes/issues/19720)*, *[v1.11](https://github.com/kubernetes/kubernetes/issues/23397)*
+Although *v1.9* is still compatible, we recommend upgrading to one of the supported versions.
+Earlier versions of Docker are not supported.
+
+#### ThirdPartyResource
+
+If you use ThirdPartyResource objects, they have moved from namespace-scoped to cluster-scoped. Before upgrading to 1.3.0, export and delete any existing ThirdPartyResource objects using a 1.2.x client:
+
+`kubectl get thirdpartyresource --all-namespaces -o yaml > tprs.yaml`
+`kubectl delete -f tprs.yaml`
+
+After upgrading to 1.3.0, re-register the third party resource objects at the root scope (using a 1.3 server and client):
+
+`kubectl create -f tprs.yaml`
+
+#### kubectl
+
+The kubectl `--container-port` flag is deprecated: it will be removed in the future; please use `--target-port` instead.
+
+#### kubernetes Core Known Issues
+
+- Kube Proxy crashes infrequently due to a docker bug ([#24000](https://github.com/docker/docker/issues/24000))
+  - This issue can be resolved by restarting the docker daemon
+- CORS works only in insecure mode ([#24086](https://github.com/kubernetes/kubernetes/issues/24086))
+- Persistent volume claims get added incorrectly after being deleted under stress. Happens very infrequently. ([#26082](https://github.com/kubernetes/kubernetes/issues/26082))
+
+#### Docker runtime Known Issues
+
+- Kernel crash with Aufs storage driver on Debian Jessie ([#27885](https://github.com/kubernetes/kubernetes/issues/27885))
+  - Consider running the *new* [kubernetes node problem detector](https://github.com/kubernetes/node-problem-detector) to identify this (and other) kernel issues automatically.
+
+- File descriptors are leaked in docker v1.11 ([#275](https://github.com/docker/containerd/issues/275))
+- Additional memory overhead per container in docker v1.11 ([#21737](https://github.com/docker/docker/issues/21737))
+- [List of upstream fixes](https://github.com/docker/docker/compare/v1.10.3...runcom:docker-1.10.3-stable) for docker v1.10 identified by RedHat
+
+#### Rkt runtime Known Issues
+
+- A detailed list of known issues can be found [here](https://github.com/kubernetes/kubernetes.github.io/blob/release-1.3/docs/getting-started-guides/rkt/notes.md)
+
+*More Instructions coming soon*
+
+## Provider-specific Notes
+
+* AWS
+  * Support for ap-northeast-2 region (Seoul)
+  * Allow cross-region image pulling with ECR
+  * More reliable kube-up/kube-down
+  * Enable ICMP Type 3 Code 4 for ELBs
+  * ARP caching fix
+  * Use /dev/xvdXX names
+  * ELB:
+    * ELB proxy protocol support
+    * mixed plaintext/encrypted ports support in ELBs
+    * SSL support for ELB listeners
+  * Allow VPC CIDR to be specified (experimental)
+  * Fix problems with >2 security groups
+* GCP:
+  * Enable using gcr.io as a Docker registry mirror.
+  * Make bigger master root disks in GCE for large clusters.
+  * Change default clusterCIDRs from /16 to /14 allowing 1000 Node clusters by default.
+  * Allow Debian Jessie on GCE.
+  * Node problem detector addon pod detects and reports kernel deadlocks.
+* OpenStack
+  * Provider added.
+* VSphere:
+  * Provider updated.
+
+## Previous Releases Included in v1.3.0
+
+- [v1.3.0-beta.3](CHANGELOG.md#v130-beta3)
+- [v1.3.0-beta.2](CHANGELOG.md#v130-beta2)
+- [v1.3.0-beta.1](CHANGELOG.md#v130-beta1)
+- [v1.3.0-alpha.5](CHANGELOG.md#v130-alpha5)
+- [v1.3.0-alpha.4](CHANGELOG.md#v130-alpha4)
+- [v1.3.0-alpha.3](CHANGELOG.md#v130-alpha3)
+- [v1.3.0-alpha.2](CHANGELOG.md#v130-alpha2)
+- [v1.3.0-alpha.1](CHANGELOG.md#v130-alpha1)
+
+
+
+# v1.3.0-beta.3
+
+[Documentation](http://kubernetes.github.io) & [Examples](http://releases.k8s.io/release-1.3/examples)
+
+## Downloads
+
+binary | sha1 hash | md5 hash
+------ | --------- | --------
+[kubernetes.tar.gz](https://storage.googleapis.com/kubernetes-release/release/v1.3.0-beta.3/kubernetes.tar.gz) | `9d18964a294f356bfdc841957dcad8ff35ed909c` | `ee5fcdf86645135ed132663967876dd6`
+
+## Changelog since v1.3.0-beta.2
+
+### Action Required
+
+* [kubelet] Allow opting out of automatic cloud provider detection in kubelet. By default kubelet will auto-detect cloud providers ([#28258](https://github.com/kubernetes/kubernetes/pull/28258), [@vishh](https://github.com/vishh))
+* If you use one of the kube-dns replication controller manifests in `cluster/saltbase/salt/kube-dns`, i.e. `cluster/saltbase/salt/kube-dns/{skydns-rc.yaml.base,skydns-rc.yaml.in}`, either substitute one of `__PILLAR__FEDERATIONS__DOMAIN__MAP__` or `{{ pillar['federations_domain_map'] }}` with the corresponding federation name to domain name value or remove them if you do not support cluster federation at this time. If you plan to substitute the parameter with its value, here is an example for `{{ pillar['federations_domain_map'] }}` ([#28132](https://github.com/kubernetes/kubernetes/pull/28132), [@madhusudancs](https://github.com/madhusudancs))
+  * pillar['federations_domain_map'] = "- --federations=myfederation=federation.test"
+  * where `myfederation` is the name of the federation and `federation.test` is the domain name registered for the federation.
+* federation: Upgrading the groupversion to v1beta1 ([#28186](https://github.com/kubernetes/kubernetes/pull/28186), [@nikhiljindal](https://github.com/nikhiljindal)) +* Set Dashboard UI version to v1.1.0 ([#27869](https://github.com/kubernetes/kubernetes/pull/27869), [@bryk](https://github.com/bryk)) + +### Other notable changes + +* Build: Add KUBE_GCS_RELEASE_BUCKET_MIRROR option to push-ci-build.sh ([#28172](https://github.com/kubernetes/kubernetes/pull/28172), [@zmerlynn](https://github.com/zmerlynn)) +* Image GC logic should compensate for reserved blocks ([#27996](https://github.com/kubernetes/kubernetes/pull/27996), [@ronnielai](https://github.com/ronnielai)) +* Bump minimum API version for docker to 1.21 ([#27208](https://github.com/kubernetes/kubernetes/pull/27208), [@yujuhong](https://github.com/yujuhong)) +* Adding lock files for kubeconfig updating ([#28034](https://github.com/kubernetes/kubernetes/pull/28034), [@krousey](https://github.com/krousey)) +* federation service controller: fixing the logic to update DNS records ([#27999](https://github.com/kubernetes/kubernetes/pull/27999), [@quinton-hoole](https://github.com/quinton-hoole)) +* federation: Updating KubeDNS to try finding a local service first for federation query ([#27708](https://github.com/kubernetes/kubernetes/pull/27708), [@nikhiljindal](https://github.com/nikhiljindal)) +* Support journal logs in fluentd-gcp on GCI ([#27981](https://github.com/kubernetes/kubernetes/pull/27981), [@a-robinson](https://github.com/a-robinson)) +* Copy and display source location prominently on Kubernetes instances ([#27985](https://github.com/kubernetes/kubernetes/pull/27985), [@maisem](https://github.com/maisem)) +* Federation e2e support for AWS ([#27791](https://github.com/kubernetes/kubernetes/pull/27791), [@colhom](https://github.com/colhom)) +* Copy and display source location prominently on Kubernetes instances ([#27840](https://github.com/kubernetes/kubernetes/pull/27840), [@zmerlynn](https://github.com/zmerlynn)) +* AWS/GCE: Spread PetSet volume creation across zones, create GCE volumes in non-master zones ([#27553](https://github.com/kubernetes/kubernetes/pull/27553), [@justinsb](https://github.com/justinsb)) +* GCE provider: Create TargetPool with 200 instances, then update with rest ([#27829](https://github.com/kubernetes/kubernetes/pull/27829), [@zmerlynn](https://github.com/zmerlynn)) +* Add sources to server tarballs. ([#27830](https://github.com/kubernetes/kubernetes/pull/27830), [@david-mcmahon](https://github.com/david-mcmahon)) +* Retry Pod/RC updates in kubectl rolling-update ([#27509](https://github.com/kubernetes/kubernetes/pull/27509), [@janetkuo](https://github.com/janetkuo)) +* AWS kube-up: Authorize route53 in the IAM policy ([#27794](https://github.com/kubernetes/kubernetes/pull/27794), [@justinsb](https://github.com/justinsb)) +* Allow conformance tests to run on non-GCE providers ([#26932](https://github.com/kubernetes/kubernetes/pull/26932), [@aaronlevy](https://github.com/aaronlevy)) +* AWS kube-up: move to Docker 1.11.2 ([#27676](https://github.com/kubernetes/kubernetes/pull/27676), [@justinsb](https://github.com/justinsb)) +* Fixed an issue that Deployment may be scaled down further than allowed by maxUnavailable when minReadySeconds is set. 
([#27728](https://github.com/kubernetes/kubernetes/pull/27728), [@janetkuo](https://github.com/janetkuo)) + + + +# v1.2.5 + +[Documentation](http://kubernetes.github.io) & [Examples](http://releases.k8s.io/release-1.2/examples) + +## Downloads + +binary | sha1 hash | md5 hash +------ | --------- | -------- +[kubernetes.tar.gz](https://storage.googleapis.com/kubernetes-release/release/v1.2.5/kubernetes.tar.gz) | `ddf12d7f37dfef25308798d71ad547761d0785ac` | `69d770df8fa4eceb57167e34df3962ca` + +## Changes since v1.2.4 + +### Other notable changes + +* Retry Pod/RC updates in kubectl rolling-update ([#27509](https://github.com/kubernetes/kubernetes/pull/27509), [@janetkuo](https://github.com/janetkuo)) +* GCE provider: Create TargetPool with 200 instances, then update with rest ([#27865](https://github.com/kubernetes/kubernetes/pull/27865), [@zmerlynn](https://github.com/zmerlynn)) +* GCE provider: Limit Filter calls to regexps rather than large blobs ([#27741](https://github.com/kubernetes/kubernetes/pull/27741), [@zmerlynn](https://github.com/zmerlynn)) +* Fix strategic merge diff list diff bug ([#26418](https://github.com/kubernetes/kubernetes/pull/26418), [@AdoHe](https://github.com/AdoHe)) +* AWS: Fix long-standing bug in stringSetToPointers ([#26331](https://github.com/kubernetes/kubernetes/pull/26331), [@therc](https://github.com/therc)) +* AWS kube-up: Increase timeout waiting for docker start ([#25405](https://github.com/kubernetes/kubernetes/pull/25405), [@justinsb](https://github.com/justinsb)) +* Fix hyperkube flag parsing ([#25512](https://github.com/kubernetes/kubernetes/pull/25512), [@colhom](https://github.com/colhom)) +* kubectl rolling-update support for same image ([#24645](https://github.com/kubernetes/kubernetes/pull/24645), [@jlowdermilk](https://github.com/jlowdermilk)) +* Return "410 Gone" errors via watch stream when using watch cache ([#25369](https://github.com/kubernetes/kubernetes/pull/25369), [@liggitt](https://github.com/liggitt)) + + + +# v1.3.0-beta.2 + +[Documentation](http://kubernetes.github.io) & [Examples](http://releases.k8s.io/release-1.3/examples) + +## Downloads + +binary | sha1 hash | md5 hash +------ | --------- | -------- +[kubernetes.tar.gz](https://storage.googleapis.com/kubernetes-release/release/v1.3.0-beta.2/kubernetes.tar.gz) | `9c95762970b943d6c6547f0841c1e5471148b0e3` | `dc9e8560f24459b2313317b15910bee7` + +## Changes since v1.3.0-beta.1 + +### Experimental Features + +* Init containers enable pod authors to perform tasks before their normal containers start. Each init container is started in order, and failing containers will prevent the application from starting. 
([#23666](https://github.com/kubernetes/kubernetes/pull/23666), [@smarterclayton](https://github.com/smarterclayton)) + +### Other notable changes + +* GCE provider: Limit Filter calls to regexps rather than large blobs ([#27741](https://github.com/kubernetes/kubernetes/pull/27741), [@zmerlynn](https://github.com/zmerlynn)) +* Show LASTSEEN, the sorting key, as the first column in `kubectl get event` output ([#27549](https://github.com/kubernetes/kubernetes/pull/27549), [@therc](https://github.com/therc)) +* GCI: fix kubectl permission issue [#27643](https://github.com/kubernetes/kubernetes/pull/27643) ([#27740](https://github.com/kubernetes/kubernetes/pull/27740), [@andyzheng0831](https://github.com/andyzheng0831)) +* Add federation api and cm servers to hyperkube ([#27586](https://github.com/kubernetes/kubernetes/pull/27586), [@colhom](https://github.com/colhom)) +* federation: Creating kubeconfig files to be used for creating secrets for clusters on aws and gke ([#27332](https://github.com/kubernetes/kubernetes/pull/27332), [@nikhiljindal](https://github.com/nikhiljindal)) +* AWS: Enable ICMP Type 3 Code 4 for ELBs ([#27677](https://github.com/kubernetes/kubernetes/pull/27677), [@justinsb](https://github.com/justinsb)) +* Bumped Heapster to v1.1.0. ([#27542](https://github.com/kubernetes/kubernetes/pull/27542), [@piosz](https://github.com/piosz)) + * More details about the release https://github.com/kubernetes/heapster/releases/tag/v1.1.0 +* Deleting federation-push.sh ([#27400](https://github.com/kubernetes/kubernetes/pull/27400), [@nikhiljindal](https://github.com/nikhiljindal)) +* Validate-cluster finishes shortly after at most ALLOWED_NOTREADY_NODE… ([#26778](https://github.com/kubernetes/kubernetes/pull/26778), [@gmarek](https://github.com/gmarek)) +* AWS kube-down: Issue warning if VPC not found ([#27518](https://github.com/kubernetes/kubernetes/pull/27518), [@justinsb](https://github.com/justinsb)) +* gce/kube-down: Parallelize IGM deletion, batch more ([#27302](https://github.com/kubernetes/kubernetes/pull/27302), [@zmerlynn](https://github.com/zmerlynn)) +* Enable dynamic allocation of heapster/eventer cpu request/limit ([#27185](https://github.com/kubernetes/kubernetes/pull/27185), [@gmarek](https://github.com/gmarek)) +* 'kubectl describe pv' now shows events ([#27431](https://github.com/kubernetes/kubernetes/pull/27431), [@jsafrane](https://github.com/jsafrane)) +* AWS kube-up: set net.ipv4.neigh.default.gc_thresh1=0 to avoid ARP over-caching ([#27682](https://github.com/kubernetes/kubernetes/pull/27682), [@justinsb](https://github.com/justinsb)) +* AWS volumes: Use /dev/xvdXX names with EC2 ([#27628](https://github.com/kubernetes/kubernetes/pull/27628), [@justinsb](https://github.com/justinsb)) +* Add a test config variable to specify desired Docker version to run on GCI. 
([#26813](https://github.com/kubernetes/kubernetes/pull/26813), [@wonderfly](https://github.com/wonderfly))
+* Check for thin_ls binary in path for devicemapper when using ThinPoolWatcher and fix uint64 overflow issue for CPU stats ([#27591](https://github.com/kubernetes/kubernetes/pull/27591), [@dchen1107](https://github.com/dchen1107))
+* Change default value of deleting-pods-burst to 1 ([#27606](https://github.com/kubernetes/kubernetes/pull/27606), [@gmarek](https://github.com/gmarek))
+* MESOS: fix race condition in contrib/mesos/pkg/queue/delay ([#24916](https://github.com/kubernetes/kubernetes/pull/24916), [@jdef](https://github.com/jdef))
+* including federation binaries in the list of images we push during release ([#27396](https://github.com/kubernetes/kubernetes/pull/27396), [@nikhiljindal](https://github.com/nikhiljindal))
+* fix updatePod() of RS and RC controllers ([#27415](https://github.com/kubernetes/kubernetes/pull/27415), [@caesarxuchao](https://github.com/caesarxuchao))
+* Change default value of deleting-pods-burst to 1 ([#27422](https://github.com/kubernetes/kubernetes/pull/27422), [@gmarek](https://github.com/gmarek))
+* A new volume manager was introduced in kubelet that synchronizes volume mount/unmount (and attach/detach, if attach/detach controller is not enabled). ([#26801](https://github.com/kubernetes/kubernetes/pull/26801), [@saad-ali](https://github.com/saad-ali))
+  * This eliminates the race conditions between the pod creation loop and the orphaned volumes loops. It also removes the unmount/detach from the `syncPod()` path so volume cleanup never blocks the `syncPod` loop.
+
+
+
+# v1.3.0-beta.1
+
+[Documentation](http://kubernetes.github.io) & [Examples](http://releases.k8s.io/release-1.3/examples)
+
+## Downloads
+
+binary | sha1 hash | md5 hash
+------ | --------- | --------
+[kubernetes.tar.gz](https://storage.googleapis.com/kubernetes-release/release/v1.3.0-beta.1/kubernetes.tar.gz) | `2b54995ee8f52d78dc31c3d7291e8dfa5c809fe7` | `f1022a84c3441cae4ebe1d295470be8f`
+
+## Changes since v1.3.0-alpha.5
+
+### Action Required
+
+* Fixing logic to generate ExternalHost in genericapiserver ([#26796](https://github.com/kubernetes/kubernetes/pull/26796), [@nikhiljindal](https://github.com/nikhiljindal))
+* federation: Updating federation-controller-manager to use secret to get federation-apiserver's kubeconfig ([#26819](https://github.com/kubernetes/kubernetes/pull/26819), [@nikhiljindal](https://github.com/nikhiljindal))
+
+### Other notable changes
+
+* federation: fix dns provider initialization issues ([#27252](https://github.com/kubernetes/kubernetes/pull/27252), [@mfanjie](https://github.com/mfanjie))
+* Updating federation up scripts to work in non e2e setup ([#27260](https://github.com/kubernetes/kubernetes/pull/27260), [@nikhiljindal](https://github.com/nikhiljindal))
+* version bump for gci to milestone 53 ([#27210](https://github.com/kubernetes/kubernetes/pull/27210), [@adityakali](https://github.com/adityakali))
+* kubectl apply: retry applying a patch if a version conflict error is encountered ([#26557](https://github.com/kubernetes/kubernetes/pull/26557), [@AdoHe](https://github.com/AdoHe))
+* Revert "Wait for arc.getArchive() to complete before running tests" ([#27130](https://github.com/kubernetes/kubernetes/pull/27130), [@pwittrock](https://github.com/pwittrock))
+* ResourceQuota BestEffort scope aligned with Pod level QoS ([#26969](https://github.com/kubernetes/kubernetes/pull/26969), [@derekwaynecarr](https://github.com/derekwaynecarr))
+* The
AWS cloudprovider will cache results from DescribeInstances() if the set of nodes hasn't changed ([#26900](https://github.com/kubernetes/kubernetes/pull/26900), [@therc](https://github.com/therc)) +* GCE provider: Log full contents of long operations ([#26962](https://github.com/kubernetes/kubernetes/pull/26962), [@zmerlynn](https://github.com/zmerlynn)) +* Fix system container detection in kubelet on systemd. ([#26586](https://github.com/kubernetes/kubernetes/pull/26586), [@derekwaynecarr](https://github.com/derekwaynecarr)) + * This fixed environments where CPU and Memory Accounting were not enabled on the unit that launched the kubelet or docker from reporting the root cgroup when monitoring usage stats for those components. +* New default horizontalpodautoscaler/v1 generator for kubectl autoscale. ([#26775](https://github.com/kubernetes/kubernetes/pull/26775), [@piosz](https://github.com/piosz)) + * Use autoscaling/v1 in kubectl by default. +* federation: Adding dnsprovider flags to federation-controller-manager ([#27158](https://github.com/kubernetes/kubernetes/pull/27158), [@nikhiljindal](https://github.com/nikhiljindal)) +* federation service controller: fixing a bug so that existing services are created in newly registered clusters ([#27028](https://github.com/kubernetes/kubernetes/pull/27028), [@mfanjie](https://github.com/mfanjie)) +* Rename environment variables (KUBE_)ENABLE_NODE_AUTOSCALER to (KUBE_)ENABLE_CLUSTER_AUTOSCALER. ([#27117](https://github.com/kubernetes/kubernetes/pull/27117), [@mwielgus](https://github.com/mwielgus)) +* support for mounting local-ssds on GCI ([#27143](https://github.com/kubernetes/kubernetes/pull/27143), [@adityakali](https://github.com/adityakali)) +* AWS: support mixed plaintext/encrypted ports in ELBs via service.beta.kubernetes.io/aws-load-balancer-ssl-ports annotation ([#26976](https://github.com/kubernetes/kubernetes/pull/26976), [@therc](https://github.com/therc)) +* Updating e2e docs with instructions on running federation tests ([#27072](https://github.com/kubernetes/kubernetes/pull/27072), [@colhom](https://github.com/colhom)) +* LBaaS v2 Support for Openstack Cloud Provider Plugin ([#25987](https://github.com/kubernetes/kubernetes/pull/25987), [@dagnello](https://github.com/dagnello)) +* GCI: add support for network plugin ([#27027](https://github.com/kubernetes/kubernetes/pull/27027), [@andyzheng0831](https://github.com/andyzheng0831)) +* Bump cAdvisor to v0.23.3 ([#27065](https://github.com/kubernetes/kubernetes/pull/27065), [@timstclair](https://github.com/timstclair)) +* Stop 'kubectl drain' deleting pods with local storage. ([#26667](https://github.com/kubernetes/kubernetes/pull/26667), [@mml](https://github.com/mml)) +* Networking e2es: Wait for all nodes to be schedulable in kubeproxy and networking tests ([#27008](https://github.com/kubernetes/kubernetes/pull/27008), [@zmerlynn](https://github.com/zmerlynn)) +* change clientset of service controller to versioned ([#26694](https://github.com/kubernetes/kubernetes/pull/26694), [@mfanjie](https://github.com/mfanjie)) +* Use gcr.io as a Docker registry mirror when setting up a cluster in GCE. 
+* correction on rbd volume object and defaults ([#25490](https://github.com/kubernetes/kubernetes/pull/25490), [@rootfs](https://github.com/rootfs))
+* Bump GCE debian image to container-v1-3-v20160604 ([#26851](https://github.com/kubernetes/kubernetes/pull/26851), [@zmerlynn](https://github.com/zmerlynn))
+* Option to enable http2 on client connections. ([#25280](https://github.com/kubernetes/kubernetes/pull/25280), [@timothysc](https://github.com/timothysc))
+* kubectl get ingress output remove rules ([#26684](https://github.com/kubernetes/kubernetes/pull/26684), [@AdoHe](https://github.com/AdoHe))
+* AWS kube-up: Remove SecurityContextDeny admission controller (to mirror GCE) ([#25381](https://github.com/kubernetes/kubernetes/pull/25381), [@zquestz](https://github.com/zquestz))
+* Fix third party ([#25894](https://github.com/kubernetes/kubernetes/pull/25894), [@brendandburns](https://github.com/brendandburns))
+* AWS Route53 dnsprovider ([#26049](https://github.com/kubernetes/kubernetes/pull/26049), [@quinton-hoole](https://github.com/quinton-hoole))
+* GCI/Trusty: support the Docker registry mirror ([#26745](https://github.com/kubernetes/kubernetes/pull/26745), [@andyzheng0831](https://github.com/andyzheng0831))
+* Kubernetes v1.3 introduces a new Attach/Detach Controller. This controller manages attaching and detaching of volumes on behalf of nodes. ([#26351](https://github.com/kubernetes/kubernetes/pull/26351), [@saad-ali](https://github.com/saad-ali))
+ * This ensures that attachment and detachment of volumes is independent of any single node's availability. Meaning, if a node or kubelet becomes unavailable for any reason, the volumes attached to that node will be detached so they are free to be attached to other nodes.
+ * Specifically the new controller watches the API server for scheduled pods. It processes each pod and ensures that any volumes that implement the volume Attacher interface are attached to the node their pod is scheduled to.
+ * When a pod is deleted, the controller waits for the volume to be safely unmounted by kubelet. It does this by waiting for the volume to no longer be present in the node's Node.Status.VolumesInUse list. If the volume is not safely unmounted by kubelet within a pre-configured duration (3 minutes in Kubernetes v1.3), the controller unilaterally detaches the volume (this prevents volumes from getting stranded on nodes that become unavailable).
+ * In order to remain backwards compatible, the new controller only manages attach/detach of volumes that are scheduled to nodes that opt-in to controller management. Nodes running v1.3 or higher of Kubernetes opt-in to controller management by default by setting the "volumes.kubernetes.io/controller-managed-attach-detach" annotation on the Node object on startup (see the example below). This behavior is gated by a new kubelet flag, "enable-controller-attach-detach" (default true).
+ * In order to safely upgrade an existing Kubernetes cluster without interruption of volume attach/detach logic:
+ * First upgrade the master to Kubernetes v1.3.
+ * This will start the new attach/detach controller.
+ * The new controller will initially ignore volumes for all nodes since they lack the "volumes.kubernetes.io/controller-managed-attach-detach" annotation.
+ * Then upgrade nodes to Kubernetes v1.3.
+ * As nodes are upgraded, they will automatically, by default, opt-in to attach/detach controller management, which will cause the controller to start managing attaches/detaches for volumes that get scheduled to those nodes.
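+
+A minimal sketch of the opt-in annotation described above; the node name is hypothetical, and the annotation is normally written by the kubelet itself rather than by hand:
+
+```yaml
+apiVersion: v1
+kind: Node
+metadata:
+  name: node-1   # hypothetical
+  annotations:
+    # Set by the kubelet on startup when "enable-controller-attach-detach" is true (the v1.3 default)
+    volumes.kubernetes.io/controller-managed-attach-detach: "true"
+```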
+* Added DNS Reverse Record logic for service IPs ([#26226](https://github.com/kubernetes/kubernetes/pull/26226), [@ArtfulCoder](https://github.com/ArtfulCoder))
+* read gluster log to surface glusterfs plugin errors properly in describe events ([#24808](https://github.com/kubernetes/kubernetes/pull/24808), [@screeley44](https://github.com/screeley44))
+
+
+
+# v1.3.0-alpha.5
+
+[Documentation](http://kubernetes.github.io) & [Examples](http://releases.k8s.io/master/examples)
+
+## Downloads
+
+binary | sha1 hash | md5 hash
+------ | --------- | --------
+[kubernetes.tar.gz](https://storage.googleapis.com/kubernetes-release/release/v1.3.0-alpha.5/kubernetes.tar.gz) | `724bf5a4437ca9dc75d9297382f47a179e8dc5a6` | `2a8b4a5297df3007fce69f1e344fd87e`
+
+## Changes since v1.3.0-alpha.4
+
+### Action Required
+
+* Add direct serializer ([#26251](https://github.com/kubernetes/kubernetes/pull/26251), [@caesarxuchao](https://github.com/caesarxuchao))
+* Add a NodeCondition "NetworkUnavailable" to prevent scheduling onto a node until the routes have been created ([#26415](https://github.com/kubernetes/kubernetes/pull/26415), [@wojtek-t](https://github.com/wojtek-t))
+* Add garbage collector into kube-controller-manager ([#26341](https://github.com/kubernetes/kubernetes/pull/26341), [@caesarxuchao](https://github.com/caesarxuchao))
+* Add orphaning finalizer logic to GC ([#25599](https://github.com/kubernetes/kubernetes/pull/25599), [@caesarxuchao](https://github.com/caesarxuchao))
+* GCI-backed masters mount srv/kubernetes and srv/sshproxy in the right place ([#26238](https://github.com/kubernetes/kubernetes/pull/26238), [@ihmccreery](https://github.com/ihmccreery))
+* Updating QoS policy to be at the pod level ([#14943](https://github.com/kubernetes/kubernetes/pull/14943), [@vishh](https://github.com/vishh))
+* add CIDR allocator for NodeController ([#19242](https://github.com/kubernetes/kubernetes/pull/19242), [@mqliang](https://github.com/mqliang))
+* Adding garbage collector controller ([#24509](https://github.com/kubernetes/kubernetes/pull/24509), [@caesarxuchao](https://github.com/caesarxuchao))
+
+### Other notable changes
+
+* Fix a bug with pluralization of third party resources ([#25374](https://github.com/kubernetes/kubernetes/pull/25374), [@brendandburns](https://github.com/brendandburns))
+* Run l7 controller on master ([#26048](https://github.com/kubernetes/kubernetes/pull/26048), [@bprashanth](https://github.com/bprashanth))
+* AWS: ELB proxy protocol support via annotation service.beta.kubernetes.io/aws-load-balancer-proxy-protocol ([#24569](https://github.com/kubernetes/kubernetes/pull/24569), [@williamsandrew](https://github.com/williamsandrew))
+* kubectl run --restart=Never creates pods ([#25253](https://github.com/kubernetes/kubernetes/pull/25253), [@soltysh](https://github.com/soltysh))
+* Add LabelSelector to PersistentVolumeClaimSpec ([#25917](https://github.com/kubernetes/kubernetes/pull/25917), [@pmorie](https://github.com/pmorie)) (see the example below)
+* Removed metrics api group ([#26073](https://github.com/kubernetes/kubernetes/pull/26073), [@piosz](https://github.com/piosz))
+* Fixed check in kubectl autoscale: cpu consumption can be higher than 100%. ([#26162](https://github.com/kubernetes/kubernetes/pull/26162), [@jszczepkowski](https://github.com/jszczepkowski))
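+
+A minimal sketch of the new claim selector from the "Add LabelSelector to PersistentVolumeClaimSpec" entry above; names and labels are hypothetical:
+
+```yaml
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: data-claim            # hypothetical
+spec:
+  accessModes:
+  - ReadWriteOnce
+  resources:
+    requests:
+      storage: 8Gi
+  # New in #25917: bind only to PersistentVolumes carrying these labels
+  selector:
+    matchLabels:
+      tier: gold              # hypothetical label
+```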
+* Add support for 3rd party objects to kubectl label ([#24882](https://github.com/kubernetes/kubernetes/pull/24882), [@brendandburns](https://github.com/brendandburns))
+* Move shell completion generation into 'kubectl completion' command ([#23801](https://github.com/kubernetes/kubernetes/pull/23801), [@sttts](https://github.com/sttts))
+* Fix strategic merge diff list diff bug ([#26418](https://github.com/kubernetes/kubernetes/pull/26418), [@AdoHe](https://github.com/AdoHe))
+* Setting TLS1.2 minimum because TLS1.0 and TLS1.1 are vulnerable ([#26169](https://github.com/kubernetes/kubernetes/pull/26169), [@victorgp](https://github.com/victorgp))
+* Kubelet: Periodically reporting image pulling progress in log ([#26145](https://github.com/kubernetes/kubernetes/pull/26145), [@Random-Liu](https://github.com/Random-Liu))
+* The federation service controller is a key component of the federation controller manager; it watches federation services, creates/updates services in all registered clusters, and updates DNS records on the global DNS server. ([#26034](https://github.com/kubernetes/kubernetes/pull/26034), [@mfanjie](https://github.com/mfanjie))
+* Stabilize map order in kubectl describe ([#26046](https://github.com/kubernetes/kubernetes/pull/26046), [@timoreimann](https://github.com/timoreimann))
+* Google Cloud DNS dnsprovider - replacement for [#25389](https://github.com/kubernetes/kubernetes/pull/25389) ([#26020](https://github.com/kubernetes/kubernetes/pull/26020), [@quinton-hoole](https://github.com/quinton-hoole))
+* Fix system container detection in kubelet on systemd. ([#25982](https://github.com/kubernetes/kubernetes/pull/25982), [@derekwaynecarr](https://github.com/derekwaynecarr))
+ * This prevents environments where CPU and Memory Accounting were not enabled on the unit that launched the kubelet or docker from reporting the root cgroup when monitoring usage stats for those components.
+* Added pods-per-core to kubelet. 
[#25762](https://github.com/kubernetes/kubernetes/pull/25762) ([#25813](https://github.com/kubernetes/kubernetes/pull/25813), [@rrati](https://github.com/rrati)) +* promote sourceRange into service spec ([#25826](https://github.com/kubernetes/kubernetes/pull/25826), [@freehan](https://github.com/freehan)) +* kube-controller-manager: Add configure-cloud-routes option ([#25614](https://github.com/kubernetes/kubernetes/pull/25614), [@justinsb](https://github.com/justinsb)) +* kubelet: reading cloudinfo from cadvisor ([#21373](https://github.com/kubernetes/kubernetes/pull/21373), [@enoodle](https://github.com/enoodle)) +* Disable cAdvisor event storage by default ([#24771](https://github.com/kubernetes/kubernetes/pull/24771), [@timstclair](https://github.com/timstclair)) +* Remove docker-multinode ([#26031](https://github.com/kubernetes/kubernetes/pull/26031), [@luxas](https://github.com/luxas)) +* nodecontroller: Fix log message on successful update ([#26207](https://github.com/kubernetes/kubernetes/pull/26207), [@zmerlynn](https://github.com/zmerlynn)) +* remove deprecated generated typed clients ([#26336](https://github.com/kubernetes/kubernetes/pull/26336), [@caesarxuchao](https://github.com/caesarxuchao)) +* Kubenet host-port support through iptables ([#25604](https://github.com/kubernetes/kubernetes/pull/25604), [@freehan](https://github.com/freehan)) +* Add metrics support for a GCE PD, EC2 EBS & Azure File volumes ([#25852](https://github.com/kubernetes/kubernetes/pull/25852), [@vishh](https://github.com/vishh)) +* Bump cAdvisor to v0.23.2 - See [changelog](https://github.com/google/cadvisor/blob/master/CHANGELOG.md) for details ([#25914](https://github.com/kubernetes/kubernetes/pull/25914), [@timstclair](https://github.com/timstclair)) +* Alpha version of "Role Based Access Control" API. ([#25634](https://github.com/kubernetes/kubernetes/pull/25634), [@ericchiang](https://github.com/ericchiang)) +* Add Seccomp API ([#25324](https://github.com/kubernetes/kubernetes/pull/25324), [@jfrazelle](https://github.com/jfrazelle)) +* AWS: Fix long-standing bug in stringSetToPointers ([#26331](https://github.com/kubernetes/kubernetes/pull/26331), [@therc](https://github.com/therc)) +* Add dnsmasq as a DNS cache in kube-dns pod ([#26114](https://github.com/kubernetes/kubernetes/pull/26114), [@ArtfulCoder](https://github.com/ArtfulCoder)) +* routecontroller: Add wait.NonSlidingUntil, use it ([#26301](https://github.com/kubernetes/kubernetes/pull/26301), [@zmerlynn](https://github.com/zmerlynn)) +* Attempt 2: Bump GCE containerVM to container-v1-3-v20160517 (Docker 1.11.1) again. ([#26001](https://github.com/kubernetes/kubernetes/pull/26001), [@dchen1107](https://github.com/dchen1107)) +* Downward API implementation for resources limits and requests ([#24179](https://github.com/kubernetes/kubernetes/pull/24179), [@aveshagarwal](https://github.com/aveshagarwal)) +* GCE clusters start using GCI as the default OS image for masters ([#26197](https://github.com/kubernetes/kubernetes/pull/26197), [@wonderfly](https://github.com/wonderfly)) +* Add a 'kubectl clusterinfo dump' option ([#20672](https://github.com/kubernetes/kubernetes/pull/20672), [@brendandburns](https://github.com/brendandburns)) +* Fixing heapster memory requirements. ([#26109](https://github.com/kubernetes/kubernetes/pull/26109), [@Q-Lee](https://github.com/Q-Lee)) +* Handle federated service name lookups in kube-dns. 
([#25727](https://github.com/kubernetes/kubernetes/pull/25727), [@madhusudancs](https://github.com/madhusudancs))
+* Support sort-by timestamp in kubectl get ([#25600](https://github.com/kubernetes/kubernetes/pull/25600), [@janetkuo](https://github.com/janetkuo))
+* vSphere Volume Plugin Implementation ([#24947](https://github.com/kubernetes/kubernetes/pull/24947), [@abithap](https://github.com/abithap))
+* ResourceQuota controller uses rate limiter to prevent hot-loops in error situations ([#25748](https://github.com/kubernetes/kubernetes/pull/25748), [@derekwaynecarr](https://github.com/derekwaynecarr))
+* Fix hyperkube flag parsing ([#25512](https://github.com/kubernetes/kubernetes/pull/25512), [@colhom](https://github.com/colhom))
+* Add a kubectl create secret tls command ([#24719](https://github.com/kubernetes/kubernetes/pull/24719), [@bprashanth](https://github.com/bprashanth))
+* Introduce a new add-on pod NodeProblemDetector. ([#25986](https://github.com/kubernetes/kubernetes/pull/25986), [@Random-Liu](https://github.com/Random-Liu))
+ * NodeProblemDetector is a DaemonSet running on each node, monitoring node health and reporting node problems as NodeCondition and Event. Currently it already supports kernel log monitoring, and will support more problem detection in the future. It is enabled by default on GCE now.
+* Handle cAdvisor partial failures ([#25933](https://github.com/kubernetes/kubernetes/pull/25933), [@timstclair](https://github.com/timstclair))
+* Use SkyDNS as a library for a more integrated kube DNS ([#23930](https://github.com/kubernetes/kubernetes/pull/23930), [@ArtfulCoder](https://github.com/ArtfulCoder))
+* Introduce node memory pressure condition to scheduler ([#25531](https://github.com/kubernetes/kubernetes/pull/25531), [@ingvagabund](https://github.com/ingvagabund))
+* Fix detection of docker cgroup on RHEL ([#25907](https://github.com/kubernetes/kubernetes/pull/25907), [@ncdc](https://github.com/ncdc))
+* Kubelet evicts pods when available memory falls below configured eviction thresholds ([#25772](https://github.com/kubernetes/kubernetes/pull/25772), [@derekwaynecarr](https://github.com/derekwaynecarr))
+* Use protobufs by default to communicate with apiserver (still store JSONs in etcd) ([#25738](https://github.com/kubernetes/kubernetes/pull/25738), [@wojtek-t](https://github.com/wojtek-t))
+* Implement NetworkPolicy v1beta1 API object / client support. ([#25638](https://github.com/kubernetes/kubernetes/pull/25638), [@caseydavenport](https://github.com/caseydavenport))
+* Only expose top N images in `NodeStatus` ([#25328](https://github.com/kubernetes/kubernetes/pull/25328), [@resouer](https://github.com/resouer))
+* Extend secrets volumes with path control ([#25285](https://github.com/kubernetes/kubernetes/pull/25285), [@ingvagabund](https://github.com/ingvagabund))
+* With this PR, kubectl and other RestClients using the AuthProvider framework can make OIDC authenticated requests, and, if there is a refresh token present, the tokens will be refreshed as needed. 
([#25270](https://github.com/kubernetes/kubernetes/pull/25270), [@bobbyrullo](https://github.com/bobbyrullo)) +* Make addon-manager cross-platform and use it with hyperkube ([#25631](https://github.com/kubernetes/kubernetes/pull/25631), [@luxas](https://github.com/luxas)) +* kubelet: Optionally, have kubelet exit if lock file contention is observed, using --exit-on-lock-contention flag ([#25596](https://github.com/kubernetes/kubernetes/pull/25596), [@derekparker](https://github.com/derekparker)) +* Bump up glbc version to 0.6.2 ([#25446](https://github.com/kubernetes/kubernetes/pull/25446), [@bprashanth](https://github.com/bprashanth)) +* Add "kubectl set image" for easier updating container images (for pods or resources with pod templates). ([#25509](https://github.com/kubernetes/kubernetes/pull/25509), [@janetkuo](https://github.com/janetkuo)) +* NodeController doesn't evict Pods if no Nodes are Ready ([#25571](https://github.com/kubernetes/kubernetes/pull/25571), [@gmarek](https://github.com/gmarek)) +* Incompatible change of kube-up.sh: ([#25734](https://github.com/kubernetes/kubernetes/pull/25734), [@jszczepkowski](https://github.com/jszczepkowski)) + * when turning on cluster autoscaler by setting KUBE_ENABLE_NODE_AUTOSCALER=true, + * KUBE_AUTOSCALER_MIN_NODES and KUBE_AUTOSCALER_MAX_NODES need to be set. +* systemd node spec proposal ([#17688](https://github.com/kubernetes/kubernetes/pull/17688), [@derekwaynecarr](https://github.com/derekwaynecarr)) +* Bump GCE ContainerVM to container-v1-3-v20160517 (Docker 1.11.1) ([#25843](https://github.com/kubernetes/kubernetes/pull/25843), [@zmerlynn](https://github.com/zmerlynn)) +* AWS: Move enforcement of attached AWS device limit from kubelet to scheduler ([#23254](https://github.com/kubernetes/kubernetes/pull/23254), [@jsafrane](https://github.com/jsafrane)) +* Refactor persistent volume controller ([#24331](https://github.com/kubernetes/kubernetes/pull/24331), [@jsafrane](https://github.com/jsafrane)) +* Add support for running GCI on the GCE cloud provider ([#25425](https://github.com/kubernetes/kubernetes/pull/25425), [@andyzheng0831](https://github.com/andyzheng0831)) +* Implement taints and tolerations ([#24134](https://github.com/kubernetes/kubernetes/pull/24134), [@kevin-wangzefeng](https://github.com/kevin-wangzefeng)) +* Add init containers to pods ([#23567](https://github.com/kubernetes/kubernetes/pull/23567), [@smarterclayton](https://github.com/smarterclayton)) + + + +# v1.3.0-alpha.4 + +[Documentation](http://kubernetes.github.io) & [Examples](http://releases.k8s.io/master/examples) + +## Downloads + +binary | sha1 hash | md5 hash +------ | --------- | -------- +[kubernetes.tar.gz](https://storage.googleapis.com/kubernetes-release/release/v1.3.0-alpha.4/kubernetes.tar.gz) | `758e97e7e50153840379ecd9f8fda1869543539f` | `4e18ae6a428c99fcc30e2137d7c41854` + +## Changes since v1.3.0-alpha.3 + +### Action Required + +* validate third party resources ([#25007](https://github.com/kubernetes/kubernetes/pull/25007), [@liggitt](https://github.com/liggitt)) +* Automatically create the kube-system namespace ([#25196](https://github.com/kubernetes/kubernetes/pull/25196), [@luxas](https://github.com/luxas)) +* Make ThirdPartyResource a root scoped object ([#25006](https://github.com/kubernetes/kubernetes/pull/25006), [@liggitt](https://github.com/liggitt)) +* mark container-port flag as deprecated ([#25072](https://github.com/kubernetes/kubernetes/pull/25072), [@AdoHe](https://github.com/AdoHe)) +* Provide flags to use etcd3 backed storage 
([#24455](https://github.com/kubernetes/kubernetes/pull/24455), [@hongchaodeng](https://github.com/hongchaodeng)) + +### Other notable changes + +* Fix hyperkube's layer caching, and remove --make-symlinks at build time ([#25693](https://github.com/kubernetes/kubernetes/pull/25693), [@luxas](https://github.com/luxas)) +* AWS: More support for ap-northeast-2 region ([#24464](https://github.com/kubernetes/kubernetes/pull/24464), [@matthewrudy](https://github.com/matthewrudy)) +* Make bigger master root disks in GCE for large clusters ([#25670](https://github.com/kubernetes/kubernetes/pull/25670), [@gmarek](https://github.com/gmarek)) +* AWS kube-down: don't fail if ELB not in VPC - [#23784](https://github.com/kubernetes/kubernetes/pull/23784) ([#23785](https://github.com/kubernetes/kubernetes/pull/23785), [@ajohnstone](https://github.com/ajohnstone)) +* Build hyperkube in hack/local-up-cluster instead of separate binaries ([#25627](https://github.com/kubernetes/kubernetes/pull/25627), [@luxas](https://github.com/luxas)) +* enable recursive processing in kubectl rollout ([#25110](https://github.com/kubernetes/kubernetes/pull/25110), [@metral](https://github.com/metral)) +* Support struct,array,slice types when sorting kubectl output ([#25022](https://github.com/kubernetes/kubernetes/pull/25022), [@zhouhaibing089](https://github.com/zhouhaibing089)) +* federated api servers: Adding a discovery summarizer server ([#20358](https://github.com/kubernetes/kubernetes/pull/20358), [@nikhiljindal](https://github.com/nikhiljindal)) +* AWS: Allow cross-region image pulling with ECR ([#24369](https://github.com/kubernetes/kubernetes/pull/24369), [@therc](https://github.com/therc)) +* Automatically add node labels beta.kubernetes.io/{os,arch} ([#23684](https://github.com/kubernetes/kubernetes/pull/23684), [@luxas](https://github.com/luxas)) +* kubectl "rm" will suggest using "delete"; "ps" and "list" will suggest "get". ([#25181](https://github.com/kubernetes/kubernetes/pull/25181), [@janetkuo](https://github.com/janetkuo)) +* Add IPv6 address support for pods - does NOT include services ([#23090](https://github.com/kubernetes/kubernetes/pull/23090), [@tgraf](https://github.com/tgraf)) +* Use local disk for ConfigMap volume instead of tmpfs ([#25306](https://github.com/kubernetes/kubernetes/pull/25306), [@pmorie](https://github.com/pmorie)) +* Alpha support for scheduling pods on machines with NVIDIA GPUs whose kubelets use the `--experimental-nvidia-gpus` flag, using the alpha.kubernetes.io/nvidia-gpu resource ([#24836](https://github.com/kubernetes/kubernetes/pull/24836), [@therc](https://github.com/therc)) +* AWS: SSL support for ELB listeners through annotations ([#23495](https://github.com/kubernetes/kubernetes/pull/23495), [@therc](https://github.com/therc)) +* Implement `kubectl rollout status` that can be used to watch a deployment's rollout status ([#19946](https://github.com/kubernetes/kubernetes/pull/19946), [@janetkuo](https://github.com/janetkuo)) +* Webhook Token Authenticator ([#24902](https://github.com/kubernetes/kubernetes/pull/24902), [@cjcullen](https://github.com/cjcullen)) +* Update PodSecurityPolicy types and add admission controller that could enforce them ([#24600](https://github.com/kubernetes/kubernetes/pull/24600), [@pweil-](https://github.com/pweil-)) +* Introducing ScheduledJobs as described in [the proposal](docs/proposals/scheduledjob.md) as part of `batch/v2alpha1` version (experimental feature). 
([#24970](https://github.com/kubernetes/kubernetes/pull/24970), [@soltysh](https://github.com/soltysh)) +* kubectl now supports validation of nested objects with different ApiGroups (e.g. objects in a List) ([#25172](https://github.com/kubernetes/kubernetes/pull/25172), [@pwittrock](https://github.com/pwittrock)) +* Change default clusterCIDRs from /16 to /14 in GCE configs allowing 1000 Node clusters by default. ([#25350](https://github.com/kubernetes/kubernetes/pull/25350), [@gmarek](https://github.com/gmarek)) +* Add 'kubectl set' ([#25444](https://github.com/kubernetes/kubernetes/pull/25444), [@janetkuo](https://github.com/janetkuo)) +* vSphere Cloud Provider Implementation ([#24703](https://github.com/kubernetes/kubernetes/pull/24703), [@dagnello](https://github.com/dagnello)) +* Added JobTemplate, a preliminary step for ScheduledJob and Workflow ([#21675](https://github.com/kubernetes/kubernetes/pull/21675), [@soltysh](https://github.com/soltysh)) +* Openstack provider ([#21737](https://github.com/kubernetes/kubernetes/pull/21737), [@zreigz](https://github.com/zreigz)) +* AWS kube-up: Allow VPC CIDR to be specified (experimental) ([#23362](https://github.com/kubernetes/kubernetes/pull/23362), [@miguelfrde](https://github.com/miguelfrde)) +* Return "410 Gone" errors via watch stream when using watch cache ([#25369](https://github.com/kubernetes/kubernetes/pull/25369), [@liggitt](https://github.com/liggitt)) +* GKE provider: Add cluster-ipv4-cidr and arbitrary flags ([#25437](https://github.com/kubernetes/kubernetes/pull/25437), [@zmerlynn](https://github.com/zmerlynn)) +* AWS kube-up: Increase timeout waiting for docker start ([#25405](https://github.com/kubernetes/kubernetes/pull/25405), [@justinsb](https://github.com/justinsb)) +* Sort resources in quota errors to avoid duplicate events ([#25161](https://github.com/kubernetes/kubernetes/pull/25161), [@derekwaynecarr](https://github.com/derekwaynecarr)) +* Display line number on JSON errors ([#25038](https://github.com/kubernetes/kubernetes/pull/25038), [@mfojtik](https://github.com/mfojtik)) +* If the cluster node count exceeds the GCE TargetPool maximum (currently 1000), ([#25178](https://github.com/kubernetes/kubernetes/pull/25178), [@zmerlynn](https://github.com/zmerlynn)) + * randomly select which nodes are members of Kubernetes External Load Balancers. 
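+
+A minimal sketch of the alpha GPU resource from the "Alpha support for scheduling pods on machines with NVIDIA GPUs" entry earlier in this list ([#24836](https://github.com/kubernetes/kubernetes/pull/24836)); pod and image names are hypothetical:
+
+```yaml
+apiVersion: v1
+kind: Pod
+metadata:
+  name: cuda-demo             # hypothetical
+spec:
+  containers:
+  - name: cuda
+    image: nvidia/cuda        # hypothetical image
+    resources:
+      limits:
+        # Schedulable only onto nodes whose kubelet runs with --experimental-nvidia-gpus
+        alpha.kubernetes.io/nvidia-gpu: 1
+```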
+* Clarify supported version skew between masters, nodes, and clients ([#25087](https://github.com/kubernetes/kubernetes/pull/25087), [@ihmccreery](https://github.com/ihmccreery)) +* Move godeps to vendor/ ([#24242](https://github.com/kubernetes/kubernetes/pull/24242), [@thockin](https://github.com/thockin)) +* Introduce events flag for describers ([#24554](https://github.com/kubernetes/kubernetes/pull/24554), [@ingvagabund](https://github.com/ingvagabund)) +* run kube-addon-manager in a static pod ([#23600](https://github.com/kubernetes/kubernetes/pull/23600), [@mikedanese](https://github.com/mikedanese)) +* Reimplement 'pause' in C - smaller footprint all around ([#23009](https://github.com/kubernetes/kubernetes/pull/23009), [@uluyol](https://github.com/uluyol)) +* Add subPath to mount a child dir or file of a volumeMount ([#22575](https://github.com/kubernetes/kubernetes/pull/22575), [@MikaelCluseau](https://github.com/MikaelCluseau)) +* Handle image digests in node status and image GC ([#25088](https://github.com/kubernetes/kubernetes/pull/25088), [@ncdc](https://github.com/ncdc)) +* PLEG: reinspect pods that failed prior inspections ([#25077](https://github.com/kubernetes/kubernetes/pull/25077), [@ncdc](https://github.com/ncdc)) +* Fix kubectl create secret/configmap to allow = values ([#24989](https://github.com/kubernetes/kubernetes/pull/24989), [@derekwaynecarr](https://github.com/derekwaynecarr)) +* Upgrade installed packages when building hyperkube to improve the security profile ([#25114](https://github.com/kubernetes/kubernetes/pull/25114), [@aaronlevy](https://github.com/aaronlevy)) +* GCI/Trusty: Support ABAC authorization ([#24950](https://github.com/kubernetes/kubernetes/pull/24950), [@andyzheng0831](https://github.com/andyzheng0831)) +* fix cinder volume dir umount issue [#24717](https://github.com/kubernetes/kubernetes/pull/24717) ([#24718](https://github.com/kubernetes/kubernetes/pull/24718), [@chengyli](https://github.com/chengyli)) +* Inter pod topological affinity and anti-affinity implementation ([#22985](https://github.com/kubernetes/kubernetes/pull/22985), [@kevin-wangzefeng](https://github.com/kevin-wangzefeng)) +* start etcd compactor in background ([#25010](https://github.com/kubernetes/kubernetes/pull/25010), [@hongchaodeng](https://github.com/hongchaodeng)) +* GCI: Add two GCI specific metadata pairs ([#25105](https://github.com/kubernetes/kubernetes/pull/25105), [@andyzheng0831](https://github.com/andyzheng0831)) +* Ensure status is not changed during an update of PV, PVC, HPA objects ([#24924](https://github.com/kubernetes/kubernetes/pull/24924), [@mqliang](https://github.com/mqliang)) +* GCE: Prefer preconfigured node tags for firewalls, if available ([#25148](https://github.com/kubernetes/kubernetes/pull/25148), [@a-robinson](https://github.com/a-robinson)) +* kubectl rolling-update support for same image ([#24645](https://github.com/kubernetes/kubernetes/pull/24645), [@jlowdermilk](https://github.com/jlowdermilk)) +* Add an entry to the salt config to allow Debian jessie on GCE. ([#25123](https://github.com/kubernetes/kubernetes/pull/25123), [@jlewi](https://github.com/jlewi)) + * As with the existing Wheezy image on GCE, docker is expected + * to already be installed in the image. 
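+
+A minimal sketch of the `subPath` mount from the "Add subPath to mount a child dir or file of a volumeMount" entry earlier in this list ([#22575](https://github.com/kubernetes/kubernetes/pull/22575)); names and paths are hypothetical:
+
+```yaml
+apiVersion: v1
+kind: Pod
+metadata:
+  name: subpath-demo          # hypothetical
+spec:
+  containers:
+  - name: web
+    image: nginx              # hypothetical image
+    volumeMounts:
+    - name: site-data
+      mountPath: /usr/share/nginx/html
+      subPath: html           # mount only the volume's 'html' child directory
+  volumes:
+  - name: site-data
+    emptyDir: {}
+```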
+* Mark kube-push.sh as broken ([#25095](https://github.com/kubernetes/kubernetes/pull/25095), [@ihmccreery](https://github.com/ihmccreery)) +* AWS: Add support for ap-northeast-2 region (Seoul) ([#24457](https://github.com/kubernetes/kubernetes/pull/24457), [@leokhoa](https://github.com/leokhoa)) +* GCI: Update the command to get the image ([#24987](https://github.com/kubernetes/kubernetes/pull/24987), [@andyzheng0831](https://github.com/andyzheng0831)) +* Port-forward: use out and error streams instead of glog ([#17030](https://github.com/kubernetes/kubernetes/pull/17030), [@csrwng](https://github.com/csrwng)) +* Promote Pod Hostname & Subdomain to fields (were annotations) ([#24362](https://github.com/kubernetes/kubernetes/pull/24362), [@ArtfulCoder](https://github.com/ArtfulCoder)) +* Validate deletion timestamp doesn't change on update ([#24839](https://github.com/kubernetes/kubernetes/pull/24839), [@liggitt](https://github.com/liggitt)) +* Add flag -t as shorthand for --tty ([#24365](https://github.com/kubernetes/kubernetes/pull/24365), [@janetkuo](https://github.com/janetkuo)) +* Add support for running clusters on GCI ([#24893](https://github.com/kubernetes/kubernetes/pull/24893), [@andyzheng0831](https://github.com/andyzheng0831)) +* Switch to ABAC authorization from AllowAll ([#24210](https://github.com/kubernetes/kubernetes/pull/24210), [@cjcullen](https://github.com/cjcullen)) +* Fix DeletingLoadBalancer event generation. ([#24833](https://github.com/kubernetes/kubernetes/pull/24833), [@a-robinson](https://github.com/a-robinson)) + + + +# v1.2.4 + +[Documentation](http://kubernetes.github.io) & [Examples](http://releases.k8s.io/release-1.2/examples) + +## Downloads + +binary | sha1 hash | md5 hash +------ | --------- | -------- +[kubernetes.tar.gz](https://storage.googleapis.com/kubernetes-release/release/v1.2.4/kubernetes.tar.gz) | `f3aea83f8f0e16b2b41998a2edc09eb42fd8d945` | `ab0aca3a20e8eba43c8ff9d672793618` + +## Changes since v1.2.3 + +### Other notable changes + +* Ensure status is not changed during an update of PV, PVC, HPA objects ([#24924](https://github.com/kubernetes/kubernetes/pull/24924), [@mqliang](https://github.com/mqliang)) +* GCI: Add two GCI specific metadata pairs ([#25105](https://github.com/kubernetes/kubernetes/pull/25105), [@andyzheng0831](https://github.com/andyzheng0831)) +* Add an entry to the salt config to allow Debian jessie on GCE. ([#25123](https://github.com/kubernetes/kubernetes/pull/25123), [@jlewi](https://github.com/jlewi)) + * As with the existing Wheezy image on GCE, docker is expected + * to already be installed in the image. +* Fix DeletingLoadBalancer event generation. 
([#24833](https://github.com/kubernetes/kubernetes/pull/24833), [@a-robinson](https://github.com/a-robinson)) +* GCE: Prefer preconfigured node tags for firewalls, if available ([#25148](https://github.com/kubernetes/kubernetes/pull/25148), [@a-robinson](https://github.com/a-robinson)) +* Drain pods created from ReplicaSets in 'kubectl drain' ([#23689](https://github.com/kubernetes/kubernetes/pull/23689), [@maclof](https://github.com/maclof)) +* GCI: Update the command to get the image ([#24987](https://github.com/kubernetes/kubernetes/pull/24987), [@andyzheng0831](https://github.com/andyzheng0831)) +* Validate deletion timestamp doesn't change on update ([#24839](https://github.com/kubernetes/kubernetes/pull/24839), [@liggitt](https://github.com/liggitt)) +* Add support for running clusters on GCI ([#24893](https://github.com/kubernetes/kubernetes/pull/24893), [@andyzheng0831](https://github.com/andyzheng0831)) +* Trusty: Add retry in curl commands ([#24749](https://github.com/kubernetes/kubernetes/pull/24749), [@andyzheng0831](https://github.com/andyzheng0831)) + + + +# v1.3.0-alpha.3 + +[Documentation](http://kubernetes.github.io) & [Examples](http://releases.k8s.io/master/examples) + +## Downloads + +binary | sha1 hash | md5 hash +------ | --------- | -------- +[kubernetes.tar.gz](https://storage.googleapis.com/kubernetes-release/release/v1.3.0-alpha.3/kubernetes.tar.gz) | `01e0dc68653173614dc99f44875173478f837b38` | `ae22c35f3a963743d21daa17683e0288` + +## Changes since v1.3.0-alpha.2 + +### Action Required + +* Updating go-restful to generate "type":"object" instead of "type":"any" in swagger-spec (breaks kubectl 1.1) ([#22897](https://github.com/kubernetes/kubernetes/pull/22897), [@nikhiljindal](https://github.com/nikhiljindal)) +* Make watch cache treat resourceVersion consistent with uncached watch ([#24008](https://github.com/kubernetes/kubernetes/pull/24008), [@liggitt](https://github.com/liggitt)) + +### Other notable changes + +* Trusty: Add retry in curl commands ([#24749](https://github.com/kubernetes/kubernetes/pull/24749), [@andyzheng0831](https://github.com/andyzheng0831)) +* Collect and expose runtime's image storage usage via Kubelet's /stats/summary endpoint ([#23595](https://github.com/kubernetes/kubernetes/pull/23595), [@vishh](https://github.com/vishh)) +* Adding loadBalancer services to quota system ([#24247](https://github.com/kubernetes/kubernetes/pull/24247), [@sdminonne](https://github.com/sdminonne)) +* Enforce --max-pods in kubelet admission; previously was only enforced in scheduler ([#24674](https://github.com/kubernetes/kubernetes/pull/24674), [@gmarek](https://github.com/gmarek)) +* All clients under ClientSet share one RateLimiter. ([#24166](https://github.com/kubernetes/kubernetes/pull/24166), [@gmarek](https://github.com/gmarek)) +* Remove requirement that Endpoints IPs be IPv4 ([#23317](https://github.com/kubernetes/kubernetes/pull/23317), [@aanm](https://github.com/aanm)) +* Fix unintended change of Service.spec.ports[].nodePort during kubectl apply ([#24180](https://github.com/kubernetes/kubernetes/pull/24180), [@AdoHe](https://github.com/AdoHe)) +* Don't log private SSH key ([#24506](https://github.com/kubernetes/kubernetes/pull/24506), [@timstclair](https://github.com/timstclair)) +* Incremental improvements to kubelet e2e tests ([#24426](https://github.com/kubernetes/kubernetes/pull/24426), [@pwittrock](https://github.com/pwittrock)) +* Bridge off-cluster traffic into services by masquerading. 
([#24429](https://github.com/kubernetes/kubernetes/pull/24429), [@cjcullen](https://github.com/cjcullen)) +* Flush conntrack state for removed/changed UDP Services ([#22573](https://github.com/kubernetes/kubernetes/pull/22573), [@freehan](https://github.com/freehan)) +* Allow setting the Host header in a httpGet probe ([#24292](https://github.com/kubernetes/kubernetes/pull/24292), [@errm](https://github.com/errm)) +* Fix goroutine leak in ssh-tunnel healthcheck. ([#24487](https://github.com/kubernetes/kubernetes/pull/24487), [@cjcullen](https://github.com/cjcullen)) +* Fix gce.getDiskByNameUnknownZone logic. ([#24452](https://github.com/kubernetes/kubernetes/pull/24452), [@a-robinson](https://github.com/a-robinson)) +* Make etcd cache size configurable ([#23914](https://github.com/kubernetes/kubernetes/pull/23914), [@jsravn](https://github.com/jsravn)) +* Drain pods created from ReplicaSets in 'kubectl drain' ([#23689](https://github.com/kubernetes/kubernetes/pull/23689), [@maclof](https://github.com/maclof)) +* Make kubectl edit not convert GV on edits ([#23437](https://github.com/kubernetes/kubernetes/pull/23437), [@DirectXMan12](https://github.com/DirectXMan12)) +* don't ship kube-registry-proxy and pause images in tars. ([#23605](https://github.com/kubernetes/kubernetes/pull/23605), [@mikedanese](https://github.com/mikedanese)) +* Do not throw creation errors for containers that fail immediately after being started ([#23894](https://github.com/kubernetes/kubernetes/pull/23894), [@vishh](https://github.com/vishh)) +* Add a client flag to delete "--now" for grace period 0 ([#23756](https://github.com/kubernetes/kubernetes/pull/23756), [@smarterclayton](https://github.com/smarterclayton)) +* add act-as powers ([#23549](https://github.com/kubernetes/kubernetes/pull/23549), [@deads2k](https://github.com/deads2k)) +* Build Kubernetes, etcd and flannel for arm64 and ppc64le ([#23931](https://github.com/kubernetes/kubernetes/pull/23931), [@luxas](https://github.com/luxas)) +* Honor starting resourceVersion in watch cache ([#24208](https://github.com/kubernetes/kubernetes/pull/24208), [@ncdc](https://github.com/ncdc)) +* Update the pause image to build for arm64 and ppc64le ([#23697](https://github.com/kubernetes/kubernetes/pull/23697), [@luxas](https://github.com/luxas)) +* Return more useful error information when a persistent volume fails to mount ([#23122](https://github.com/kubernetes/kubernetes/pull/23122), [@screeley44](https://github.com/screeley44)) +* Trusty: Avoid unnecessary in-memory temp files ([#24144](https://github.com/kubernetes/kubernetes/pull/24144), [@andyzheng0831](https://github.com/andyzheng0831)) +* e2e: fix error checking in kubelet stats ([#24205](https://github.com/kubernetes/kubernetes/pull/24205), [@yujuhong](https://github.com/yujuhong)) +* Fixed mounting with containerized kubelet ([#23435](https://github.com/kubernetes/kubernetes/pull/23435), [@jsafrane](https://github.com/jsafrane)) +* Adding nodeports services to quota ([#22154](https://github.com/kubernetes/kubernetes/pull/22154), [@sdminonne](https://github.com/sdminonne)) +* e2e: adapt kubelet_perf.go to use the new summary metrics API ([#24003](https://github.com/kubernetes/kubernetes/pull/24003), [@yujuhong](https://github.com/yujuhong)) +* kubelet: add RSS memory to the summary API ([#24015](https://github.com/kubernetes/kubernetes/pull/24015), [@yujuhong](https://github.com/yujuhong)) + + + +# v1.2.3 + +[Documentation](http://kubernetes.github.io) & 
[Examples](http://releases.k8s.io/release-1.2/examples) + +## Downloads + +binary | sha1 hash | md5 hash +------ | --------- | -------- +[kubernetes.tar.gz](https://storage.googleapis.com/kubernetes-release/release/v1.2.3/kubernetes.tar.gz) | `b2ce4e0c72562d09ba06e3c0913f0bd78da0285e` | `69e75650de30d5a52d144799e94a168d` + +## Changes since v1.2.2 + +### Action Required + +* Make watch cache treat resourceVersion consistent with uncached watch ([#24008](https://github.com/kubernetes/kubernetes/pull/24008), [@liggitt](https://github.com/liggitt)) + +### Other notable changes + +* Fix unintended change of Service.spec.ports[].nodePort during kubectl apply ([#24180](https://github.com/kubernetes/kubernetes/pull/24180), [@AdoHe](https://github.com/AdoHe)) +* Flush conntrack state for removed/changed UDP Services ([#22573](https://github.com/kubernetes/kubernetes/pull/22573), [@freehan](https://github.com/freehan)) +* Allow setting the Host header in a httpGet probe ([#24292](https://github.com/kubernetes/kubernetes/pull/24292), [@errm](https://github.com/errm)) +* Bridge off-cluster traffic into services by masquerading. ([#24429](https://github.com/kubernetes/kubernetes/pull/24429), [@cjcullen](https://github.com/cjcullen)) +* Version-guard Kubectl client Guestbook application test against deployments ([#24478](https://github.com/kubernetes/kubernetes/pull/24478), [@ihmccreery](https://github.com/ihmccreery)) +* Fix goroutine leak in ssh-tunnel healthcheck. ([#24487](https://github.com/kubernetes/kubernetes/pull/24487), [@cjcullen](https://github.com/cjcullen)) +* Fixed mounting with containerized kubelet ([#23435](https://github.com/kubernetes/kubernetes/pull/23435), [@jsafrane](https://github.com/jsafrane)) +* Do not throw creation errors for containers that fail immediately after being started ([#23894](https://github.com/kubernetes/kubernetes/pull/23894), [@vishh](https://github.com/vishh)) +* Honor starting resourceVersion in watch cache ([#24208](https://github.com/kubernetes/kubernetes/pull/24208), [@ncdc](https://github.com/ncdc)) +* Fix TerminationMessagePath ([#23658](https://github.com/kubernetes/kubernetes/pull/23658), [@Random-Liu](https://github.com/Random-Liu)) +* Fix gce.getDiskByNameUnknownZone logic. 
([#24452](https://github.com/kubernetes/kubernetes/pull/24452), [@a-robinson](https://github.com/a-robinson)) +* kubelet: add RSS memory to the summary API ([#24015](https://github.com/kubernetes/kubernetes/pull/24015), [@yujuhong](https://github.com/yujuhong)) +* e2e: adapt kubelet_perf.go to use the new summary metrics API ([#24003](https://github.com/kubernetes/kubernetes/pull/24003), [@yujuhong](https://github.com/yujuhong)) +* e2e: fix error checking in kubelet stats ([#24205](https://github.com/kubernetes/kubernetes/pull/24205), [@yujuhong](https://github.com/yujuhong)) +* Trusty: Avoid unnecessary in-memory temp files ([#24144](https://github.com/kubernetes/kubernetes/pull/24144), [@andyzheng0831](https://github.com/andyzheng0831)) +* Allowing type object in kubectl swagger validation ([#24054](https://github.com/kubernetes/kubernetes/pull/24054), [@nikhiljindal](https://github.com/nikhiljindal)) +* Add ClusterUpgrade tests ([#24150](https://github.com/kubernetes/kubernetes/pull/24150), [@ihmccreery](https://github.com/ihmccreery)) +* Trusty: Do not create the docker-daemon cgroup ([#23996](https://github.com/kubernetes/kubernetes/pull/23996), [@andyzheng0831](https://github.com/andyzheng0831)) + + + +# v1.3.0-alpha.2 + +[Documentation](http://kubernetes.github.io) & [Examples](http://releases.k8s.io/master/examples) + +## Downloads + +binary | sha1 hash | md5 hash +------ | --------- | -------- +[kubernetes.tar.gz](https://storage.googleapis.com/kubernetes-release/release/v1.3.0-alpha.2/kubernetes.tar.gz) | `305c8c2af7e99d463dbbe4208ecfe2b50585e796` | `aadb8d729d855e69212008f8fda628c0` + +## Changes since v1.3.0-alpha.1 + +### Other notable changes + +* Make kube2sky and skydns docker images cross-platform ([#19376](https://github.com/kubernetes/kubernetes/pull/19376), [@luxas](https://github.com/luxas)) +* Allowing type object in kubectl swagger validation ([#24054](https://github.com/kubernetes/kubernetes/pull/24054), [@nikhiljindal](https://github.com/nikhiljindal)) +* Fix TerminationMessagePath ([#23658](https://github.com/kubernetes/kubernetes/pull/23658), [@Random-Liu](https://github.com/Random-Liu)) +* Trusty: Do not create the docker-daemon cgroup ([#23996](https://github.com/kubernetes/kubernetes/pull/23996), [@andyzheng0831](https://github.com/andyzheng0831)) +* Make ConfigMap volume readable as non-root ([#23793](https://github.com/kubernetes/kubernetes/pull/23793), [@pmorie](https://github.com/pmorie)) +* only include running and pending pods in daemonset should place calculation ([#23929](https://github.com/kubernetes/kubernetes/pull/23929), [@mikedanese](https://github.com/mikedanese)) +* Upgrade to golang 1.6 ([#22149](https://github.com/kubernetes/kubernetes/pull/22149), [@luxas](https://github.com/luxas)) +* Cross-build hyperkube and debian-iptables for ARM. Also add a flannel image ([#21617](https://github.com/kubernetes/kubernetes/pull/21617), [@luxas](https://github.com/luxas)) +* Add a timeout to the sshDialer to prevent indefinite hangs. 
([#23843](https://github.com/kubernetes/kubernetes/pull/23843), [@cjcullen](https://github.com/cjcullen)) +* Ensure object returned by volume getCloudProvider incorporates cloud config ([#23769](https://github.com/kubernetes/kubernetes/pull/23769), [@saad-ali](https://github.com/saad-ali)) +* Update Dashboard UI addon to v1.0.1 ([#23724](https://github.com/kubernetes/kubernetes/pull/23724), [@maciaszczykm](https://github.com/maciaszczykm)) +* Add zsh completion for kubectl ([#23797](https://github.com/kubernetes/kubernetes/pull/23797), [@sttts](https://github.com/sttts)) +* AWS kube-up: tolerate a lack of ephemeral volumes ([#23776](https://github.com/kubernetes/kubernetes/pull/23776), [@justinsb](https://github.com/justinsb)) +* duplicate kube-apiserver to federated-apiserver ([#23509](https://github.com/kubernetes/kubernetes/pull/23509), [@jianhuiz](https://github.com/jianhuiz)) +* Kubelet: Start using the official docker engine-api ([#23506](https://github.com/kubernetes/kubernetes/pull/23506), [@Random-Liu](https://github.com/Random-Liu)) +* Fix so setup-files don't recreate/invalidate certificates that already exist ([#23550](https://github.com/kubernetes/kubernetes/pull/23550), [@luxas](https://github.com/luxas)) +* A pod never terminated if a container image registry was unavailable ([#23746](https://github.com/kubernetes/kubernetes/pull/23746), [@derekwaynecarr](https://github.com/derekwaynecarr)) +* Fix jsonpath to handle maps with key of nonstring types ([#23358](https://github.com/kubernetes/kubernetes/pull/23358), [@aveshagarwal](https://github.com/aveshagarwal)) +* Trusty: Regional release .tar.gz support ([#23558](https://github.com/kubernetes/kubernetes/pull/23558), [@andyzheng0831](https://github.com/andyzheng0831)) +* Add support for 3rd party objects to kubectl ([#18835](https://github.com/kubernetes/kubernetes/pull/18835), [@brendandburns](https://github.com/brendandburns)) +* Remove unnecessary override of /etc/init.d/docker on containervm image. ([#23593](https://github.com/kubernetes/kubernetes/pull/23593), [@dchen1107](https://github.com/dchen1107)) +* make docker-checker more robust ([#23662](https://github.com/kubernetes/kubernetes/pull/23662), [@ArtfulCoder](https://github.com/ArtfulCoder)) +* Change kube-proxy & fluentd CPU request to 20m/80m. ([#23646](https://github.com/kubernetes/kubernetes/pull/23646), [@cjcullen](https://github.com/cjcullen)) +* Create a new Deployment in kube-system for every version. 
([#23512](https://github.com/kubernetes/kubernetes/pull/23512), [@Q-Lee](https://github.com/Q-Lee)) +* IngressTLS: allow secretName to be blank for SNI routing ([#23500](https://github.com/kubernetes/kubernetes/pull/23500), [@tam7t](https://github.com/tam7t)) +* don't sync deployment when pod selector is empty ([#23467](https://github.com/kubernetes/kubernetes/pull/23467), [@mikedanese](https://github.com/mikedanese)) +* AWS: Fix problems with >2 security groups ([#23340](https://github.com/kubernetes/kubernetes/pull/23340), [@justinsb](https://github.com/justinsb)) + + + +# v1.2.2 + +[Documentation](http://kubernetes.github.io) & [Examples](http://releases.k8s.io/release-1.2/examples) + +## Downloads + +binary | sha1 hash | md5 hash +------ | --------- | -------- +[kubernetes.tar.gz](https://storage.googleapis.com/kubernetes-release/release/v1.2.2/kubernetes.tar.gz) | `8dede5833a1986434adea80749624f81a0db7bb4` | `72a5389f22827fb5133fdc3b7bfb9b3a` + +## Changes since v1.2.1 + +### Other notable changes + +* Trusty: Update heapster manifest handling code ([#23434](https://github.com/kubernetes/kubernetes/pull/23434), [@andyzheng0831](https://github.com/andyzheng0831)) +* Support addon Deployments, make heapster a deployment with a nanny. ([#22893](https://github.com/kubernetes/kubernetes/pull/22893), [@Q-Lee](https://github.com/Q-Lee)) +* Create a new Deployment in kube-system for every version. ([#23512](https://github.com/kubernetes/kubernetes/pull/23512), [@Q-Lee](https://github.com/Q-Lee)) +* Use SCP to dump logs and parallelize a bit. ([#22835](https://github.com/kubernetes/kubernetes/pull/22835), [@spxtr](https://github.com/spxtr)) +* Trusty: Regional release .tar.gz support ([#23558](https://github.com/kubernetes/kubernetes/pull/23558), [@andyzheng0831](https://github.com/andyzheng0831)) +* Make ConfigMap volume readable as non-root ([#23793](https://github.com/kubernetes/kubernetes/pull/23793), [@pmorie](https://github.com/pmorie)) +* only include running and pending pods in daemonset should place calculation ([#23929](https://github.com/kubernetes/kubernetes/pull/23929), [@mikedanese](https://github.com/mikedanese)) +* A pod never terminated if a container image registry was unavailable ([#23746](https://github.com/kubernetes/kubernetes/pull/23746), [@derekwaynecarr](https://github.com/derekwaynecarr)) +* Update Dashboard UI addon to v1.0.1 ([#23724](https://github.com/kubernetes/kubernetes/pull/23724), [@maciaszczykm](https://github.com/maciaszczykm)) +* Ensure object returned by volume getCloudProvider incorporates cloud config ([#23769](https://github.com/kubernetes/kubernetes/pull/23769), [@saad-ali](https://github.com/saad-ali)) +* Add a timeout to the sshDialer to prevent indefinite hangs. 
([#23843](https://github.com/kubernetes/kubernetes/pull/23843), [@cjcullen](https://github.com/cjcullen)) +* AWS kube-up: tolerate a lack of ephemeral volumes ([#23776](https://github.com/kubernetes/kubernetes/pull/23776), [@justinsb](https://github.com/justinsb)) +* Fix so setup-files don't recreate/invalidate certificates that already exist ([#23550](https://github.com/kubernetes/kubernetes/pull/23550), [@luxas](https://github.com/luxas)) + +# v1.2.1 + +[Documentation](http://kubernetes.github.io) & [Examples](http://releases.k8s.io/release-1.2/examples) + +## Downloads + +binary | sha1 hash | md5 hash +------ | --------- | -------- +[kubernetes.tar.gz](https://storage.googleapis.com/kubernetes-release/release/v1.2.1/kubernetes.tar.gz) | `1639807c5788e1c6b1ab51fd30b723fb5debd865` | `235a1da47972c96a560d718d3256ca4f` + + +## Changes since v1.2.0 + +### Other notable changes + +* AWS: Fix problems with >2 security groups ([#23340](https://github.com/kubernetes/kubernetes/pull/23340), [@justinsb](https://github.com/justinsb)) +* IngressTLS: allow secretName to be blank for SNI routing ([#23500](https://github.com/kubernetes/kubernetes/pull/23500), [@tam7t](https://github.com/tam7t)) +* Heapster patch release to 1.0.2 ([#23487](https://github.com/kubernetes/kubernetes/pull/23487), [@piosz](https://github.com/piosz)) +* Remove unnecessary override of /etc/init.d/docker on containervm image. ([#23593](https://github.com/kubernetes/kubernetes/pull/23593), [@dchen1107](https://github.com/dchen1107)) +* Change kube-proxy & fluentd CPU request to 20m/80m. ([#23646](https://github.com/kubernetes/kubernetes/pull/23646), [@cjcullen](https://github.com/cjcullen)) +* make docker-checker more robust ([#23662](https://github.com/kubernetes/kubernetes/pull/23662), [@ArtfulCoder](https://github.com/ArtfulCoder)) +* validate that daemonsets don't have empty selectors on creation ([#23530](https://github.com/kubernetes/kubernetes/pull/23530), [@mikedanese](https://github.com/mikedanese)) +* don't sync deployment when pod selector is empty ([#23467](https://github.com/kubernetes/kubernetes/pull/23467), [@mikedanese](https://github.com/mikedanese)) +* Support differentiation of OS distro in e2e tests ([#23466](https://github.com/kubernetes/kubernetes/pull/23466), [@andyzheng0831](https://github.com/andyzheng0831)) +* don't sync daemonsets with selectors that match all pods ([#23223](https://github.com/kubernetes/kubernetes/pull/23223), [@mikedanese](https://github.com/mikedanese)) +* Trusty: Avoid reaching GCE custom metadata size limit ([#22818](https://github.com/kubernetes/kubernetes/pull/22818), [@andyzheng0831](https://github.com/andyzheng0831)) +* Update kubectl help for 1.2 resources ([#23305](https://github.com/kubernetes/kubernetes/pull/23305), [@janetkuo](https://github.com/janetkuo)) +* Removing URL query param from swagger UI to fix the XSS issue ([#23234](https://github.com/kubernetes/kubernetes/pull/23234), [@nikhiljindal](https://github.com/nikhiljindal)) +* Fix hairpin mode ([#23325](https://github.com/kubernetes/kubernetes/pull/23325), [@MurgaNikolay](https://github.com/MurgaNikolay)) +* Bump to container-vm-v20160321 ([#23313](https://github.com/kubernetes/kubernetes/pull/23313), [@zmerlynn](https://github.com/zmerlynn)) +* Remove the restart-kube-proxy and restart-apiserver functions ([#23180](https://github.com/kubernetes/kubernetes/pull/23180), [@roberthbailey](https://github.com/roberthbailey)) +* Copy annotations back from RS to Deployment on rollback 
([#23160](https://github.com/kubernetes/kubernetes/pull/23160), [@janetkuo](https://github.com/janetkuo))
+* Trusty: Support hybrid cluster with nodes on ContainerVM ([#23079](https://github.com/kubernetes/kubernetes/pull/23079), [@andyzheng0831](https://github.com/andyzheng0831))
+* update expose command description to add deployment ([#23246](https://github.com/kubernetes/kubernetes/pull/23246), [@AdoHe](https://github.com/AdoHe))
+* Add a rate limiter to the GCE cloudprovider ([#23019](https://github.com/kubernetes/kubernetes/pull/23019), [@alex-mohr](https://github.com/alex-mohr))
+* Add a Deployment example for kubectl expose. ([#23222](https://github.com/kubernetes/kubernetes/pull/23222), [@madhusudancs](https://github.com/madhusudancs))
+* Use versioned object when computing patch ([#23145](https://github.com/kubernetes/kubernetes/pull/23145), [@liggitt](https://github.com/liggitt))
+* kubelet: send all received pods in one update ([#23141](https://github.com/kubernetes/kubernetes/pull/23141), [@yujuhong](https://github.com/yujuhong))
+* Add a SSHKey sync check to the master's healthz (when using SSHTunnels). ([#23167](https://github.com/kubernetes/kubernetes/pull/23167), [@cjcullen](https://github.com/cjcullen))
+* Validate minimum CPU limits to be >= 10m ([#23143](https://github.com/kubernetes/kubernetes/pull/23143), [@vishh](https://github.com/vishh))
+* Fix controller-manager race condition issue which caused endpoints flush during restart ([#23035](https://github.com/kubernetes/kubernetes/pull/23035), [@xinxiaogang](https://github.com/xinxiaogang))
+* MESOS: forward globally declared cadvisor housekeeping flags ([#22974](https://github.com/kubernetes/kubernetes/pull/22974), [@jdef](https://github.com/jdef))
+* Trusty: support developer workflow on base image ([#22960](https://github.com/kubernetes/kubernetes/pull/22960), [@andyzheng0831](https://github.com/andyzheng0831))
+
+
+
+# v1.3.0-alpha.1
+
+[Documentation](http://kubernetes.github.io) & [Examples](http://releases.k8s.io/HEAD/examples)
+
+## Downloads
+
+binary | sha1 hash | md5 hash
+------ | --------- | --------
+[kubernetes.tar.gz](https://storage.googleapis.com/kubernetes-release/release/v1.3.0-alpha.1/kubernetes.tar.gz) | `e0041b08e220a4704ea2ad90a6ec7c8f2120c2d3` | `7bb2df32aea94678f72a8d1f43a12098`
+
+## Changes since v1.2.0
+
+### Action Required
+
+* Disabling swagger ui by default on apiserver. Adding a flag that can enable it ([#23025](https://github.com/kubernetes/kubernetes/pull/23025), [@nikhiljindal](https://github.com/nikhiljindal)) (see the sketch below)
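+
+A minimal sketch of re-enabling the swagger UI via the new apiserver flag, shown as a fragment of a kube-apiserver pod manifest; the flag name is an assumption based on this entry, and everything else here is hypothetical:
+
+```yaml
+# Hypothetical fragment of a kube-apiserver static pod spec
+containers:
+- name: kube-apiserver
+  image: gcr.io/google_containers/kube-apiserver:v1.3.0   # hypothetical image tag
+  command:
+  - kube-apiserver
+  - --enable-swagger-ui=true   # swagger UI is now disabled unless explicitly enabled (#23025)
+```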
+* restore ability to run against secured etcd ([#21535](https://github.com/kubernetes/kubernetes/pull/21535), [@AdoHe](https://github.com/AdoHe))
+
+### Other notable changes
+
+* validate that daemonsets don't have empty selectors on creation ([#23530](https://github.com/kubernetes/kubernetes/pull/23530), [@mikedanese](https://github.com/mikedanese))
+* Trusty: Update heapster manifest handling code ([#23434](https://github.com/kubernetes/kubernetes/pull/23434), [@andyzheng0831](https://github.com/andyzheng0831))
+* Support differentiation of OS distro in e2e tests ([#23466](https://github.com/kubernetes/kubernetes/pull/23466), [@andyzheng0831](https://github.com/andyzheng0831))
+* don't sync daemonsets with selectors that match all pods ([#23223](https://github.com/kubernetes/kubernetes/pull/23223), [@mikedanese](https://github.com/mikedanese))
+* Trusty: Avoid reaching GCE custom metadata size limit ([#22818](https://github.com/kubernetes/kubernetes/pull/22818), [@andyzheng0831](https://github.com/andyzheng0831))
+* Update kubectl help for 1.2 resources ([#23305](https://github.com/kubernetes/kubernetes/pull/23305), [@janetkuo](https://github.com/janetkuo))
+* Support addon Deployments, make heapster a deployment with a nanny. ([#22893](https://github.com/kubernetes/kubernetes/pull/22893), [@Q-Lee](https://github.com/Q-Lee))
+* Removing URL query param from swagger UI to fix the XSS issue ([#23234](https://github.com/kubernetes/kubernetes/pull/23234), [@nikhiljindal](https://github.com/nikhiljindal))
+* Fix hairpin mode ([#23325](https://github.com/kubernetes/kubernetes/pull/23325), [@MurgaNikolay](https://github.com/MurgaNikolay))
+* Bump to container-vm-v20160321 ([#23313](https://github.com/kubernetes/kubernetes/pull/23313), [@zmerlynn](https://github.com/zmerlynn))
+* Remove the restart-kube-proxy and restart-apiserver functions ([#23180](https://github.com/kubernetes/kubernetes/pull/23180), [@roberthbailey](https://github.com/roberthbailey))
+* Copy annotations back from RS to Deployment on rollback ([#23160](https://github.com/kubernetes/kubernetes/pull/23160), [@janetkuo](https://github.com/janetkuo))
+* Trusty: Support hybrid cluster with nodes on ContainerVM ([#23079](https://github.com/kubernetes/kubernetes/pull/23079), [@andyzheng0831](https://github.com/andyzheng0831))
+* update expose command description to add deployment ([#23246](https://github.com/kubernetes/kubernetes/pull/23246), [@AdoHe](https://github.com/AdoHe))
+* Add a rate limiter to the GCE cloudprovider ([#23019](https://github.com/kubernetes/kubernetes/pull/23019), [@alex-mohr](https://github.com/alex-mohr))
+* Add a Deployment example for kubectl expose. ([#23222](https://github.com/kubernetes/kubernetes/pull/23222), [@madhusudancs](https://github.com/madhusudancs))
+* Use versioned object when computing patch ([#23145](https://github.com/kubernetes/kubernetes/pull/23145), [@liggitt](https://github.com/liggitt))
+* kubelet: send all received pods in one update ([#23141](https://github.com/kubernetes/kubernetes/pull/23141), [@yujuhong](https://github.com/yujuhong))
+* Add a SSHKey sync check to the master's healthz (when using SSHTunnels). ([#23167](https://github.com/kubernetes/kubernetes/pull/23167), [@cjcullen](https://github.com/cjcullen))
+
+
+
+# v1.2.0
+
+[Documentation](http://kubernetes.github.io) & [Examples](http://releases.k8s.io/release-1.2/examples)
+
+## Downloads
+
+binary | sha1 hash | md5 hash
+------ | --------- | --------
+[kubernetes.tar.gz](https://storage.googleapis.com/kubernetes-release/release/v1.2.0/kubernetes.tar.gz) | `52dd998e1191f464f581a9b87017d70ce0b058d9` | `c0ce9e6150e9d7a19455db82f3318b4c`
+
+## Changes since v1.1.1
+
+### Major Themes
+
+ * Significant scale improvements. Increased cluster scale by 400% to 1000 nodes with 30,000 pods per cluster. +Kubelet supports 100 pods per node with 4x reduced system overhead.
+ * Simplified application deployment and management.
+ * Dynamic Configuration (ConfigMap API in the core API group) enables application +configuration to be stored as a Kubernetes API object and pulled dynamically on +container startup, as an alternative to baking in command-line flags when a +container is built.
+ * Turnkey Deployments (Deployment API (Beta) in the Extensions API group) +automate deployment and rolling updates of applications, specified +declaratively. It handles versioning, multiple simultaneous rollouts, +aggregating status across all pods, maintaining application availability, and +rollback.
+ * Automated cluster management:
+ * Kubernetes clusters can now span zones within a cloud provider. Pods from a +service will be automatically spread across zones, enabling applications to +tolerate zone failure.
+ * Simplified way to run a container on every node (DaemonSet API (Beta) in the +Extensions API group): Kubernetes can schedule a service (such as a logging +agent) that runs one, and only one, pod per node.
+ * TLS and L7 support (Ingress API (Beta) in the Extensions API group): Kubernetes +is now easier to integrate into custom networking environments by supporting +TLS for secure communication and L7 HTTP-based traffic routing. A minimal Ingress sketch follows at the end of this list.
+ * Graceful Node Shutdown (aka drain) - The new “kubectl drain” command gracefully +evicts pods from nodes in preparation for disruptive operations like kernel +upgrades or maintenance.
+ * Custom Metrics for Autoscaling (HorizontalPodAutoscaler API in the Autoscaling +API group): The Horizontal Pod Autoscaling feature now supports custom metrics +(Alpha), allowing you to specify application-level metrics and thresholds to +trigger scaling up and down the number of pods in your application.
+ * New GUI (dashboard) allows you to get started quickly and enables the same +functionality found in the CLI as a more approachable and discoverable way of +interacting with the system. Note: the GUI is enabled by default in 1.2 clusters.
+
+Dashboard UI screenshot showing cards that represent applications that run inside a cluster
+
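+To make the TLS/L7 theme above concrete, here is a minimal sketch of a Beta
+`extensions/v1beta1` Ingress, created via a heredoc. The object name, host,
+and backend service are invented for the example and assume a matching
+Service already exists:
+
+```sh
+kubectl create -f - <<'EOF'
+apiVersion: extensions/v1beta1
+kind: Ingress
+metadata:
+  name: example-ingress        # hypothetical name
+spec:
+  rules:
+  - host: foo.example.com      # hypothetical host
+    http:
+      paths:
+      - path: /
+        backend:
+          serviceName: echo-svc   # assumes this Service exists
+          servicePort: 80
+EOF
+```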
+### Other notable improvements
+
+ * Job was Beta in 1.1 and is GA in 1.2.
+ * apiVersion: batch/v1 is now available. You no longer need to specify the .spec.selector field — a [unique selector is automatically generated](http://kubernetes.io/docs/user-guide/jobs/#pod-selector) for you.
+ * The previous version, apiVersion: extensions/v1beta1, is still supported. Even if you roll back to 1.1, the objects created using +the new apiVersion will still be accessible, using the old version. You can +continue to use your existing JSON and YAML files until you are ready to switch +to batch/v1. We may remove support for Jobs with apiVersion: extensions/v1beta1 in 1.3 or 1.4.
+ * HorizontalPodAutoscaler was Beta in 1.1 and is GA in 1.2.
+ * apiVersion: autoscaling/v1 is now available. Changes in this version are:
+ * Field CPUUtilization, which was a nested structure CPUTargetUtilization in +HorizontalPodAutoscalerSpec, was replaced by TargetCPUUtilizationPercentage, +which is an integer.
+ * ScaleRef of type SubresourceReference in HorizontalPodAutoscalerSpec, which +referred to the scale subresource of the resource being scaled, was replaced by +ScaleTargetRef, which points just to the resource being scaled.
+ * In extensions/v1beta1, if CPUUtilization in HorizontalPodAutoscalerSpec was not +specified it was set to 80 by default, while in autoscaling/v1 an HPA object +without TargetCPUUtilizationPercentage specified is a valid object. The pod +autoscaler controller will apply a default scaling policy in this case, which is +equivalent to the previous one but may change in the future.
+ * The previous version, apiVersion: extensions/v1beta1, is still supported. Even if you roll back to 1.1, the objects created using +the new apiVersions will still be accessible, using the old version. You can +continue to use your existing JSON and YAML files until you are ready to switch +to autoscaling/v1. We may remove support for HorizontalPodAutoscalers with apiVersion: extensions/v1beta1 in 1.3 or 1.4.
+ * Kube-Proxy now defaults to an iptables-based proxy. If the --proxy-mode flag is +specified while starting kube-proxy (‘userspace’ or ‘iptables’), the flag value +will be respected. If the flag value is not specified, the kube-proxy respects +the Node object annotation: ‘net.beta.kubernetes.io/proxy-mode’. If the +annotation is not specified, then ‘iptables’ mode is the default. If kube-proxy +is unable to start in iptables mode because system requirements are not met +(kernel or iptables versions are insufficient), the kube-proxy will fall back +to userspace mode. Kube-proxy is much more performant and less +resource-intensive in ‘iptables’ mode. A sketch of this selection order appears at the end of this list.
+ * Node stability can be improved by reserving [resources](https://github.com/kubernetes/kubernetes/blob/release-1.2/docs/proposals/node-allocatable.md) for the base operating system using the --system-reserved and --kube-reserved Kubelet flags.
+ * Liveness and readiness probes now support more configuration parameters: +periodSeconds, successThreshold, failureThreshold.
+ * The new ReplicaSet API (Beta) in the Extensions API group is similar to +ReplicationController, but its [selector](http://kubernetes.io/docs/user-guide/labels/#label-selectors) is more general (it supports set-based selectors, whereas ReplicationController +only supports equality-based selectors).
+ * Scale subresource support is now expanded to ReplicaSets along with +ReplicationControllers and Deployments. Scale now supports two different types +of selectors to accommodate both [equality-based selectors](http://kubernetes.io/docs/user-guide/labels/#equality-based-requirement) supported by ReplicationControllers and [set-based selectors](http://kubernetes.io/docs/user-guide/labels/#set-based-requirement) supported by Deployments and ReplicaSets.
+ * “kubectl run” now produces Deployments (instead of ReplicationControllers) and +Jobs (instead of Pods) by default.
+ * Pods can now consume Secret data in environment variables and inject those +environment variables into a container’s command-line args.
+ * Stable version of Heapster which scales up to 1000 nodes: more metrics, reduced +latency, reduced cpu/memory consumption (~4mb per monitored node).
+ * Pods now have a security context which allows users to specify:
+ * attributes which apply to the whole pod:
+ * User ID
+ * Whether all containers should be non-root
+ * Supplemental Groups
+ * FSGroup - a special supplemental group
+ * SELinux options
+ * If a pod defines an FSGroup, that Pod’s system (emptyDir, secret, configMap, +etc) volumes and block-device volumes will be owned by the FSGroup, and each +container in the pod will run with the FSGroup as a supplemental group
+ * Volumes that support SELinux labelling are now automatically relabeled with the +Pod’s SELinux context, if specified
+ * A stable client library release\_1\_2 is added. The library is [here](pkg/client/clientset_generated/), and detailed doc is [here](docs/devel/generating-clientset.md#released-clientsets). We will keep the interface of this go client stable.
+ * New Azure File Service Volume Plugin enables mounting Microsoft Azure File +Volumes (SMB 2.1 and 3.0) into a Pod. See [example](https://github.com/kubernetes/kubernetes/blob/release-1.2/examples/azure_file/README.md) for details.
+ * Log usage and root filesystem usage of a container, volume usage of a pod, and node disk usage are exposed through the Kubelet's new metrics API.
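+A sketch of the kube-proxy mode-selection order described in the list above
+(the node name and master URL are placeholders):
+
+```sh
+# 1) An explicit flag always wins:
+kube-proxy --master=https://10.0.0.1 --proxy-mode=userspace
+
+# 2) With no flag, kube-proxy consults this annotation on its Node object:
+kubectl annotate node node-1 net.beta.kubernetes.io/proxy-mode=userspace
+
+# 3) With neither set, 'iptables' is the default, falling back to userspace
+#    if the kernel or iptables version is insufficient.
+```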
+### Experimental Features
+
+ * Dynamic Provisioning of PersistentVolumes: Kubernetes previously required all +volumes to be manually provisioned by a cluster administrator before use. With +this feature, volume plugins that support it (GCE PD, AWS EBS, and Cinder) can +automatically provision a PersistentVolume to bind to an unfulfilled +PersistentVolumeClaim.
+ * Run multiple schedulers in parallel, e.g. one or more custom schedulers +alongside the default Kubernetes scheduler, using pod annotations to select +among the schedulers for each pod. Documentation is [here](http://kubernetes.io/docs/admin/multiple-schedulers.md), design doc is [here](docs/proposals/multiple-schedulers.md).
+ * More expressive node affinity syntax, and support for “soft” node affinity. +Node selectors (to constrain pods to schedule on a subset of nodes) now support +the operators {In, NotIn, Exists, DoesNotExist, Gt, Lt} instead of just conjunction of exact matches on node label values. In +addition, we’ve introduced a new “soft” kind of node selector that is just a +hint to the scheduler; the scheduler will try to satisfy these requests but it +does not guarantee they will be satisfied. Both the “hard” and “soft” variants +of node affinity use the new syntax. Documentation is [here](http://kubernetes.io/docs/user-guide/node-selection/) (see section “Alpha feature in Kubernetes v1.2: Node Affinity”). Design doc is [here](https://github.com/kubernetes/kubernetes/blob/release-1.2/docs/design/nodeaffinity.md).
+ * A pod can specify its own Hostname and Subdomain via annotations (pod.beta.kubernetes.io/hostname, pod.beta.kubernetes.io/subdomain); a sketch follows at the end of this list. If the Subdomain matches the name of a [headless service](http://kubernetes.io/docs/user-guide/services/#headless-services) in the same namespace, a DNS A record is also created for the pod’s FQDN. More +details can be found in the [DNS README](https://github.com/kubernetes/kubernetes/blob/release-1.2/cluster/saltbase/salt/kube-dns/README.md#a-records-and-hostname-based-on-pod-annotations---a-beta-feature-in-kubernetes-v12). Changes were introduced in PR [#20688](https://github.com/kubernetes/kubernetes/pull/20688).
+ * New SchedulerExtender enables users to implement custom +out-of-(the-scheduler)-process scheduling predicates and priority functions, +for example to schedule pods based on resources that are not directly managed +by Kubernetes. Changes were introduced in PR [#13580](https://github.com/kubernetes/kubernetes/pull/13580). Example configuration and documentation is available [here](docs/design/scheduler_extender.md). This is an alpha feature and may not be supported in its current form at beta +or GA.
+ * New Flex Volume Plugin enables users to use out-of-process volume plugins that +are installed to “/usr/libexec/kubernetes/kubelet-plugins/volume/exec/” on +every node, instead of being compiled into the Kubernetes binary. See [example](examples/volumes/flexvolume/README.md) for details. It is used to +vendor volumes into a pod; it expects vendor drivers to be installed in the +volume plugin path on each kubelet node. This is an alpha feature and may +change in the future.
+ * Kubelet exposes a new Alpha metrics API - /stats/summary - in a user-friendly format with reduced system overhead. The measurement is done in PR [#22542](https://github.com/kubernetes/kubernetes/pull/22542).
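+A sketch of the hostname/subdomain annotations described above. All names are
+invented, and it assumes a headless service named `sub` already exists in the
+`default` namespace:
+
+```sh
+kubectl create -f - <<'EOF'
+apiVersion: v1
+kind: Pod
+metadata:
+  name: busybox-host-demo                      # hypothetical pod name
+  annotations:
+    pod.beta.kubernetes.io/hostname: busybox-1
+    pod.beta.kubernetes.io/subdomain: sub      # must match a headless service
+spec:
+  containers:
+  - name: busybox
+    image: busybox
+    command: ["sleep", "3600"]
+EOF
+# If 'sub' is a headless service, the pod also gets a DNS A record:
+#   busybox-1.sub.default.svc.cluster.local
+```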
+### Action required
+
+ * Docker v1.9.1 is officially recommended. Docker v1.8.3 and Docker v1.10 are +supported. If you are using an older release of Docker, please upgrade. Known +issues with Docker 1.9.1 can be found below.
+ * CPU hardcapping will be enabled by default for containers with CPU limit set, +if supported by the kernel. You should either adjust your CPU limit, or set CPU +request only, if you want to avoid hardcapping. If the kernel does not support +CPU Quota, NodeStatus will contain a warning indicating that CPU Limits cannot +be enforced.
+ * The following applies only if you use the Go language client (/pkg/client/unversioned) to create Job by defining Go variables of type "k8s.io/kubernetes/pkg/apis/extensions".Job. We think this is not common, so if you are not sure what this means, you probably aren't doing this. If +you do this, then, at the time you re-vendor the "k8s.io/kubernetes/" code, you will need to set job.Spec.ManualSelector = true, or else set job.Spec.Selector = nil. Otherwise, the jobs you create may be rejected. See [Specifying your own pod selector](http://kubernetes.io/docs/user-guide/jobs/#specifying-your-own-pod-selector).
+ * Deployment was Alpha in 1.1 (though it had apiVersion extensions/v1beta1) and +was disabled by default. Due to some non-backward-compatible API changes, any +Deployment objects you created in 1.1 won’t work in the 1.2 release.
+ * Before upgrading to 1.2, delete all Deployment alpha-version resources, including the Replication Controllers and Pods the Deployment manages. Then +create Deployment Beta resources after upgrading to 1.2. Not deleting the +Deployment objects may cause the deployment controller to mistakenly match +other pods and delete them, due to the selector API change.
+ * Client (kubectl) and server versions must match (both 1.1 or both 1.2) for any +Deployment-related operations.
+ * Behavior change:
+ * Deployment creates ReplicaSets instead of ReplicationControllers.
+ * Scale subresource now has a new targetSelector field in its status. This field supports the new set-based selectors supported +by Deployments, but in a serialized format.
+ * Spec change:
+ * Deployment’s [selector](http://kubernetes.io/docs/user-guide/labels/#label-selectors) is now more general (supports set-based selector; it only supported +equality-based selector in 1.1).
+ * .spec.uniqueLabelKey is removed -- users can’t customize unique label key -- +and its default value is changed from +“deployment.kubernetes.io/podTemplateHash” to “pod-template-hash”.
+ * .spec.strategy.rollingUpdate.minReadySeconds is moved to .spec.minReadySeconds
+ * DaemonSet was Alpha in 1.1 (though it had apiVersion extensions/v1beta1) and +was disabled by default. Due to some non-backward-compatible API changes, any +DaemonSet objects you created in 1.1 won’t work in the 1.2 release.
+ * Before upgrading to 1.2, delete all DaemonSet alpha-version resources. If you do not want to disrupt the pods, use kubectl delete daemonset +--cascade=false. Then create DaemonSet Beta resources after upgrading to 1.2.
+ * Client (kubectl) and server versions must match (both 1.1 or both 1.2) for any +DaemonSet-related operations.
+ * Behavior change:
+ * DaemonSet pods will be created on nodes with .spec.unschedulable=true and will +not be evicted from nodes whose Ready condition is false.
+ * Updates to the pod template are now permitted. To perform a rolling update of a +DaemonSet, update the pod template and then delete its pods one by one; they +will be replaced using the updated template.
+ * Spec change:
+ * DaemonSet’s [selector](http://kubernetes.io/docs/user-guide/labels/#label-selectors) is now more general (supports set-based selector; it only supported +equality-based selector in 1.1).
+ * Running against a secured etcd requires these flags to be passed to +kube-apiserver (instead of --etcd-config); a combined example follows this item:
+ * --etcd-certfile, --etcd-keyfile (if using client cert auth)
+ * --etcd-cafile (if not using system roots)
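+Putting the secured-etcd flags together, a sketch (the etcd endpoint and
+certificate paths are placeholders, and the standard --etcd-servers flag is
+assumed for the endpoint):
+
+```sh
+# Add the rest of your usual apiserver flags as well.
+kube-apiserver \
+  --etcd-servers=https://etcd.example.com:2379 \
+  --etcd-certfile=/etc/etcd/client.crt \
+  --etcd-keyfile=/etc/etcd/client.key \
+  --etcd-cafile=/etc/etcd/ca.crt
+```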
+ * As part of preparation in 1.2 for adding support for protocol buffers (and the +direct YAML support in the API available today), the Content-Type and Accept +headers are now properly handled as per the HTTP spec. As a consequence, if +you had a client that was sending an invalid Content-Type or Accept header to +the API, in 1.2 you will either receive a 415 or 406 error. +The only client this is known to affect is curl: when you use -d with JSON but don't set a +content type, it helpfully sends "application/x-www-form-urlencoded", which is not +correct. +Other client authors should double-check that you are sending proper +Accept and Content-Type headers, or set no value (in which case JSON is the +default). +An example using curl: +curl -H "Content-Type: application/json" -XPOST -d +'{"apiVersion":"v1","kind":"Namespace","metadata":{"name":"kube-system"}}' "[http://127.0.0.1:8080/api/v1/namespaces](http://127.0.0.1:8080/api/v1/namespaces)"
+ * The version of InfluxDB is bumped from 0.8 to 0.9, which means a storage schema +change. More details [here](https://docs.influxdata.com/influxdb/v0.9/administration/upgrading/).
+ * We have renamed “minions” to “nodes”. If you were specifying NUM\_MINIONS or +MINION\_SIZE to kube-up, you should now specify NUM\_NODES or NODE\_SIZE.
+
+### Known Issues
+
+ * Paused deployments can't be resized and don't clean up old ReplicaSets.
+ * Minimum memory limit is 4MB. This is a Docker limitation.
+ * Minimum CPU limit is 10m. This is a Linux kernel limitation.
+ * “kubectl rollout undo” (i.e. rollback) will hang on paused deployments, because +paused deployments can’t be rolled back (this is expected), and the command +waits for rollback events to return the result. Users should use “kubectl +rollout resume” to resume a deployment before rolling back.
+ * “kubectl edit <list of resources>” will open the editor multiple times, once for each +resource in the list.
+ * If you create an HPA object using the autoscaling/v1 API without specifying +targetCPUUtilizationPercentage and read it using kubectl, it will print the default +value as specified in extensions/v1beta1 (see details in [#23196](https://github.com/kubernetes/kubernetes/issues/23196)).
+ * If a node or kubelet crashes with a volume attached, the volume will remain +attached to that node. If that volume can only be attached to one node at a +time (GCE PDs attached in RW mode, for example), then the volume must be +manually detached before Kubernetes can attach it to other nodes.
+ * If a volume is already attached to a node, any subsequent attempts to attach it +again (due to kubelet restart, for example) will fail. The volume must either +be manually detached first or the pods referencing it deleted (which would +trigger automatic volume detach).
+ * In very large clusters it may happen that a few nodes won’t register with the API +server in a given timeframe for whatever reason (networking issue, machine +failure, etc.). Normally, when the kube-up script encounters even one NotReady +node it will fail, even though the cluster most likely will be working. We +added an environment variable to kube-up, ALLOWED\_NOTREADY\_NODES, that +defines the number of nodes that, if not Ready in time, won’t cause kube-up to +fail (a usage sketch follows this list).
+ * “kubectl rolling-update” only supports Replication Controllers (it doesn’t +support Replica Sets). It’s recommended to use Deployment 1.2 with “kubectl +rollout” commands instead, if you want to rolling update Replica Sets.
+ * When live upgrading Kubelet to 1.2 without draining the pods running on the node, +the containers will be restarted by Kubelet (see details in [#23104](https://github.com/kubernetes/kubernetes/issues/23104)).
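+The kube-up tolerance variable mentioned in the list above, as a sketch (the
+values are examples):
+
+```sh
+# Tolerate up to 3 nodes failing to become Ready during cluster bring-up:
+ALLOWED_NOTREADY_NODES=3 NUM_NODES=1000 ./cluster/kube-up.sh
+```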
+#### Docker Known Issues
+
+##### 1.9.1
+
+ * Listing containers can be slow at times which will affect kubelet performance. +More information [here](https://github.com/docker/docker/issues/17720)
+ * Docker daemon restarts can fail. Docker checkpoints have to be deleted between +restarts. More information [here](https://github.com/kubernetes/kubernetes/issues/20995)
+ * Pod IP allocation-related issues. Deleting the docker checkpoint prior to +restarting the daemon alleviates this issue, but hasn’t been verified to +completely eliminate the IP allocation issue. More information [here](https://github.com/kubernetes/kubernetes/issues/21523#issuecomment-191498969)
+ * Daemon becomes unresponsive (rarely) due to kernel deadlocks. More information [here](https://github.com/kubernetes/kubernetes/issues/21866#issuecomment-189492391)
+
+### Provider-specific Notes
+
+#### Various
+
+ Core changes:
+
+ * Support for load balancers with source ranges
+
+#### AWS
+
+Core changes:
+
+ * Support for ELBs with complex configurations: better subnet selection with +multiple subnets, and internal ELBs
+ * Support for VPCs with private DNS names
+ * Multiple fixes to EBS volume mounting code for robustness, and to support +mounting the full number of AWS recommended volumes.
+ * Multiple fixes to avoid hitting AWS rate limits, and to throttle if we do
+ * Support for the EC2 Container Registry (currently in us-east-1 only)
+
+With kube-up:
+
+ * Automatically install updates on boot & reboot
+ * Use optimized image based on Jessie by default
+ * Add support for Ubuntu Wily
+ * Master is configured with automatic restart-on-failure, via CloudWatch
+ * Bootstrap reworked to be more similar to GCE; better supports reboots/restarts
+ * Use an elastic IP for the master by default
+ * Experimental support for node spot instances (set NODE\_SPOT\_PRICE=0.05)
+
+#### GCE
+
+ * Ubuntu Trusty support added
+
+Please see the [Releases Page](https://github.com/kubernetes/kubernetes/releases) for older releases.
+
+
+[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/CHANGELOG.md?pixel)]() diff --git a/vendor/k8s.io/kubernetes/CONTRIB.md b/vendor/k8s.io/kubernetes/CONTRIB.md new file mode 100644 index 000000000000..99121a456dc3 --- /dev/null +++ b/vendor/k8s.io/kubernetes/CONTRIB.md @@ -0,0 +1,4 @@ +[Moved to CONTRIBUTING.md](CONTRIBUTING.md) + + +[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/CONTRIB.md?pixel)]() diff --git a/vendor/k8s.io/kubernetes/CONTRIBUTING.md b/vendor/k8s.io/kubernetes/CONTRIBUTING.md new file mode 100644 index 000000000000..f344c1d706a1 --- /dev/null +++ b/vendor/k8s.io/kubernetes/CONTRIBUTING.md @@ -0,0 +1,46 @@ +# Contributing guidelines
+
+Want to hack on kubernetes? Yay!
+
+## Developer Guide
+
+We have a [Developer's Guide](docs/devel/README.md) that outlines everything you need to know from setting up your dev environment to how to get faster Pull Request reviews. If you find something undocumented or incorrect along the way, please feel free to send a Pull Request.
+
+## Filing issues
+
+If you have a question about Kubernetes or have a problem using it, please read the [troubleshooting guide](docs/troubleshooting.md) before filing an issue.
+
+## How to become a contributor and submit your own code
+
+### Contributor License Agreements
+
+We'd love to accept your patches! Before we can take them, we have to jump a couple of legal hurdles.
+
+Please fill out either the individual or corporate Contributor License Agreement (CLA).
+
+ * If you are an individual writing original source code and you're sure you own the intellectual property, then you'll need to sign an [individual CLA](http://code.google.com/legal/individual-cla-v1.0.html).
+ * If you work for a company that wants to allow you to contribute your work, then you'll need to sign a [corporate CLA](http://code.google.com/legal/corporate-cla-v1.0.html). + +Follow either of the two links above to access the appropriate CLA and instructions for how to sign and return it. Once we receive it, we'll be able to accept your pull requests. + +***NOTE***: Only original source code from you and other people that have signed the CLA can be accepted into the main repository. This policy does not apply to [third_party](third_party/). + +### Contributing A Patch + +1. Submit an issue describing your proposed change to the repo in question. +1. The repo owner will respond to your issue promptly. +1. If your proposed change is accepted, and you haven't already done so, sign a Contributor License Agreement (see details above). +1. Fork the desired repo, develop and test your code changes. +1. Submit a pull request. + +### Protocols for Collaborative Development + +Please read [this doc](docs/devel/collab.md) for information on how we're running development for the project. +Also take a look at the [development guide](docs/devel/development.md) for information on how to set up your environment, run tests, manage dependencies, etc. + +### Adding dependencies + +If your patch depends on new packages, add that package with [`godep`](https://github.com/tools/godep). Follow the [instructions to add a dependency](docs/devel/development.md#godep-and-dependency-management). + + +[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/CONTRIBUTING.md?pixel)]() diff --git a/vendor/k8s.io/kubernetes/DESIGN.md b/vendor/k8s.io/kubernetes/DESIGN.md new file mode 100644 index 000000000000..c8af5216222b --- /dev/null +++ b/vendor/k8s.io/kubernetes/DESIGN.md @@ -0,0 +1,10 @@ +# Kubernetes Overview + +See the [user guide overview](docs/user-guide/overview.md) for an introduction to Kubernetes and its core concepts. + +See the [design overview](docs/design/) for an overview of the system design. + +See the [API overview](docs/api.md) and [conventions](docs/devel/api-conventions.md) for an overview of the API design. + + +[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/DESIGN.md?pixel)]() diff --git a/vendor/k8s.io/kubernetes/Makefile b/vendor/k8s.io/kubernetes/Makefile new file mode 100644 index 000000000000..af0950c6b1e8 --- /dev/null +++ b/vendor/k8s.io/kubernetes/Makefile @@ -0,0 +1,286 @@ +# Copyright 2016 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +DBG_MAKEFILE ?= +ifeq ($(DBG_MAKEFILE),1) + $(warning ***** starting Makefile for goal(s) "$(MAKECMDGOALS)") + $(warning ***** $(shell date)) +else + # If we're not debugging the Makefile, don't echo recipes. + MAKEFLAGS += -s +endif + + +# Old-skool build tools. +# +# Commonly used targets (see each target for more information): +# all: Build code. +# test: Run tests. +# clean: Clean up. + +# It's necessary to set this because some environments don't link sh -> bash. 
+SHELL := /bin/bash
+
+# We don't need make's built-in rules.
+MAKEFLAGS += --no-builtin-rules
+.SUFFIXES:
+
+# Constants used throughout.
+.EXPORT_ALL_VARIABLES:
+OUT_DIR ?= _output
+BIN_DIR := $(OUT_DIR)/bin
+PRJ_SRC_PATH := k8s.io/kubernetes
+GENERATED_FILE_PREFIX := zz_generated.
+
+# Metadata for driving the build lives here.
+META_DIR := .make
+
+# Our build flags.
+# TODO(thockin): it would be nice to just use the native flags. Can we EOL
+# these "wrapper" flags?
+KUBE_GOFLAGS := $(GOFLAGS)
+KUBE_GOLDFLAGS := $(GOLDFLAGS)
+KUBE_GOGCFLAGS = $(GOGCFLAGS)
+
+# This controls the verbosity of the build. Higher numbers mean more output.
+KUBE_VERBOSE ?= 1
+
+# Build code.
+#
+# Args:
+# WHAT: Directory names to build. If any of these directories has a 'main'
+# package, the build will produce executable files under $(OUT_DIR)/go/bin.
+# If not specified, "everything" will be built.
+# GOFLAGS: Extra flags to pass to 'go' when building.
+# GOLDFLAGS: Extra linking flags passed to 'go' when building.
+# GOGCFLAGS: Additional go compile flags passed to 'go' when building.
+#
+# Example:
+# make
+# make all
+# make all WHAT=cmd/kubelet GOFLAGS=-v
+# make all GOGCFLAGS="-N -l"
+# Note: Use the -N -l options to disable compiler optimizations and inlining.
+# Using these build options allows you to subsequently use source
+# debugging tools like delve.
+.PHONY: all
+all: generated_files
+ hack/make-rules/build.sh $(WHAT)
+
+# Build ginkgo
+#
+# Example:
+# make ginkgo
+.PHONY: ginkgo
+ginkgo:
+ hack/make-rules/build.sh vendor/github.com/onsi/ginkgo/ginkgo
+
+# Runs all the presubmission verifications.
+#
+# Args:
+# BRANCH: Branch to be passed to verify-godeps.sh script.
+#
+# Example:
+# make verify
+# make verify BRANCH=branch_x
+.PHONY: verify
+verify:
+ KUBE_VERIFY_GIT_BRANCH=$(BRANCH) hack/make-rules/verify.sh -v
+ hack/make-rules/vet.sh
+
+# Build and run tests.
+#
+# Args:
+# WHAT: Directory names to test. All *_test.go files under these
+# directories will be run. If not specified, "everything" will be tested.
+# TESTS: Same as WHAT.
+# GOFLAGS: Extra flags to pass to 'go' when building.
+# GOLDFLAGS: Extra linking flags to pass to 'go' when building.
+# GOGCFLAGS: Additional go compile flags passed to 'go' when building.
+#
+# Example:
+# make check
+# make test
+# make check WHAT=pkg/kubelet GOFLAGS=-v
+.PHONY: check test
+check test: generated_files
+ hack/make-rules/test.sh $(WHAT) $(TESTS)
+
+# Build and run integration tests.
+#
+# Args:
+# WHAT: Directory names to test. All *_test.go files under these
+# directories will be run. If not specified, "everything" will be tested.
+#
+# Example:
+# make test-integration
.PHONY: test-integration
+test-integration: generated_files
+ hack/make-rules/test-integration.sh $(WHAT)
+
+# Build and run end-to-end tests.
+#
+# Example:
+# make test-e2e
+.PHONY: test-e2e
+test-e2e: ginkgo generated_files
+ go run hack/e2e.go -v --build --up --test --down
+
+# Build and run node end-to-end tests.
+#
+# Args:
+# FOCUS: Regexp that matches the tests to be run. Defaults to "".
+# SKIP: Regexp that matches the tests that needs to be skipped. Defaults
+# to "".
+# RUN_UNTIL_FAILURE: If true, pass --untilItFails to ginkgo so tests are run
+# repeatedly until they fail. Defaults to false.
+# REMOTE: If true, run the tests on a remote host instance on GCE. Defaults
+# to false.
+# IMAGES: For REMOTE=true only. Comma delimited list of images for creating
+# remote hosts to run tests against. Defaults to a recent image.
+# LIST_IMAGES: If true, don't run tests. Just output the list of available
+# images for testing. Defaults to false.
+# HOSTS: For REMOTE=true only. Comma delimited list of running gce hosts to
+# run tests against. Defaults to "".
+# DELETE_INSTANCES: For REMOTE=true only. Delete any instances created as
+# part of this test run. Defaults to false.
+# ARTIFACTS: For REMOTE=true only. Local directory to scp test artifacts into
+# from the remote hosts. Defaults to "/tmp/_artifacts".
+# REPORT: For REMOTE=false only. Local directory to write junit xml results
+# to. Defaults to "/tmp/".
+# CLEANUP: For REMOTE=true only. If false, do not stop processes or delete
+# test files on remote hosts. Defaults to true.
+# IMAGE_PROJECT: For REMOTE=true only. Project containing images provided to
+# IMAGES. Defaults to "kubernetes-node-e2e-images".
+# INSTANCE_PREFIX: For REMOTE=true only. Instances created from images will
+# have the name "${INSTANCE_PREFIX}-${IMAGE_NAME}". Defaults to "test".
+# INSTANCE_METADATA: For REMOTE=true and running on GCE only.
+# GUBERNATOR: For REMOTE=true only. Produce link to Gubernator to view logs.
+# Defaults to false.
+#
+# Example:
+# make test-e2e-node FOCUS=Kubelet SKIP=container
+# make test-e2e-node REMOTE=true DELETE_INSTANCES=true
+# make test-e2e-node TEST_ARGS="--cgroups-per-qos=true"
+.PHONY: test-e2e-node
+test-e2e-node: ginkgo generated_files
+ hack/make-rules/test-e2e-node.sh
+
+# Build and run cmdline tests.
+#
+# Example:
+# make test-cmd
+.PHONY: test-cmd
+test-cmd: generated_files
+ hack/make-rules/test-cmd.sh
+
+# Remove all build artifacts.
+#
+# Example:
+# make clean
+#
+# TODO(thockin): call clean_generated when we stop committing generated code.
+.PHONY: clean
+clean: clean_meta
+ build/make-clean.sh
+ rm -rf $(OUT_DIR)
+ rm -rf Godeps/_workspace # Just until we are sure it is gone
+
+# Remove make-related metadata files.
+#
+# Example:
+# make clean_meta
+.PHONY: clean_meta
+clean_meta:
+ rm -rf $(META_DIR)
+
+# Remove all auto-generated artifacts. Generated artifacts in staging folder should not be removed as they are not
+# generated using generated_files.
+#
+# Example:
+# make clean_generated
+.PHONY: clean_generated
+clean_generated:
+ find . -type f -name $(GENERATED_FILE_PREFIX)\* | grep -v "[.]/staging/.*" | xargs rm -f
+
+# Run 'go vet'.
+#
+# Args:
+# WHAT: Directory names to vet. All *.go files under these
+# directories will be vetted. If not specified, "everything" will be
+# vetted.
+# +# Example: +# make vet +# make vet WHAT=pkg/kubelet +.PHONY: vet +vet: + hack/make-rules/vet.sh $(WHAT) + +# Build a release +# +# Example: +# make release +.PHONY: release +release: + build/release.sh + +# Build a release, but skip tests +# +# Example: +# make release-skip-tests +.PHONY: release-skip-tests quick-release +release-skip-tests quick-release: + KUBE_RELEASE_RUN_TESTS=n KUBE_FASTBUILD=true build/release.sh + +# Cross-compile for all platforms +# +# Example: +# make cross +.PHONY: cross +cross: + hack/make-rules/cross.sh + +# Add rules for all directories in cmd/ +# +# Example: +# make kubectl kube-proxy +.PHONY: $(notdir $(abspath $(wildcard cmd/*/))) +$(notdir $(abspath $(wildcard cmd/*/))): generated_files + hack/make-rules/build.sh cmd/$@ + +# Add rules for all directories in plugin/cmd/ +# +# Example: +# make kube-scheduler +.PHONY: $(notdir $(abspath $(wildcard plugin/cmd/*/))) +$(notdir $(abspath $(wildcard plugin/cmd/*/))): generated_files + hack/make-rules/build.sh plugin/cmd/$@ + +# Add rules for all directories in federation/cmd/ +# +# Example: +# make federation-apiserver federation-controller-manager +.PHONY: $(notdir $(abspath $(wildcard federation/cmd/*/))) +$(notdir $(abspath $(wildcard federation/cmd/*/))): generated_files + hack/make-rules/build.sh federation/cmd/$@ + +# Produce auto-generated files needed for the build. +# +# Example: +# make generated_files +.PHONY: generated_files +generated_files: + $(MAKE) -f Makefile.$@ $@ CALLED_FROM_MAIN_MAKEFILE=1 diff --git a/vendor/k8s.io/kubernetes/Makefile.generated_files b/vendor/k8s.io/kubernetes/Makefile.generated_files new file mode 100644 index 000000000000..83835b4c2f0d --- /dev/null +++ b/vendor/k8s.io/kubernetes/Makefile.generated_files @@ -0,0 +1,569 @@ +# Copyright 2016 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Don't allow users to call this directly. There are too many variables this +# assumes to inherit from the main Makefile. This is not a user-facing file. +ifeq ($(CALLED_FROM_MAIN_MAKEFILE),) + $(error Please use the main Makefile, e.g. `make generated_files`) +endif + +# Don't allow an implicit 'all' rule. This is not a user-facing file. +ifeq ($(MAKECMDGOALS),) + $(error This Makefile requires an explicit rule to be specified) +endif + +ifeq ($(DBG_MAKEFILE),1) + $(warning ***** starting Makefile.generated_files for goal(s) "$(MAKECMDGOALS)") + $(warning ***** $(shell date)) +endif + + +# It's necessary to set this because some environments don't link sh -> bash. +SHELL := /bin/bash + +# This rule collects all the generated file sets into a single rule. Other +# rules should depend on this to ensure generated files are rebuilt. +.PHONY: generated_files +generated_files: gen_deepcopy gen_conversion gen_openapi + +# Code-generation logic. +# +# This stuff can be pretty tricky, and there's probably some corner cases that +# we don't handle well. That said, here's a straightforward test to prove that +# the most common cases work. Sadly, it is manual. 
+# +# make clean +# find . -name .make\* | xargs rm -f +# find . -name zz_generated\* | xargs rm -f +# # verify `find . -name zz_generated.deepcopy.go | wc -l` is 0 +# # verify `find . -name .make | wc -l` is 0 +# +# make nonexistent +# # expect "No rule to make target" +# # verify `find .make/ -type f | wc -l` has many files +# +# make gen_deepcopy +# # expect deepcopy-gen is built exactly once +# # expect many files to be regenerated +# # verify `find . -name zz_generated.deepcopy.go | wc -l` has files +# make gen_deepcopy +# # expect nothing to be rebuilt, finish in O(seconds) +# touch pkg/api/types.go +# make gen_deepcopy +# # expect one file to be regenerated +# make gen_deepcopy +# # expect nothing to be rebuilt, finish in O(seconds) +# touch cmd/libs/go2idl/deepcopy-gen/main.go +# make gen_deepcopy +# # expect deepcopy-gen is built exactly once +# # expect many files to be regenerated +# # verify `find . -name zz_generated.deepcopy.go | wc -l` has files +# make gen_deepcopy +# # expect nothing to be rebuilt, finish in O(seconds) +# +# make gen_conversion +# # expect conversion-gen is built exactly once +# # expect many files to be regenerated +# # verify `find . -name zz_generated.conversion.go | wc -l` has files +# make gen_conversion +# # expect nothing to be rebuilt, finish in O(seconds) +# touch pkg/api/types.go +# make gen_conversion +# # expect one file to be regenerated +# make gen_conversion +# # expect nothing to be rebuilt, finish in O(seconds) +# touch cmd/libs/go2idl/conversion-gen/main.go +# make gen_conversion +# # expect conversion-gen is built exactly once +# # expect many files to be regenerated +# # verify `find . -name zz_generated.conversion.go | wc -l` has files +# make gen_conversion +# # expect nothing to be rebuilt, finish in O(seconds) +# +# make all +# # expect it to build +# +# make test +# # expect it to pass +# +# make clean +# # verify `find . -name zz_generated.deepcopy.go | wc -l` is 0 +# # verify `find . -name .make | wc -l` is 0 +# +# make all WHAT=cmd/kube-proxy +# # expect it to build +# +# make clean +# make test WHAT=cmd/kube-proxy +# # expect it to pass + +# This variable holds a list of every directory that contains Go files in this +# project. Other rules and variables can use this as a starting point to +# reduce filesystem accesses. +ifeq ($(DBG_MAKEFILE),1) + $(warning ***** finding all *.go dirs) +endif +ALL_GO_DIRS := $(shell \ + hack/make-rules/helpers/cache_go_dirs.sh $(META_DIR)/all_go_dirs.mk \ +) + +# The name of the metadata file which lists *.go files in each pkg. +GOFILES_META := gofiles.mk + +# Establish a dependency between the deps file and the dir. Whenever a dir +# changes (files added or removed) the deps file will be considered stale. +# +# The variable value was set in $(GOFILES_META) and included as part of the +# dependency management logic. +# +# This is looser than we really need (e.g. we don't really care about non *.go +# files or even *_test.go files), but this is much easier to represent. +# +# Because we 'sinclude' the deps file, it is considered for rebuilding, as part +# of make's normal evaluation. If it gets rebuilt, make will restart. +# +# The '$(eval)' is needed because this has a different RHS for each LHS, and +# would otherwise produce results that make can't parse. +$(foreach dir, $(ALL_GO_DIRS), $(eval \ + $(META_DIR)/$(dir)/$(GOFILES_META): $(dir) \ +)) + +# How to rebuild a deps file. 
When make determines that the deps file is stale
+# (see above), it executes this rule, and then re-loads the deps file.
+#
+# This is looser than we really need (e.g. we don't really care about test
+# files), but this is MUCH faster than calling `go list`.
+#
+# We regenerate the output file in order to satisfy make's "newer than" rules,
+# but we only need to rebuild targets if the contents actually changed. That
+# is what the .stamp file represents.
+$(foreach dir, $(ALL_GO_DIRS), \
+ $(META_DIR)/$(dir)/$(GOFILES_META)):
+ FILES=$$(ls $</*.go); \
+ mkdir -p $(@D); \
+ echo "gofiles__$< := $$(echo $${FILES})" >$@.tmp; \
+ cmp -s $@.tmp $@ || touch $@.stamp; \
+ mv $@.tmp $@
+# This is required to fill in the DAG, since some cases (e.g. 'make clean all')
+# will reference the .stamp file when it doesn't exist. We don't need to
+# rebuild it in that case, just keep make happy.
+$(foreach dir, $(ALL_GO_DIRS), \
+ $(META_DIR)/$(dir)/$(GOFILES_META).stamp):
+
+# Include any deps files as additional Makefile rules. This triggers make to
+# consider the deps files for rebuild, which makes the whole
+# dependency-management logic work. 'sinclude' is "silent include" which does
+# not fail if the file does not exist.
+$(foreach dir, $(ALL_GO_DIRS), $(eval \
+ sinclude $(META_DIR)/$(dir)/$(GOFILES_META) \
+))
+
+# Generate a list of all files that have a `+k8s:` comment-tag. This will be
+# used to derive lists of files/dirs for generation tools.
+ifeq ($(DBG_MAKEFILE),1)
+ $(warning ***** finding all +k8s: tags)
+endif
+ALL_K8S_TAG_FILES := $(shell \
+ find $(ALL_GO_DIRS) -maxdepth 1 -type f -name \*.go \
+ | xargs grep --color=never -l '^// *+k8s:' \
+)
+
+#
+# Deep-copy generation
+#
+# Any package that wants deep-copy functions generated must include a
+# comment-tag in column 0 of one file of the form:
+# // +k8s:deepcopy-gen=<VALUE>
+#
+# The <VALUE> may be one of:
+# generate: generate deep-copy functions into the package
+# register: generate deep-copy functions and register them with a
+# scheme
+
+# The result file, in each pkg, of deep-copy generation.
+DEEPCOPY_BASENAME := $(GENERATED_FILE_PREFIX)deepcopy
+DEEPCOPY_FILENAME := $(DEEPCOPY_BASENAME).go
+
+# The tool used to generate deep copies.
+DEEPCOPY_GEN := $(BIN_DIR)/deepcopy-gen
+
+# Find all the directories that request deep-copy generation.
+ifeq ($(DBG_MAKEFILE),1)
+ $(warning ***** finding all +k8s:deepcopy-gen tags)
+endif
+DEEPCOPY_DIRS := $(shell \
+ grep --color=never -l '+k8s:deepcopy-gen=' $(ALL_K8S_TAG_FILES) \
+ | xargs -n1 dirname \
+ | sort -u \
+)
+DEEPCOPY_FILES := $(addsuffix /$(DEEPCOPY_FILENAME), $(DEEPCOPY_DIRS))
+
+# This rule aggregates the set of files to generate and then generates them all
+# in a single run of the tool.
+.PHONY: gen_deepcopy
+gen_deepcopy: $(DEEPCOPY_FILES)
+ if [[ -f $(META_DIR)/$(DEEPCOPY_GEN).todo ]]; then \
+ ./hack/run-in-gopath.sh $(DEEPCOPY_GEN) \
+ --v $(KUBE_VERBOSE) \
+ -i $$(cat $(META_DIR)/$(DEEPCOPY_GEN).todo | paste -sd, -) \
+ --bounding-dirs $(PRJ_SRC_PATH) \
+ -O $(DEEPCOPY_BASENAME); \
+ fi
+
+# For each dir in DEEPCOPY_DIRS, this establishes a dependency between the
+# output file and the input files that should trigger a rebuild.
+#
+# Note that this is a deps-only statement, not a full rule (see below). This
+# has to be done in a distinct step because wildcards don't work in static
+# pattern rules.
+#
+# The '$(eval)' is needed because this has a different RHS for each LHS, and
+# would otherwise produce results that make can't parse.
+#
+# We depend on the $(GOFILES_META).stamp to detect when the set of input files
+# has changed.
This allows us to detect deleted input files. +$(foreach dir, $(DEEPCOPY_DIRS), $(eval \ + $(dir)/$(DEEPCOPY_FILENAME): $(META_DIR)/$(dir)/$(GOFILES_META).stamp \ + $(gofiles__$(dir)) \ +)) + +# Unilaterally remove any leftovers from previous runs. +$(shell rm -f $(META_DIR)/$(DEEPCOPY_GEN)*.todo) + +# How to regenerate deep-copy code. This is a little slow to run, so we batch +# it up and trigger the batch from the 'generated_files' target. +$(DEEPCOPY_FILES): $(DEEPCOPY_GEN) + mkdir -p $$(dirname $(META_DIR)/$(DEEPCOPY_GEN)) + echo $(PRJ_SRC_PATH)/$(@D) >> $(META_DIR)/$(DEEPCOPY_GEN).todo + +# This calculates the dependencies for the generator tool, so we only rebuild +# it when needed. It is PHONY so that it always runs, but it only updates the +# file if the contents have actually changed. We 'sinclude' this later. +.PHONY: $(META_DIR)/$(DEEPCOPY_GEN).mk +$(META_DIR)/$(DEEPCOPY_GEN).mk: + mkdir -p $(@D); \ + (echo -n "$(DEEPCOPY_GEN): "; \ + DIRECT=$$(go list -f '{{.Dir}} {{.Dir}}/*.go' \ + ./cmd/libs/go2idl/deepcopy-gen); \ + INDIRECT=$$(go list \ + -f '{{range .Deps}}{{.}}{{"\n"}}{{end}}' \ + ./cmd/libs/go2idl/deepcopy-gen \ + | grep --color=never "^$(PRJ_SRC_PATH)" \ + | sed 's|^$(PRJ_SRC_PATH)|./|' \ + | xargs go list -f '{{.Dir}} {{.Dir}}/*.go'); \ + echo $$DIRECT $$INDIRECT \ + | sed 's/ / \\=,/g' \ + | tr '=,' '\n\t'; \ + ) | sed "s|$$(pwd -P)/||" > $@.tmp; \ + cmp -s $@.tmp $@ || cat $@.tmp > $@ && rm -f $@.tmp + +# Include dependency info for the generator tool. This will cause the rule of +# the same name to be considered and if it is updated, make will restart. +sinclude $(META_DIR)/$(DEEPCOPY_GEN).mk + +# How to build the generator tool. The deps for this are defined in +# the $(DEEPCOPY_GEN).mk, above. +# +# A word on the need to touch: This rule might trigger if, for example, a +# non-Go file was added or deleted from a directory on which this depends. +# This target needs to be reconsidered, but Go realizes it doesn't actually +# have to be rebuilt. In that case, make will forever see the dependency as +# newer than the binary, and try to rebuild it over and over. So we touch it, +# and make is happy. +$(DEEPCOPY_GEN): + hack/make-rules/build.sh cmd/libs/go2idl/deepcopy-gen + touch $@ + +# +# Open-api generation +# +# Any package that wants open-api functions generated must include a +# comment-tag in column 0 of one file of the form: +# // +k8s:openapi-gen=true +# +# The result file, in each pkg, of open-api generation. +OPENAPI_BASENAME := $(GENERATED_FILE_PREFIX)openapi +OPENAPI_FILENAME := $(OPENAPI_BASENAME).go + +# The tool used to generate open apis. +OPENAPI_GEN := $(BIN_DIR)/openapi-gen + +# Find all the directories that request open-api generation. +ifeq ($(DBG_MAKEFILE),1) + $(warning ***** finding all +k8s:openapi-gen tags) +endif +OPENAPI_DIRS := $(shell \ + grep --color=never -l '+k8s:openapi-gen=' $(ALL_K8S_TAG_FILES) \ + | xargs -n1 dirname \ + | sort -u \ +) + +OPENAPI_FILES := $(addsuffix /$(OPENAPI_FILENAME), $(OPENAPI_DIRS)) + +# This rule aggregates the set of files to generate and then generates them all +# in a single run of the tool. +.PHONY: gen_openapi +gen_openapi: $(OPENAPI_FILES) + if [[ -f $(META_DIR)/$(OPENAPI_GEN).todo ]]; then \ + ./hack/run-in-gopath.sh $(OPENAPI_GEN) \ + --v $(KUBE_VERBOSE) \ + -i $$(cat $(META_DIR)/$(OPENAPI_GEN).todo | paste -sd, -) \ + -O $(OPENAPI_BASENAME); \ + fi + +# For each dir in OPENAPI_DIRS, this establishes a dependency between the +# output file and the input files that should trigger a rebuild. 
+#
+# Note that this is a deps-only statement, not a full rule (see below). This
+# has to be done in a distinct step because wildcards don't work in static
+# pattern rules.
+#
+# The '$(eval)' is needed because this has a different RHS for each LHS, and
+# would otherwise produce results that make can't parse.
+#
+# We depend on the $(GOFILES_META).stamp to detect when the set of input files
+# has changed. This allows us to detect deleted input files.
+$(foreach dir, $(OPENAPI_DIRS), $(eval \
+ $(dir)/$(OPENAPI_FILENAME): $(META_DIR)/$(dir)/$(GOFILES_META).stamp \
+ $(gofiles__$(dir)) \
+))
+
+# Unilaterally remove any leftovers from previous runs.
+$(shell rm -f $(META_DIR)/$(OPENAPI_GEN)*.todo)
+
+# How to regenerate open-api code. We need to collect these up and trigger one
+# single run to generate definition for all types.
+$(OPENAPI_FILES): $(OPENAPI_GEN)
+ mkdir -p $$(dirname $(META_DIR)/$(OPENAPI_GEN))
+ echo $(PRJ_SRC_PATH)/$(@D) >> $(META_DIR)/$(OPENAPI_GEN).todo
+
+# This calculates the dependencies for the generator tool, so we only rebuild
+# it when needed. It is PHONY so that it always runs, but it only updates the
+# file if the contents have actually changed. We 'sinclude' this later.
+.PHONY: $(META_DIR)/$(OPENAPI_GEN).mk
+$(META_DIR)/$(OPENAPI_GEN).mk:
+ mkdir -p $(@D); \
+ (echo -n "$(OPENAPI_GEN): "; \
+ DIRECT=$$(go list -f '{{.Dir}} {{.Dir}}/*.go' \
+ ./cmd/libs/go2idl/openapi-gen); \
+ INDIRECT=$$(go list \
+ -f '{{range .Deps}}{{.}}{{"\n"}}{{end}}' \
+ ./cmd/libs/go2idl/openapi-gen \
+ | grep --color=never "^$(PRJ_SRC_PATH)" \
+ | sed 's|^$(PRJ_SRC_PATH)|./|' \
+ | xargs go list -f '{{.Dir}} {{.Dir}}/*.go'); \
+ echo $$DIRECT $$INDIRECT \
+ | sed 's/ / \\=,/g' \
+ | tr '=,' '\n\t'; \
+ ) | sed "s|$$(pwd -P)/||" > $@.tmp; \
+ cmp -s $@.tmp $@ || cat $@.tmp > $@ && rm -f $@.tmp
+
+# Include dependency info for the generator tool. This will cause the rule of
+# the same name to be considered and if it is updated, make will restart.
+sinclude $(META_DIR)/$(OPENAPI_GEN).mk
+
+# How to build the generator tool. The deps for this are defined in
+# the $(OPENAPI_GEN).mk, above.
+#
+# A word on the need to touch: This rule might trigger if, for example, a
+# non-Go file was added or deleted from a directory on which this depends.
+# This target needs to be reconsidered, but Go realizes it doesn't actually
+# have to be rebuilt. In that case, make will forever see the dependency as
+# newer than the binary, and try to rebuild it over and over. So we touch it,
+# and make is happy.
+$(OPENAPI_GEN):
+ hack/make-rules/build.sh cmd/libs/go2idl/openapi-gen
+ touch $@
+
+#
+# Conversion generation
+#
+# Any package that wants conversion functions generated must include one or
+# more comment-tags in any .go file, in column 0, of the form:
+# // +k8s:conversion-gen=<CONVERSION_TARGET_DIR>
+#
+# The CONVERSION_TARGET_DIR is a project-local path to another directory which
+# should be considered when evaluating peer types for conversions. Types which
+# are found in the source package (where conversions are being generated)
+# but do not have a peer in one of the target directories will not have
+# conversions generated.
+#
+# TODO: it might be better in the long term to make peer-types explicit in the
+# IDL.
+
+# The result file, in each pkg, of conversion generation.
+CONVERSION_BASENAME := $(GENERATED_FILE_PREFIX)conversion
+CONVERSION_FILENAME := $(CONVERSION_BASENAME).go
+
+# The tool used to generate conversions.
+CONVERSION_GEN := $(BIN_DIR)/conversion-gen
+
+# The name of the metadata file listing conversion peers for each pkg.
+CONVERSIONS_META := conversions.mk
+
+# All directories that request any form of conversion generation.
+ifeq ($(DBG_MAKEFILE),1)
+ $(warning ***** finding all +k8s:conversion-gen tags)
+endif
+CONVERSION_DIRS := $(shell \
+ grep --color=never '^// *+k8s:conversion-gen=' $(ALL_K8S_TAG_FILES) \
+ | cut -f1 -d: \
+ | xargs -n1 dirname \
+ | sort -u \
+)
+
+CONVERSION_FILES := $(addsuffix /$(CONVERSION_FILENAME), $(CONVERSION_DIRS))
+
+# This rule aggregates the set of files to generate and then generates them all
+# in a single run of the tool.
+.PHONY: gen_conversion
+gen_conversion: $(CONVERSION_FILES)
+ if [[ -f $(META_DIR)/$(CONVERSION_GEN).todo ]]; then \
+ ./hack/run-in-gopath.sh $(CONVERSION_GEN) \
+ --v $(KUBE_VERBOSE) \
+ -i $$(cat $(META_DIR)/$(CONVERSION_GEN).todo | paste -sd, -) \
+ -O $(CONVERSION_BASENAME); \
+ fi
+
+# Establish a dependency between the deps file and the dir. Whenever a dir
+# changes (files added or removed) the deps file will be considered stale.
+#
+# This is looser than we really need (e.g. we don't really care about non *.go
+# files or even *_test.go files), but this is much easier to represent.
+#
+# Because we 'sinclude' the deps file, it is considered for rebuilding, as part
+# of make's normal evaluation. If it gets rebuilt, make will restart.
+#
+# The '$(eval)' is needed because this has a different RHS for each LHS, and
+# would otherwise produce results that make can't parse.
+$(foreach dir, $(CONVERSION_DIRS), $(eval \
+ $(META_DIR)/$(dir)/$(CONVERSIONS_META): $(dir) \
+))
+
+# How to rebuild a deps file. When make determines that the deps file is stale
+# (see above), it executes this rule, and then re-loads the deps file.
+#
+# This is looser than we really need (e.g. we don't really care about test
+# files), but this is MUCH faster than calling `go list`.
+#
+# We regenerate the output file in order to satisfy make's "newer than" rules,
+# but we only need to rebuild targets if the contents actually changed. That
+# is what the .stamp file represents.
+$(foreach dir, $(CONVERSION_DIRS), \
+ $(META_DIR)/$(dir)/$(CONVERSIONS_META)):
+ TAGS=$$(grep --color=never -h '^// *+k8s:conversion-gen=' $</*.go \
+ | cut -f2- -d=); \
+ mkdir -p $(@D); \
+ echo "conversions__$< := $$(echo $${TAGS})" >$@.tmp; \
+ cmp -s $@.tmp $@ || touch $@.stamp; \
+ mv $@.tmp $@
+
+# Include any deps files as additional Makefile rules. This triggers make to
+# consider the deps files for rebuild, which makes the whole
+# dependency-management logic work. 'sinclude' is "silent include" which does
+# not fail if the file does not exist.
+$(foreach dir, $(CONVERSION_DIRS), $(eval \
+ sinclude $(META_DIR)/$(dir)/$(CONVERSIONS_META) \
+))
+
+# For each dir in CONVERSION_DIRS, this establishes a dependency between the
+# output file and the input files that should trigger a rebuild.
+#
+# The variable value was set in $(GOFILES_META) and included as part of the
+# dependency management logic.
+#
+# Note that this is a deps-only statement, not a full rule (see below). This
+# has to be done in a distinct step because wildcards don't work in static
+# pattern rules.
+#
+# The '$(eval)' is needed because this has a different RHS for each LHS, and
+# would otherwise produce results that make can't parse.
+#
+# We depend on the $(GOFILES_META).stamp to detect when the set of input files
+# has changed. This allows us to detect deleted input files.
+$(foreach dir, $(CONVERSION_DIRS), $(eval \ + $(dir)/$(CONVERSION_FILENAME): $(META_DIR)/$(dir)/$(GOFILES_META).stamp \ + $(gofiles__$(dir)) \ +)) + +# For each dir in CONVERSION_DIRS, for each target in $(conversions__$(dir)), +# this establishes a dependency between the output file and the input files +# that should trigger a rebuild. +# +# The variable value was set in $(GOFILES_META) and included as part of the +# dependency management logic. +# +# Note that this is a deps-only statement, not a full rule (see below). This +# has to be done in a distinct step because wildcards don't work in static +# pattern rules. +# +# The '$(eval)' is needed because this has a different RHS for each LHS, and +# would otherwise produce results that make can't parse. +# +# We depend on the $(GOFILES_META).stamp to detect when the set of input files +# has changed. This allows us to detect deleted input files. +$(foreach dir, $(CONVERSION_DIRS), \ + $(foreach tgt, $(conversions__$(dir)), $(eval \ + $(dir)/$(CONVERSION_FILENAME): $(META_DIR)/$(tgt)/$(GOFILES_META).stamp \ + $(gofiles__$(tgt)) \ + )) \ +) + +# Unilaterally remove any leftovers from previous runs. +$(shell rm -f $(META_DIR)/$(CONVERSION_GEN)*.todo) + +# How to regenerate conversion code. This is a little slow to run, so we batch +# it up and trigger the batch from the 'generated_files' target. +$(CONVERSION_FILES): $(CONVERSION_GEN) + mkdir -p $$(dirname $(META_DIR)/$(CONVERSION_GEN)) + echo $(PRJ_SRC_PATH)/$(@D) >> $(META_DIR)/$(CONVERSION_GEN).todo + +# This calculates the dependencies for the generator tool, so we only rebuild +# it when needed. It is PHONY so that it always runs, but it only updates the +# file if the contents have actually changed. We 'sinclude' this later. +.PHONY: $(META_DIR)/$(CONVERSION_GEN).mk +$(META_DIR)/$(CONVERSION_GEN).mk: + mkdir -p $(@D); \ + (echo -n "$(CONVERSION_GEN): "; \ + DIRECT=$$(go list -f '{{.Dir}} {{.Dir}}/*.go' \ + ./cmd/libs/go2idl/conversion-gen); \ + INDIRECT=$$(go list \ + -f '{{range .Deps}}{{.}}{{"\n"}}{{end}}' \ + ./cmd/libs/go2idl/conversion-gen \ + | grep --color=never "^$(PRJ_SRC_PATH)" \ + | sed 's|^$(PRJ_SRC_PATH)|./|' \ + | xargs go list -f '{{.Dir}} {{.Dir}}/*.go'); \ + echo $$DIRECT $$INDIRECT \ + | sed 's/ / \\=,/g' \ + | tr '=,' '\n\t'; \ + ) | sed "s|$$(pwd -P)/||" > $@.tmp; \ + cmp -s $@.tmp $@ || cat $@.tmp > $@ && rm -f $@.tmp + +# Include dependency info for the generator tool. This will cause the rule of +# the same name to be considered and if it is updated, make will restart. +sinclude $(META_DIR)/$(CONVERSION_GEN).mk + +# How to build the generator tool. The deps for this are defined in +# the $(CONVERSION_GEN).mk, above. +# +# A word on the need to touch: This rule might trigger if, for example, a +# non-Go file was added or deleted from a directory on which this depends. +# This target needs to be reconsidered, but Go realizes it doesn't actually +# have to be rebuilt. In that case, make will forever see the dependency as +# newer than the binary, and try to rebuild it over and over. So we touch it, +# and make is happy. 
+$(CONVERSION_GEN): + hack/make-rules/build.sh cmd/libs/go2idl/conversion-gen + touch $@ diff --git a/vendor/k8s.io/kubernetes/OWNERS b/vendor/k8s.io/kubernetes/OWNERS new file mode 100644 index 000000000000..b55c4799aab8 --- /dev/null +++ b/vendor/k8s.io/kubernetes/OWNERS @@ -0,0 +1,7 @@ +assignees: + - bgrant0607 + - brendandburns + - dchen1107 + - lavalamp + - smarterclayton + - thockin diff --git a/vendor/k8s.io/kubernetes/README.md b/vendor/k8s.io/kubernetes/README.md new file mode 100644 index 000000000000..7678eb1daccc --- /dev/null +++ b/vendor/k8s.io/kubernetes/README.md @@ -0,0 +1,160 @@ +# Kubernetes + +[![Submit Queue Widget]][Submit Queue] [![GoDoc Widget]][GoDoc] [![Coverage Status Widget]][Coverage Status] + +[Submit Queue]: http://submit-queue.k8s.io/#/e2e +[Submit Queue Widget]: http://submit-queue.k8s.io/health.svg?v=1 +[GoDoc]: https://godoc.org/k8s.io/kubernetes +[GoDoc Widget]: https://godoc.org/k8s.io/kubernetes?status.svg +[Coverage Status]: https://coveralls.io/r/kubernetes/kubernetes +[Coverage Status Widget]: https://coveralls.io/repos/kubernetes/kubernetes/badge.svg + +### Are you ... + + * Interested in learning more about using Kubernetes? Please see our user-facing documentation on [kubernetes.io](http://kubernetes.io) + * Interested in hacking on the core Kubernetes code base? Keep reading! + +
+
+Kubernetes is an open source system for managing [containerized applications](https://github.com/kubernetes/kubernetes/wiki/Why-Kubernetes%3F#why-containers) across multiple hosts,
+providing basic mechanisms for deployment, maintenance, and scaling of applications.
+
+Kubernetes is:
+
+* **lean**: lightweight, simple, accessible
+* **portable**: public, private, hybrid, multi-cloud
+* **extensible**: modular, pluggable, hookable, composable
+* **self-healing**: auto-placement, auto-restart, auto-replication
+
+Kubernetes builds upon a [decade and a half of experience at Google running production workloads at scale](https://research.google.com/pubs/pub43438.html), combined with best-of-breed ideas and practices from the community.
+
+
+### Kubernetes is ready for Production!
+
+With the [1.0.1 release](https://github.com/kubernetes/kubernetes/releases/tag/v1.0.1) Kubernetes is ready to serve your production workloads.
+
+### Kubernetes can run anywhere!
+
+You can run Kubernetes on your local workstation under Vagrant, on cloud providers (e.g. GCE, AWS, Azure), and on physical hardware. Essentially, anywhere Linux runs you can run Kubernetes. Check out the [Getting Started Guides](http://kubernetes.io/docs/getting-started-guides/) for details.
+
+## Concepts
+
+Kubernetes works with the following concepts:
+
+[**Cluster**](docs/admin/README.md)
+: A cluster is a set of physical or virtual machines and other infrastructure resources used by Kubernetes to run your applications. Kubernetes can run anywhere! See the [Getting Started Guides](docs/getting-started-guides/) for instructions for a variety of services.
+
+[**Node**](docs/admin/node.md)
+: A node is a physical or virtual machine running Kubernetes, onto which pods can be scheduled.
+
+[**Pod**](docs/user-guide/pods.md)
+: Pods are a colocated group of application containers with shared volumes. They're the smallest deployable units that can be created, scheduled, and managed with Kubernetes. Pods can be created individually, but it's recommended that you use a replication controller even if creating a single pod.
+
+[**Replication controller**](docs/user-guide/replication-controller.md)
+: Replication controllers manage the lifecycle of pods. They ensure that a specified number of pods are running
+at any given time, by creating or killing pods as required.
+
+[**Service**](docs/user-guide/services.md)
+: Services provide a single, stable name and address for a set of pods.
+They act as basic load balancers.
+
+[**Label**](docs/user-guide/labels.md)
+: Labels are used to organize and select groups of objects based on key:value pairs.
+
+## Documentation
+
+Kubernetes documentation is organized into several categories.
+ + - **Getting started guides** + - for people who want to create a Kubernetes cluster + - in [Creating a Kubernetes Cluster](docs/getting-started-guides/README.md) + - for people who want to port Kubernetes to a new environment + - in [Getting Started from Scratch](docs/getting-started-guides/scratch.md) + - **User documentation** + - for people who want to run programs on an existing Kubernetes cluster + - in the [Kubernetes User Guide: Managing Applications](docs/user-guide/README.md) + *Tip: You can also view help documentation out on [http://kubernetes.io/docs/](http://kubernetes.io/docs/).* + - the [Kubectl Command Line Interface](docs/user-guide/kubectl/kubectl.md) is a detailed reference on + the `kubectl` CLI + - [User FAQ](https://github.com/kubernetes/kubernetes/wiki/User-FAQ) + - **Cluster administrator documentation** + - for people who want to create a Kubernetes cluster and administer it + - in the [Kubernetes Cluster Admin Guide](docs/admin/README.md) + - **Developer and API documentation** + - for people who want to write programs that access the Kubernetes API, write plugins + or extensions, or modify the core Kubernetes code + - in the [Kubernetes Developer Guide](docs/devel/README.md) + - see also [notes on the API](docs/api.md) + - see also the [API object documentation](docs/api-reference/README.md), a + detailed description of all fields found in the core API objects + - **Walkthroughs and examples** + - hands-on introduction and example config files + - in the [user guide](docs/user-guide/README.md#quick-walkthrough) + - in the [docs/examples directory](examples/) + - **Contributions from the Kubernetes community** + - in the [docs/contrib directory](contrib/) + - **Design documentation and design proposals** + - for people who want to understand the design of Kubernetes, and feature proposals + - design docs in the [Kubernetes Design Overview](docs/design/README.md) and the [docs/design directory](docs/design/) + - proposals in the [docs/proposals directory](docs/proposals/) + - **Wiki/FAQ** + - in the [wiki](https://github.com/kubernetes/kubernetes/wiki) + - troubleshooting information in the [troubleshooting guide](docs/troubleshooting.md) + +## Community, discussion, contribution, and support + +See which companies are committed to driving quality in Kubernetes on our [community page](http://kubernetes.io/community/). + +Do you want to help "shape the evolution of technologies that are container packaged, dynamically scheduled and microservices oriented?" + +You should consider joining the [Cloud Native Computing Foundation](https://cncf.io/about). For details about who's involved and how Kubernetes plays a role, read [their announcement](https://cncf.io/news/announcement/2015/07/new-cloud-native-computing-foundation-drive-alignment-among-container). + +### Code of conduct + +Participation in the Kubernetes community is governed by the [Kubernetes Code of Conduct](code-of-conduct.md). + +### Are you ready to add to the discussion? + +We have presence on: + + * [Twitter](https://twitter.com/kubernetesio) + * [Google+](https://plus.google.com/u/0/b/116512812300813784482/116512812300813784482) + * [Blogger](http://blog.kubernetes.io/) + +You can also view recordings of past events and presentations on our [Media page](http://kubernetes.io/media/). + +For Q&A, our threads are at: + + * [Stack Overflow](http://stackoverflow.com/questions/tagged/kubernetes) + * [Slack](http://slack.k8s.io/) + +### Want to contribute to Kubernetes? 
+
+If you're interested in being a contributor and want to get involved in developing Kubernetes, start in the [Kubernetes Developer Guide](docs/devel/README.md) and also review the [contributor guidelines](CONTRIBUTING.md).
+
+Or, if you just have an idea for a new feature, see the [Kubernetes Features](https://github.com/kubernetes/features) repository for details on how to propose it.
+
+### Support
+
+There are many channels you can use to reach us, but you can help us be efficient in getting you the help you need.
+
+If you need support, start with the [troubleshooting guide](docs/troubleshooting.md#getting-help) and work your way through the process that we've outlined.
+
+That said, if you have questions, reach out to us one way or another. We don't bite!
+
+### Community resources
+
+* **Awesome-kubernetes** - [http://ramitsurana.github.io/awesome-kubernetes](http://ramitsurana.github.io/awesome-kubernetes)
+
+You can find more projects, tools, and articles related to Kubernetes on the [awesome-kubernetes](https://github.com/ramitsurana/awesome-kubernetes) list. Add your project there and help us make it better.
+
+* **CoreKube** - [https://corekube.com](https://corekube.com):
+
+Instructive & educational resources for the Kubernetes community. By the community.
+
+* **Community Documentation**
+
+Here you can learn more about the current happenings in the [kubernetes community](https://github.com/kubernetes/community).
+
+[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/README.md?pixel)]()
diff --git a/vendor/k8s.io/kubernetes/Vagrantfile b/vendor/k8s.io/kubernetes/Vagrantfile
new file mode 100644
index 000000000000..f199ee3a7c50
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/Vagrantfile
@@ -0,0 +1,314 @@
+# -*- mode: ruby -*-
+# vi: set ft=ruby :
+
+# Vagrantfile API/syntax version. Don't touch unless you know what you're doing!
+VAGRANTFILE_API_VERSION = "2"
+
+# Require a recent version of vagrant, otherwise some have reported errors setting host names on boxes
+Vagrant.require_version ">= 1.7.4"
+
+if ARGV.first == "up" && ENV['USING_KUBE_SCRIPTS'] != 'true'
+  raise Vagrant::Errors::VagrantError.new, <<END
+Calling 'vagrant up' directly is not supported.  Instead, please run the following:
+
+  export KUBERNETES_PROVIDER=vagrant
+  ./cluster/kube-up.sh
+END
+end
+
+# The number of nodes to provision
+$num_node = (ENV['NUM_NODES'] || 1).to_i
+
+# ip configuration
+$master_ip = ENV['MASTER_IP']
+$node_ip_base = ENV['NODE_IP_BASE'] || ""
+$node_ips = $num_node.times.collect { |n| $node_ip_base + "#{n+3}" }
+
+# Determine the OS platform to use
+$kube_os = ENV['KUBERNETES_OS'] || "fedora"
+
+# Determine whether vagrant should use nfs to sync folders
+$use_nfs = ENV['KUBERNETES_VAGRANT_USE_NFS'] == 'true'
+
+# The box to use for each provider; these can be overridden with
+# KUBERNETES_BOX_NAME, KUBERNETES_BOX_URL, and KUBERNETES_BOX_VERSION
+# (see setvmboxandurl below).
+$kube_provider_boxes = {
+  :parallels => {
+    'fedora' => {
+      # :box_url and :box_version are optional (and mutually exclusive);
+      # if :box_url is omitted the box will be retrieved by :box_name (and
+      # :box_version if provided) from
+      # http://atlas.hashicorp.com/boxes/search (formerly
+      # http://vagrantcloud.com/); this allows you to override :box_name with
+      # your own value so long as you provide :box_url; for example, the
+      # "official" name of this box is "rickard-von-essen/
+      # opscode_fedora-20", but by providing the URL and our own name, we
+      # make it appear as yet another provider under the "kube-fedora22"
+      # box
+      :box_name => 'kube-fedora23',
+      :box_url => 'https://opscode-vm-bento.s3.amazonaws.com/vagrant/parallels/opscode_fedora-23_chef-provisionerless.box'
+    }
+  },
+  :virtualbox => {
+    'fedora' => {
+      :box_name => 'kube-fedora23',
+      :box_url => 'https://opscode-vm-bento.s3.amazonaws.com/vagrant/virtualbox/opscode_fedora-23_chef-provisionerless.box'
+    }
+  },
+  :libvirt => {
+    'fedora' => {
+      :box_name => 'kube-fedora23',
+      :box_url => 'https://dl.fedoraproject.org/pub/fedora/linux/releases/23/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-23-20151030.x86_64.vagrant-libvirt.box'
+    }
+  },
+  :vmware_desktop => {
+    'fedora' => {
+      :box_name => 'kube-fedora23',
+      :box_url => 'https://opscode-vm-bento.s3.amazonaws.com/vagrant/vmware/opscode_fedora-23_chef-provisionerless.box'
+    }
+  },
+  :vsphere => {
+    'fedora' => {
+      :box_name => 'vsphere-dummy',
+      :box_url => 'https://github.com/deromka/vagrant-vsphere/blob/master/vsphere-dummy.box?raw=true'
+    }
+  }
+}
+
+# Give access to all physical cpu cores
+# Previously cargo-culted from here:
+# http://www.stefanwrobel.com/how-to-make-vagrant-performance-not-suck
+# Rewritten to actually determine the number of hardware cores instead of assuming
+# that the host has hyperthreading enabled.
+host = RbConfig::CONFIG['host_os']
+if host =~ /darwin/
+  $vm_cpus = `sysctl -n hw.physicalcpu`.to_i
+elsif host =~ /linux/
+  # This should work on most processors, however it will fail on ones without the core id field.
+  # So far I have only seen this on a Raspberry Pi, which you probably don't want to run vagrant on anyhow...
+  # But just in case, we'll default to the result of nproc if we get 0, just to be safe.
+  $vm_cpus = `cat /proc/cpuinfo | grep 'core id' | sort -u | wc -l`.to_i
+  if $vm_cpus < 1
+    $vm_cpus = `nproc`.to_i
+  end
+else # sorry Windows folks, I can't help you
+  $vm_cpus = 2
+end
+
+# Give VM 1024MB of RAM by default
+# In Fedora VM, tmpfs device is mapped to /tmp. tmpfs is given 50% of RAM allocation.
+# When doing Salt provisioning, we copy approximately 200MB of content in /tmp before anything else happens.
+# This causes problems if anything else was in /tmp or the other directories that are bound to tmpfs device (i.e., /run, etc.)
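+# For example (the values here are illustrative only), to give the master 2GB
+# and each node 4GB of RAM for a run driven by the cluster scripts:
+#   KUBERNETES_MASTER_MEMORY=2048 KUBERNETES_NODE_MEMORY=4096 ./cluster/kube-up.sh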
+$vm_master_mem = (ENV['KUBERNETES_MASTER_MEMORY'] || ENV['KUBERNETES_MEMORY'] || 1280).to_i +$vm_node_mem = (ENV['KUBERNETES_NODE_MEMORY'] || ENV['KUBERNETES_MEMORY'] || 2048).to_i + +Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| + if Vagrant.has_plugin?("vagrant-proxyconf") + $http_proxy = ENV['KUBERNETES_HTTP_PROXY'] || "" + $https_proxy = ENV['KUBERNETES_HTTPS_PROXY'] || "" + $no_proxy = ENV['KUBERNETES_NO_PROXY'] || "127.0.0.1" + config.proxy.http = $http_proxy + config.proxy.https = $https_proxy + config.proxy.no_proxy = $no_proxy + end + + # this corrects a bug in 1.8.5 where an invalid SSH key is inserted. + if Vagrant::VERSION == "1.8.5" + config.ssh.insert_key = false + end + + def setvmboxandurl(config, provider) + if ENV['KUBERNETES_BOX_NAME'] then + config.vm.box = ENV['KUBERNETES_BOX_NAME'] + + if ENV['KUBERNETES_BOX_URL'] then + config.vm.box_url = ENV['KUBERNETES_BOX_URL'] + end + + if ENV['KUBERNETES_BOX_VERSION'] then + config.vm.box_version = ENV['KUBERNETES_BOX_VERSION'] + end + else + config.vm.box = $kube_provider_boxes[provider][$kube_os][:box_name] + + if $kube_provider_boxes[provider][$kube_os][:box_url] then + config.vm.box_url = $kube_provider_boxes[provider][$kube_os][:box_url] + end + + if $kube_provider_boxes[provider][$kube_os][:box_version] then + config.vm.box_version = $kube_provider_boxes[provider][$kube_os][:box_version] + end + end + end + + def customize_vm(config, vm_mem) + + if $use_nfs then + config.vm.synced_folder ".", "/vagrant", nfs: true + end + + # Try VMWare Fusion first (see + # https://docs.vagrantup.com/v2/providers/basic_usage.html) + config.vm.provider :vmware_fusion do |v, override| + setvmboxandurl(override, :vmware_desktop) + v.vmx['memsize'] = vm_mem + v.vmx['numvcpus'] = $vm_cpus + end + + # configure libvirt provider + config.vm.provider :libvirt do |v, override| + setvmboxandurl(override, :libvirt) + v.memory = vm_mem + v.cpus = $vm_cpus + v.nested = true + v.volume_cache = 'none' + end + + # Then try VMWare Workstation + config.vm.provider :vmware_workstation do |v, override| + setvmboxandurl(override, :vmware_desktop) + v.vmx['memsize'] = vm_mem + v.vmx['numvcpus'] = $vm_cpus + end + + # Then try Parallels + config.vm.provider :parallels do |v, override| + setvmboxandurl(override, :parallels) + v.memory = vm_mem # v.customize ['set', :id, '--memsize', vm_mem] + v.cpus = $vm_cpus # v.customize ['set', :id, '--cpus', $vm_cpus] + + # Don't attempt to update the Parallels tools on the image (this can + # be done manually if necessary) + v.update_guest_tools = false # v.customize ['set', :id, '--tools-autoupdate', 'off'] + + # Set up Parallels folder sharing to behave like VirtualBox (i.e., + # mount the current directory as /vagrant and that's it) + v.customize ['set', :id, '--shf-guest', 'off'] + v.customize ['set', :id, '--shf-guest-automount', 'off'] + v.customize ['set', :id, '--shf-host', 'on'] + + # Synchronize VM clocks to host clock (Avoid certificate invalid issue) + v.customize ['set', :id, '--time-sync', 'on'] + + # Remove all auto-mounted "shared folders"; the result seems to + # persist between runs (i.e., vagrant halt && vagrant up) + override.vm.provision :shell, :inline => (%q{ + set -ex + if [ -d /media/psf ]; then + for i in /media/psf/*; do + if [ -d "${i}" ]; then + umount "${i}" || true + rmdir -v "${i}" + fi + done + rmdir -v /media/psf + fi + exit + }).strip + end + + # Then try vsphere + config.vm.provider :vsphere do |vsphere, override| + setvmboxandurl(override, :vsphere) + + #config.vm.hostname = 
ENV['MASTER_NAME'] + + config.ssh.username = ENV['MASTER_USER'] + config.ssh.password = ENV['MASTER_PASSWD'] + + config.ssh.pty = true + config.ssh.insert_key = true + #config.ssh.private_key_path = '~/.ssh/id_rsa_vsphere' + + # Don't attempt to update the tools on the image (this can + # be done manually if necessary) + # vsphere.update_guest_tools = false # v.customize ['set', :id, '--tools-autoupdate', 'off'] + + # The vSphere host we're going to connect to + vsphere.host = ENV['VAGRANT_VSPHERE_URL'] + + # The ESX host for the new VM + vsphere.compute_resource_name = ENV['VAGRANT_VSPHERE_RESOURCE_POOL'] + + # The resource pool for the new VM + #vsphere.resource_pool_name = 'Comp' + + # path to folder where new VM should be created, if not specified template's parent folder will be used + vsphere.vm_base_path = ENV['VAGRANT_VSPHERE_BASE_PATH'] + + # The template we're going to clone + vsphere.template_name = ENV['VAGRANT_VSPHERE_TEMPLATE_NAME'] + + # The name of the new machine + #vsphere.name = ENV['MASTER_NAME'] + + # vSphere login + vsphere.user = ENV['VAGRANT_VSPHERE_USERNAME'] + + # vSphere password + vsphere.password = ENV['VAGRANT_VSPHERE_PASSWORD'] + + # cpu count + vsphere.cpu_count = $vm_cpus + + # memory in MB + vsphere.memory_mb = vm_mem + + # If you don't have SSL configured correctly, set this to 'true' + vsphere.insecure = ENV['VAGRANT_VSPHERE_INSECURE'] + end + + + # Don't attempt to update Virtualbox Guest Additions (requires gcc) + if Vagrant.has_plugin?("vagrant-vbguest") then + config.vbguest.auto_update = false + end + # Finally, fall back to VirtualBox + config.vm.provider :virtualbox do |v, override| + setvmboxandurl(override, :virtualbox) + v.memory = vm_mem # v.customize ["modifyvm", :id, "--memory", vm_mem] + v.cpus = $vm_cpus # v.customize ["modifyvm", :id, "--cpus", $vm_cpus] + + # Use faster paravirtualized networking + v.customize ["modifyvm", :id, "--nictype1", "virtio"] + v.customize ["modifyvm", :id, "--nictype2", "virtio"] + end + end + + # Kubernetes master + config.vm.define "master" do |c| + customize_vm c, $vm_master_mem + if ENV['KUBE_TEMP'] then + script = "#{ENV['KUBE_TEMP']}/master-start.sh" + c.vm.provision "shell", run: "always", path: script + end + c.vm.network "private_network", ip: "#{$master_ip}" + end + + # Kubernetes node + $num_node.times do |n| + node_vm_name = "node-#{n+1}" + + config.vm.define node_vm_name do |node| + customize_vm node, $vm_node_mem + + node_ip = $node_ips[n] + if ENV['KUBE_TEMP'] then + script = "#{ENV['KUBE_TEMP']}/node-start-#{n}.sh" + node.vm.provision "shell", run: "always", path: script + end + node.vm.network "private_network", ip: "#{node_ip}" + end + end +end diff --git a/vendor/k8s.io/kubernetes/code-of-conduct.md b/vendor/k8s.io/kubernetes/code-of-conduct.md new file mode 100644 index 000000000000..6453201cad45 --- /dev/null +++ b/vendor/k8s.io/kubernetes/code-of-conduct.md @@ -0,0 +1,58 @@ +## Kubernetes Community Code of Conduct + +### Contributor Code of Conduct + +As contributors and maintainers of this project, and in the interest of fostering +an open and welcoming community, we pledge to respect all people who contribute +through reporting issues, posting feature requests, updating documentation, +submitting pull requests or patches, and other activities. 
+
+We are committed to making participation in this project a harassment-free experience for
+everyone, regardless of level of experience, gender, gender identity and expression,
+sexual orientation, disability, personal appearance, body size, race, ethnicity, age,
+religion, or nationality.
+
+Examples of unacceptable behavior by participants include:
+
+* The use of sexualized language or imagery
+* Personal attacks
+* Trolling or insulting/derogatory comments
+* Public or private harassment
+* Publishing others' private information, such as physical or electronic addresses,
+  without explicit permission
+* Other unethical or unprofessional conduct.
+
+Project maintainers have the right and responsibility to remove, edit, or reject
+comments, commits, code, wiki edits, issues, and other contributions that are not
+aligned to this Code of Conduct. By adopting this Code of Conduct, project maintainers
+commit themselves to fairly and consistently applying these principles to every aspect
+of managing this project. Project maintainers who do not follow or enforce the Code of
+Conduct may be permanently removed from the project team.
+
+This code of conduct applies both within project spaces and in public spaces
+when an individual is representing the project or its community.
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting a Kubernetes maintainer, Sarah Novotny, and/or Dan Kohn.
+
+This Code of Conduct is adapted from the Contributor Covenant
+(http://contributor-covenant.org), version 1.2.0, available at
+http://contributor-covenant.org/version/1/2/0/
+
+### Kubernetes Events Code of Conduct
+
+Kubernetes events are working conferences intended for professional networking and collaboration in the
+Kubernetes community. Attendees are expected to behave according to professional standards and in accordance
+with their employer's policies on appropriate workplace behavior.
+
+While at Kubernetes events or related social networking opportunities, attendees should not engage in
+discriminatory or offensive speech or actions regarding gender, sexuality, race, or religion. Speakers should
+be especially aware of these concerns.
+
+The Kubernetes team does not condone any statements by speakers contrary to these standards. The Kubernetes
+team reserves the right to deny entrance and/or eject from an event (without refund) any individual found to
+be engaging in discriminatory or offensive speech or actions.
+ +Please bring any concerns to to the immediate attention of Kubernetes event staff + + +[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/code-of-conduct.md?pixel)]() diff --git a/vendor/k8s.io/kubernetes/labels.yaml b/vendor/k8s.io/kubernetes/labels.yaml new file mode 100644 index 000000000000..0821c81d3ea6 --- /dev/null +++ b/vendor/k8s.io/kubernetes/labels.yaml @@ -0,0 +1,285 @@ +labels: + - name: kind/bug + color: e11d21 + - name: kind/enhancement + color: c7def8 + - name: help-wanted + color: 006b75 + - name: area/test + color: fad8c7 + - name: kind/design + color: c7def8 + - name: priority/P0 + color: e11d21 + - name: priority/P1 + color: eb6420 + - name: priority/P2 + color: fbca04 + - name: 'cla: no' + color: e11d21 + - name: area/networking + color: 006b75 + - name: dependency/docker + color: fbfa04 + - name: area/security + color: d4c5f9 + - name: component/kubelet + color: 0052cc + - name: kind/documentation + color: c7def8 + - name: kind/cleanup + color: c7def8 + - name: area/performance + color: d4c5f9 + - name: area/introspection + color: bfe5bf + - name: area/api + color: e1ed21 + - name: area/build-release + color: bfe5bf + - name: area/scheduling + color: ab6420 + - name: area/isolation + color: aba420 + - name: area/app-config-deployment + color: fb6420 + - name: dependency/etcd + color: fbfa04 + - name: component/clientlib + color: 0052cc + - name: component/kube-proxy + color: 0052cc + - name: area/os/ubuntu + color: d4c5f9 + - name: kind/support + color: eb6420 + - name: area/downward-api + color: 8fffcf + - name: area/upward-api + color: a0c280 + - name: area/usability + color: bfe5bf + - name: area/kubelet-api + color: fbca04 + - name: area/platform/gce + color: d4c5f9 + - name: area/platform/azure + color: d4c5f9 + - name: area/platform/vagrant + color: d4c5f9 + - name: area/platform/vsphere + color: d4c5f9 + - name: area/os/fedora + color: d4c5f9 + - name: area/os/coreos + color: d4c5f9 + - name: kind/example + color: c7def8 + - name: area/images-registry + color: eba420 + - name: component/controller-manager + color: 0052cc + - name: component/apiserver + color: 0052cc + - name: component/ui + color: 0052cc + - name: area/batch + color: c7def8 + - name: area/HA + color: bfd4f2 + - name: area/extensibility + color: ffaa04 + - name: component/kubectl + color: 0052cc + - name: component/nodecontroller + color: 0052cc + - name: lgtm + color: 15dd18 + - name: area/monitoring + color: fef2c0 + - name: area/logging + color: fef2c0 + - name: area/cadvisor + color: fef2c0 + - name: area/platform/aws + color: d4c5f9 + - name: priority/P3 + color: fef2c0 + - name: area/cloudprovider + color: f1fd21 + - name: area/platform/gke + color: d4c5f9 + - name: area/reliability + color: a10d01 + - name: area/storage + color: 006b75 + - name: '[deprecated--DO NOT USE]old-team/master' + color: ffffff + - name: team/node + color: d2b48c + - name: team/cluster + color: d2b48c + - name: team/CSI-API Machinery SIG + color: d2b48c + - name: area/cluster-lifecycle + color: d4c5f9 + - name: 'cla: yes' + color: bfe5bf + - name: team/test-infra + color: d2b48c + - name: team/none + color: d2b48c + - name: area/platform/mesos + color: d4c5f9 + - name: area/upgrade + color: bfd4f2 + - name: release-note + color: c2e0c6 + - name: dependency/rkt + color: fbfa04 + - name: area/volumes + color: bfe5bf + - name: ok-to-merge + color: fbca04 + - name: team/mesosphere + color: d2b48c + - name: area/autoscaling + color: "5319e7" + - name: area/platform/openstack + color: d4c5f9 + - 
name: needs-rebase + color: BDBDBD + - name: cherrypick-candidate + color: fef2c0 + - name: team/ux + color: d2b48c + - name: team/api + color: d2b48c + - name: team/control-plane + color: d2b48c + - name: size/XS + color: "009900" + - name: size/S + color: 77bb00 + - name: size/M + color: eebb00 + - name: size/L + color: ee9900 + - name: size/XL + color: ee5500 + - name: size/XXL + color: ee0000 + - name: area/swagger + color: d4c5f9 + - name: needs-ok-to-merge + color: ededed + - name: retest-not-required + color: eb6420 + - name: kind/postmortem + color: bfe5bf + - name: area/cluster-federation + color: fad8c7 + - name: team/redhat + color: d2b48c + - name: kind/api-change + color: c7def8 + - name: kind/new-api + color: c7def8 + - name: kind/technical-debt + color: c7def8 + - name: kind/velocity-improvement + color: c7def8 + - name: kind/flake + color: f7c6c7 + - name: kind/feature + color: c7def8 + - name: team/release-infra + color: d2b48c + - name: area/system-requirement + color: bfdadc + - name: team/gke + color: d2b48c + - name: kind/workaround-removal + color: c7def8 + - name: area/admin + color: "009800" + - name: 'cla: human-approved' + color: bfe5bf + - name: area/devexp + color: "5319e7" + - name: 0 - Backlog + color: CCCCCC + - name: 1 - Ready + color: CCCCCC + - name: 2 - Working <= 5 + color: CCCCCC + - name: 3 - Review + color: CCCCCC + - name: 4 - Done + color: CCCCCC + - name: davidopp + color: bfdadc + - name: kind/mesos-flake + color: f7c6c7 + - name: team/huawei + color: d2b48c + - name: team/sig-aws + color: d2b48c + - name: kind/old-docs + color: c7def8 + - name: kind/ecosystem-enablement + color: c7def8 + - name: do-not-merge + color: e11d21 + - name: kind/friction + color: c7def8 + - name: cherrypick-approved + color: fef2c0 + - name: release-note-label-needed + color: db5a64 + - name: release-note-none + color: c2e0c6 + - name: area/stateful-apps + color: ff6060 + - name: release-note-action-required + color: c2e0c6 + - name: area/auth + color: 00cc00 + - name: area/example/cassandra + color: d4c5f9 + - name: stale + color: "795548" + - name: area/os/gci + color: e99695 + - name: documentation/content-gap + color: f9d0c4 + - name: documentation/out-of-date + color: f9d0c4 + - name: documentation/confusing + color: f9d0c4 + - name: documentation/better-examples + color: f9d0c4 + - name: component/node-e2e + color: 0052cc + - name: component/deployment + color: 0052cc + - name: release-note-experimental + color: c2e0c6 + - name: e2e-not-required [deprecated use retest-not-required] + color: ededed + - name: component/dns + color: 1d76db + - name: 0 - Triage + color: AAAAAA + - name: team/rktnetes + color: d2b48c + - name: team/workloads + color: d2b48c + - name: keep-open + color: fbca04 + - name: component/daemonset + color: 0052cc + - name: retest-not-required-docs-only + color: fbca04 + - name: approved + color: 0ffa16 diff --git a/vendor/k8s.io/kubernetes/pkg/OWNERS b/vendor/k8s.io/kubernetes/pkg/OWNERS new file mode 100644 index 000000000000..c79bb80fffbe --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/OWNERS @@ -0,0 +1,6 @@ +assignees: + - brendandburns + - dchen1107 + - lavalamp + - smarterclayton + - thockin diff --git a/vendor/k8s.io/kubernetes/pkg/util/BUILD b/vendor/k8s.io/kubernetes/pkg/util/BUILD new file mode 100644 index 000000000000..1934187ba053 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/util/BUILD @@ -0,0 +1,55 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + 
"doc.go", + "string_flag.go", + "template.go", + "trace.go", + "umask.go", + "util.go", + ] + select({ + "@io_bazel_rules_go//go/platform:windows_amd64": [ + "umask_windows.go", + ], + "//conditions:default": [], + }), + importpath = "k8s.io/kubernetes/pkg/util", + visibility = ["//visibility:public"], + deps = ["//vendor/github.com/golang/glog:go_default_library"], +) + +go_test( + name = "go_default_test", + srcs = [ + "template_test.go", + "util_test.go", + ], + importpath = "k8s.io/kubernetes/pkg/util", + library = ":go_default_library", + deps = [ + "//vendor/github.com/stretchr/testify/assert:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/util/diff:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//vendor/k8s.io/kubernetes/pkg/util/clock:all-srcs", + "//vendor/k8s.io/kubernetes/pkg/util/flag:all-srcs", + "//vendor/k8s.io/kubernetes/pkg/util/sets:all-srcs", + "//vendor/k8s.io/kubernetes/pkg/util/yaml:all-srcs", + ], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/util/clock/BUILD b/vendor/k8s.io/kubernetes/pkg/util/clock/BUILD index 60fd4a5124ca..b1d88da97c4c 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/clock/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/util/clock/BUILD @@ -1,14 +1,17 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "go_default_library", srcs = ["clock.go"], importpath = "k8s.io/kubernetes/pkg/util/clock", + visibility = ["//visibility:public"], +) + +go_test( + name = "go_default_test", + srcs = ["clock_test.go"], + importpath = "k8s.io/kubernetes/pkg/util/clock", + library = ":go_default_library", ) filegroup( @@ -22,4 +25,5 @@ filegroup( name = "all-srcs", srcs = [":package-srcs"], tags = ["automanaged"], + visibility = ["//visibility:public"], ) diff --git a/vendor/k8s.io/kubernetes/pkg/util/clock/clock_test.go b/vendor/k8s.io/kubernetes/pkg/util/clock/clock_test.go new file mode 100644 index 000000000000..27d34605f509 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/util/clock/clock_test.go @@ -0,0 +1,184 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package clock + +import ( + "testing" + "time" +) + +func TestFakeClock(t *testing.T) { + startTime := time.Now() + tc := NewFakeClock(startTime) + tc.Step(time.Second) + now := tc.Now() + if now.Sub(startTime) != time.Second { + t.Errorf("input: %s now=%s gap=%s expected=%s", startTime, now, now.Sub(startTime), time.Second) + } + + tt := tc.Now() + tc.SetTime(tt.Add(time.Hour)) + if tc.Now().Sub(tt) != time.Hour { + t.Errorf("input: %s now=%s gap=%s expected=%s", tt, tc.Now(), tc.Now().Sub(tt), time.Hour) + } +} + +func TestFakeClockSleep(t *testing.T) { + startTime := time.Now() + tc := NewFakeClock(startTime) + tc.Sleep(time.Duration(1) * time.Hour) + now := tc.Now() + if now.Sub(startTime) != time.Hour { + t.Errorf("Fake sleep failed, expected time to advance by one hour, instead, its %v", now.Sub(startTime)) + } +} + +func TestFakeAfter(t *testing.T) { + tc := NewFakeClock(time.Now()) + if tc.HasWaiters() { + t.Errorf("unexpected waiter?") + } + oneSec := tc.After(time.Second) + if !tc.HasWaiters() { + t.Errorf("unexpected lack of waiter?") + } + + oneOhOneSec := tc.After(time.Second + time.Millisecond) + twoSec := tc.After(2 * time.Second) + select { + case <-oneSec: + t.Errorf("unexpected channel read") + case <-oneOhOneSec: + t.Errorf("unexpected channel read") + case <-twoSec: + t.Errorf("unexpected channel read") + default: + } + + tc.Step(999 * time.Millisecond) + select { + case <-oneSec: + t.Errorf("unexpected channel read") + case <-oneOhOneSec: + t.Errorf("unexpected channel read") + case <-twoSec: + t.Errorf("unexpected channel read") + default: + } + + tc.Step(time.Millisecond) + select { + case <-oneSec: + // Expected! + case <-oneOhOneSec: + t.Errorf("unexpected channel read") + case <-twoSec: + t.Errorf("unexpected channel read") + default: + t.Errorf("unexpected non-channel read") + } + tc.Step(time.Millisecond) + select { + case <-oneSec: + // should not double-trigger! + t.Errorf("unexpected channel read") + case <-oneOhOneSec: + // Expected! + case <-twoSec: + t.Errorf("unexpected channel read") + default: + t.Errorf("unexpected non-channel read") + } +} + +func TestFakeTick(t *testing.T) { + tc := NewFakeClock(time.Now()) + if tc.HasWaiters() { + t.Errorf("unexpected waiter?") + } + oneSec := tc.Tick(time.Second) + if !tc.HasWaiters() { + t.Errorf("unexpected lack of waiter?") + } + + oneOhOneSec := tc.Tick(time.Second + time.Millisecond) + twoSec := tc.Tick(2 * time.Second) + select { + case <-oneSec: + t.Errorf("unexpected channel read") + case <-oneOhOneSec: + t.Errorf("unexpected channel read") + case <-twoSec: + t.Errorf("unexpected channel read") + default: + } + + tc.Step(999 * time.Millisecond) // t=.999 + select { + case <-oneSec: + t.Errorf("unexpected channel read") + case <-oneOhOneSec: + t.Errorf("unexpected channel read") + case <-twoSec: + t.Errorf("unexpected channel read") + default: + } + + tc.Step(time.Millisecond) // t=1.000 + select { + case <-oneSec: + // Expected! + case <-oneOhOneSec: + t.Errorf("unexpected channel read") + case <-twoSec: + t.Errorf("unexpected channel read") + default: + t.Errorf("unexpected non-channel read") + } + tc.Step(time.Millisecond) // t=1.001 + select { + case <-oneSec: + // should not double-trigger! + t.Errorf("unexpected channel read") + case <-oneOhOneSec: + // Expected! 
+ case <-twoSec: + t.Errorf("unexpected channel read") + default: + t.Errorf("unexpected non-channel read") + } + + tc.Step(time.Second) // t=2.001 + tc.Step(time.Second) // t=3.001 + tc.Step(time.Second) // t=4.001 + tc.Step(time.Second) // t=5.001 + + // The one second ticker should not accumulate ticks + accumulatedTicks := 0 + drained := false + for !drained { + select { + case <-oneSec: + accumulatedTicks++ + default: + drained = true + } + } + if accumulatedTicks != 1 { + t.Errorf("unexpected number of accumulated ticks: %d", accumulatedTicks) + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/util/doc.go b/vendor/k8s.io/kubernetes/pkg/util/doc.go new file mode 100644 index 000000000000..f7e214f31627 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/util/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package util implements various utility functions used in both testing and implementation +// of Kubernetes. Package util may not depend on any other package in the Kubernetes +// package tree. +package util // import "k8s.io/kubernetes/pkg/util" diff --git a/vendor/k8s.io/kubernetes/pkg/util/errors/BUILD b/vendor/k8s.io/kubernetes/pkg/util/errors/BUILD deleted file mode 100644 index d53c9ad6bd70..000000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/errors/BUILD +++ /dev/null @@ -1,28 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "errors.go", - ], - importpath = "k8s.io/kubernetes/pkg/util/errors", -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], -) diff --git a/vendor/k8s.io/kubernetes/pkg/util/errors/errors.go b/vendor/k8s.io/kubernetes/pkg/util/errors/errors.go deleted file mode 100644 index 42631f216224..000000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/errors/errors.go +++ /dev/null @@ -1,168 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package errors - -import ( - "errors" - "fmt" -) - -// Aggregate represents an object that contains multiple errors, but does not -// necessarily have singular semantic meaning. 
-type Aggregate interface { - error - Errors() []error -} - -// NewAggregate converts a slice of errors into an Aggregate interface, which -// is itself an implementation of the error interface. If the slice is empty, -// this returns nil. -// It will check if any of the element of input error list is nil, to avoid -// nil pointer panic when call Error(). -func NewAggregate(errlist []error) Aggregate { - if len(errlist) == 0 { - return nil - } - // In case of input error list contains nil - var errs []error - for _, e := range errlist { - if e != nil { - errs = append(errs, e) - } - } - if len(errs) == 0 { - return nil - } - return aggregate(errs) -} - -// This helper implements the error and Errors interfaces. Keeping it private -// prevents people from making an aggregate of 0 errors, which is not -// an error, but does satisfy the error interface. -type aggregate []error - -// Error is part of the error interface. -func (agg aggregate) Error() string { - if len(agg) == 0 { - // This should never happen, really. - return "" - } - if len(agg) == 1 { - return agg[0].Error() - } - result := fmt.Sprintf("[%s", agg[0].Error()) - for i := 1; i < len(agg); i++ { - result += fmt.Sprintf(", %s", agg[i].Error()) - } - result += "]" - return result -} - -// Errors is part of the Aggregate interface. -func (agg aggregate) Errors() []error { - return []error(agg) -} - -// Matcher is used to match errors. Returns true if the error matches. -type Matcher func(error) bool - -// FilterOut removes all errors that match any of the matchers from the input -// error. If the input is a singular error, only that error is tested. If the -// input implements the Aggregate interface, the list of errors will be -// processed recursively. -// -// This can be used, for example, to remove known-OK errors (such as io.EOF or -// os.PathNotFound) from a list of errors. -func FilterOut(err error, fns ...Matcher) error { - if err == nil { - return nil - } - if agg, ok := err.(Aggregate); ok { - return NewAggregate(filterErrors(agg.Errors(), fns...)) - } - if !matchesError(err, fns...) { - return err - } - return nil -} - -// matchesError returns true if any Matcher returns true -func matchesError(err error, fns ...Matcher) bool { - for _, fn := range fns { - if fn(err) { - return true - } - } - return false -} - -// filterErrors returns any errors (or nested errors, if the list contains -// nested Errors) for which all fns return false. If no errors -// remain a nil list is returned. The resulting silec will have all -// nested slices flattened as a side effect. -func filterErrors(list []error, fns ...Matcher) []error { - result := []error{} - for _, err := range list { - r := FilterOut(err, fns...) - if r != nil { - result = append(result, r) - } - } - return result -} - -// Flatten takes an Aggregate, which may hold other Aggregates in arbitrary -// nesting, and flattens them all into a single Aggregate, recursively. -func Flatten(agg Aggregate) Aggregate { - result := []error{} - if agg == nil { - return nil - } - for _, err := range agg.Errors() { - if a, ok := err.(Aggregate); ok { - r := Flatten(a) - if r != nil { - result = append(result, r.Errors()...) - } - } else { - if err != nil { - result = append(result, err) - } - } - } - return NewAggregate(result) -} - -// AggregateGoroutines runs the provided functions in parallel, stuffing all -// non-nil errors into the returned Aggregate. -// Returns nil if all the functions complete successfully. 
-func AggregateGoroutines(funcs ...func() error) Aggregate { - errChan := make(chan error, len(funcs)) - for _, f := range funcs { - go func(f func() error) { errChan <- f() }(f) - } - errs := make([]error, 0) - for i := 0; i < cap(errChan); i++ { - if err := <-errChan; err != nil { - errs = append(errs, err) - } - } - return NewAggregate(errs) -} - -// ErrPreconditionViolated is returned when the precondition is violated -var ErrPreconditionViolated = errors.New("precondition is violated") diff --git a/vendor/k8s.io/kubernetes/pkg/util/flag/BUILD b/vendor/k8s.io/kubernetes/pkg/util/flag/BUILD index 21abd5513dbe..1e823b378de6 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/flag/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/util/flag/BUILD @@ -1,9 +1,4 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) +load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", @@ -12,6 +7,7 @@ go_library( "tristate.go", ], importpath = "k8s.io/kubernetes/pkg/util/flag", + visibility = ["//visibility:public"], deps = [ "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", @@ -29,4 +25,5 @@ filegroup( name = "all-srcs", srcs = [":package-srcs"], tags = ["automanaged"], + visibility = ["//visibility:public"], ) diff --git a/vendor/k8s.io/kubernetes/pkg/util/sets/BUILD b/vendor/k8s.io/kubernetes/pkg/util/sets/BUILD index f324b55ab922..17870b378f46 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/sets/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/util/sets/BUILD @@ -1,9 +1,4 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "go_default_library", @@ -16,6 +11,14 @@ go_library( "string.go", ], importpath = "k8s.io/kubernetes/pkg/util/sets", + visibility = ["//visibility:public"], +) + +go_test( + name = "go_default_test", + srcs = ["set_test.go"], + importpath = "k8s.io/kubernetes/pkg/util/sets", + library = ":go_default_library", ) filegroup( @@ -29,4 +32,5 @@ filegroup( name = "all-srcs", srcs = [":package-srcs"], tags = ["automanaged"], + visibility = ["//visibility:public"], ) diff --git a/vendor/k8s.io/kubernetes/pkg/util/sets/set_test.go b/vendor/k8s.io/kubernetes/pkg/util/sets/set_test.go new file mode 100644 index 000000000000..df722ec27125 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/util/sets/set_test.go @@ -0,0 +1,270 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package sets + +import ( + "reflect" + "testing" +) + +func TestStringSet(t *testing.T) { + s := String{} + s2 := String{} + if len(s) != 0 { + t.Errorf("Expected len=0: %d", len(s)) + } + s.Insert("a", "b") + if len(s) != 2 { + t.Errorf("Expected len=2: %d", len(s)) + } + s.Insert("c") + if s.Has("d") { + t.Errorf("Unexpected contents: %#v", s) + } + if !s.Has("a") { + t.Errorf("Missing contents: %#v", s) + } + s.Delete("a") + if s.Has("a") { + t.Errorf("Unexpected contents: %#v", s) + } + s.Insert("a") + if s.HasAll("a", "b", "d") { + t.Errorf("Unexpected contents: %#v", s) + } + if !s.HasAll("a", "b") { + t.Errorf("Missing contents: %#v", s) + } + s2.Insert("a", "b", "d") + if s.IsSuperset(s2) { + t.Errorf("Unexpected contents: %#v", s) + } + s2.Delete("d") + if !s.IsSuperset(s2) { + t.Errorf("Missing contents: %#v", s) + } +} + +func TestStringSetDeleteMultiples(t *testing.T) { + s := String{} + s.Insert("a", "b", "c") + if len(s) != 3 { + t.Errorf("Expected len=3: %d", len(s)) + } + + s.Delete("a", "c") + if len(s) != 1 { + t.Errorf("Expected len=1: %d", len(s)) + } + if s.Has("a") { + t.Errorf("Unexpected contents: %#v", s) + } + if s.Has("c") { + t.Errorf("Unexpected contents: %#v", s) + } + if !s.Has("b") { + t.Errorf("Missing contents: %#v", s) + } + +} + +func TestNewStringSet(t *testing.T) { + s := NewString("a", "b", "c") + if len(s) != 3 { + t.Errorf("Expected len=3: %d", len(s)) + } + if !s.Has("a") || !s.Has("b") || !s.Has("c") { + t.Errorf("Unexpected contents: %#v", s) + } +} + +func TestStringSetList(t *testing.T) { + s := NewString("z", "y", "x", "a") + if !reflect.DeepEqual(s.List(), []string{"a", "x", "y", "z"}) { + t.Errorf("List gave unexpected result: %#v", s.List()) + } +} + +func TestStringSetDifference(t *testing.T) { + a := NewString("1", "2", "3") + b := NewString("1", "2", "4", "5") + c := a.Difference(b) + d := b.Difference(a) + if len(c) != 1 { + t.Errorf("Expected len=1: %d", len(c)) + } + if !c.Has("3") { + t.Errorf("Unexpected contents: %#v", c.List()) + } + if len(d) != 2 { + t.Errorf("Expected len=2: %d", len(d)) + } + if !d.Has("4") || !d.Has("5") { + t.Errorf("Unexpected contents: %#v", d.List()) + } +} + +func TestStringSetHasAny(t *testing.T) { + a := NewString("1", "2", "3") + + if !a.HasAny("1", "4") { + t.Errorf("expected true, got false") + } + + if a.HasAny("0", "4") { + t.Errorf("expected false, got true") + } +} + +func TestStringSetEquals(t *testing.T) { + // Simple case (order doesn't matter) + a := NewString("1", "2") + b := NewString("2", "1") + if !a.Equal(b) { + t.Errorf("Expected to be equal: %v vs %v", a, b) + } + + // It is a set; duplicates are ignored + b = NewString("2", "2", "1") + if !a.Equal(b) { + t.Errorf("Expected to be equal: %v vs %v", a, b) + } + + // Edge cases around empty sets / empty strings + a = NewString() + b = NewString() + if !a.Equal(b) { + t.Errorf("Expected to be equal: %v vs %v", a, b) + } + + b = NewString("1", "2", "3") + if a.Equal(b) { + t.Errorf("Expected to be not-equal: %v vs %v", a, b) + } + + b = NewString("1", "2", "") + if a.Equal(b) { + t.Errorf("Expected to be not-equal: %v vs %v", a, b) + } + + // Check for equality after mutation + a = NewString() + a.Insert("1") + if a.Equal(b) { + t.Errorf("Expected to be not-equal: %v vs %v", a, b) + } + + a.Insert("2") + if a.Equal(b) { + t.Errorf("Expected to be not-equal: %v vs %v", a, b) + } + + a.Insert("") + if !a.Equal(b) { + t.Errorf("Expected to be equal: %v vs %v", a, b) + } + + a.Delete("") + if a.Equal(b) { + t.Errorf("Expected to be 
not-equal: %v vs %v", a, b) + } +} + +func TestStringUnion(t *testing.T) { + tests := []struct { + s1 String + s2 String + expected String + }{ + { + NewString("1", "2", "3", "4"), + NewString("3", "4", "5", "6"), + NewString("1", "2", "3", "4", "5", "6"), + }, + { + NewString("1", "2", "3", "4"), + NewString(), + NewString("1", "2", "3", "4"), + }, + { + NewString(), + NewString("1", "2", "3", "4"), + NewString("1", "2", "3", "4"), + }, + { + NewString(), + NewString(), + NewString(), + }, + } + + for _, test := range tests { + union := test.s1.Union(test.s2) + if union.Len() != test.expected.Len() { + t.Errorf("Expected union.Len()=%d but got %d", test.expected.Len(), union.Len()) + } + + if !union.Equal(test.expected) { + t.Errorf("Expected union.Equal(expected) but not true. union:%v expected:%v", union.List(), test.expected.List()) + } + } +} + +func TestStringIntersection(t *testing.T) { + tests := []struct { + s1 String + s2 String + expected String + }{ + { + NewString("1", "2", "3", "4"), + NewString("3", "4", "5", "6"), + NewString("3", "4"), + }, + { + NewString("1", "2", "3", "4"), + NewString("1", "2", "3", "4"), + NewString("1", "2", "3", "4"), + }, + { + NewString("1", "2", "3", "4"), + NewString(), + NewString(), + }, + { + NewString(), + NewString("1", "2", "3", "4"), + NewString(), + }, + { + NewString(), + NewString(), + NewString(), + }, + } + + for _, test := range tests { + intersection := test.s1.Intersection(test.s2) + if intersection.Len() != test.expected.Len() { + t.Errorf("Expected intersection.Len()=%d but got %d", test.expected.Len(), intersection.Len()) + } + + if !intersection.Equal(test.expected) { + t.Errorf("Expected intersection.Equal(expected) but not true. intersection:%v expected:%v", intersection.List(), test.expected.List()) + } + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/util/string_flag.go b/vendor/k8s.io/kubernetes/pkg/util/string_flag.go new file mode 100644 index 000000000000..9d6a00a15924 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/util/string_flag.go @@ -0,0 +1,56 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +// StringFlag is a string flag compatible with flags and pflags that keeps track of whether it had a value supplied or not. 
+type StringFlag struct { + // If Set has been invoked this value is true + provided bool + // The exact value provided on the flag + value string +} + +func NewStringFlag(defaultVal string) StringFlag { + return StringFlag{value: defaultVal} +} + +func (f *StringFlag) Default(value string) { + f.value = value +} + +func (f StringFlag) String() string { + return f.value +} + +func (f StringFlag) Value() string { + return f.value +} + +func (f *StringFlag) Set(value string) error { + f.value = value + f.provided = true + + return nil +} + +func (f StringFlag) Provided() bool { + return f.provided +} + +func (f *StringFlag) Type() string { + return "string" +} diff --git a/vendor/k8s.io/kubernetes/pkg/util/template.go b/vendor/k8s.io/kubernetes/pkg/util/template.go new file mode 100644 index 000000000000..d09d7dc86743 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/util/template.go @@ -0,0 +1,48 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "bytes" + "go/doc" + "io" + "strings" + "text/template" +) + +func wrap(indent string, s string) string { + var buf bytes.Buffer + doc.ToText(&buf, s, indent, indent+" ", 80-len(indent)) + return buf.String() +} + +// ExecuteTemplate executes templateText with data and output written to w. +func ExecuteTemplate(w io.Writer, templateText string, data interface{}) error { + t := template.New("top") + t.Funcs(template.FuncMap{ + "trim": strings.TrimSpace, + "wrap": wrap, + }) + template.Must(t.Parse(templateText)) + return t.Execute(w, data) +} + +func ExecuteTemplateToString(templateText string, data interface{}) (string, error) { + b := bytes.Buffer{} + err := ExecuteTemplate(&b, templateText, data) + return b.String(), err +} diff --git a/vendor/k8s.io/kubernetes/pkg/util/template_test.go b/vendor/k8s.io/kubernetes/pkg/util/template_test.go new file mode 100644 index 000000000000..8f075d1adbd9 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/util/template_test.go @@ -0,0 +1,61 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestWrap(t *testing.T) { + tt := `before ->{{.Long | wrap "**"}}<- after` + data := struct { + Long string + }{ + `Hodor, hodor; hodor hodor; +hodor hodor... Hodor hodor hodor? Hodor. Hodor hodor hodor hodor... +Hodor hodor hodor; hodor hodor hodor! Hodor, hodor. Hodor. Hodor, +HODOR hodor, hodor hodor; hodor hodor; hodor HODOR hodor, hodor hodor? +Hodor. 
Hodor hodor - hodor hodor. Hodor hodor HODOR! Hodor hodor - hodor... +Hodor hodor HODOR hodor, hodor hodor hodor! Hodor, hodor... Hodor hodor +hodor hodor hodor hodor! Hodor, hodor; hodor hodor. Hodor.`, + } + output, _ := ExecuteTemplateToString(tt, data) + t.Logf("%q", output) + + assert.Equal(t, `before ->**Hodor, hodor; hodor hodor; hodor hodor... Hodor hodor hodor? Hodor. Hodor +**hodor hodor hodor... Hodor hodor hodor; hodor hodor hodor! Hodor, hodor. +**Hodor. Hodor, HODOR hodor, hodor hodor; hodor hodor; hodor HODOR hodor, hodor +**hodor? Hodor. Hodor hodor - hodor hodor. Hodor hodor HODOR! Hodor hodor - +**hodor... Hodor hodor HODOR hodor, hodor hodor hodor! Hodor, hodor... Hodor +**hodor hodor hodor hodor hodor! Hodor, hodor; hodor hodor. Hodor. +<- after`, output) +} + +func TestTrim(t *testing.T) { + tt := `before ->{{.Messy | trim }}<- after` + data := struct { + Messy string + }{ + "\t stuff\n \r ", + } + output, _ := ExecuteTemplateToString(tt, data) + t.Logf("%q", output) + + assert.Equal(t, `before ->stuff<- after`, output) +} diff --git a/vendor/k8s.io/kubernetes/pkg/util/trace.go b/vendor/k8s.io/kubernetes/pkg/util/trace.go new file mode 100644 index 000000000000..fe93db8df6f4 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/util/trace.go @@ -0,0 +1,72 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package util + +import ( + "bytes" + "fmt" + "time" + + "github.com/golang/glog" +) + +type traceStep struct { + stepTime time.Time + msg string +} + +type Trace struct { + name string + startTime time.Time + steps []traceStep +} + +func NewTrace(name string) *Trace { + return &Trace{name, time.Now(), nil} +} + +func (t *Trace) Step(msg string) { + if t.steps == nil { + // traces almost always have fewer than 6 steps; preallocate to avoid more than a single allocation + t.steps = make([]traceStep, 0, 6) + } + t.steps = append(t.steps, traceStep{time.Now(), msg}) +} + +func (t *Trace) Log() { + endTime := time.Now() + var buffer bytes.Buffer + + buffer.WriteString(fmt.Sprintf("Trace %q (started %v):\n", t.name, t.startTime)) + lastStepTime := t.startTime + for _, step := range t.steps { + buffer.WriteString(fmt.Sprintf("[%v] [%v] %v\n", step.stepTime.Sub(t.startTime), step.stepTime.Sub(lastStepTime), step.msg)) + lastStepTime = step.stepTime + } + buffer.WriteString(fmt.Sprintf("[%v] [%v] END\n", endTime.Sub(t.startTime), endTime.Sub(lastStepTime))) + glog.Info(buffer.String()) +} + +func (t *Trace) LogIfLong(threshold time.Duration) { + if time.Since(t.startTime) >= threshold { + t.Log() + } +} + +func (t *Trace) TotalTime() time.Duration { + return time.Since(t.startTime) +} diff --git a/vendor/k8s.io/kubernetes/pkg/util/errors/doc.go b/vendor/k8s.io/kubernetes/pkg/util/umask.go similarity index 75% rename from vendor/k8s.io/kubernetes/pkg/util/errors/doc.go rename to vendor/k8s.io/kubernetes/pkg/util/umask.go index b3b39bc388f3..35ccce50bd57 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/errors/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/util/umask.go @@ -1,5 +1,7 @@ +// +build !windows + /* -Copyright 2015 The Kubernetes Authors. +Copyright 2014 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,5 +16,12 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package errors implements various utility functions and types around errors. -package errors +package util + +import ( + "syscall" +) + +func Umask(mask int) (old int, err error) { + return syscall.Umask(mask), nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/util/umask_windows.go b/vendor/k8s.io/kubernetes/pkg/util/umask_windows.go new file mode 100644 index 000000000000..8c1b2cbc7d5f --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/util/umask_windows.go @@ -0,0 +1,27 @@ +// +build windows + +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
+*/ + +package util + +import ( + "errors" +) + +func Umask(mask int) (old int, err error) { + return 0, errors.New("platform and architecture are not supported") +} diff --git a/vendor/k8s.io/kubernetes/pkg/util/util.go b/vendor/k8s.io/kubernetes/pkg/util/util.go new file mode 100644 index 000000000000..7a9414957849 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/util/util.go @@ -0,0 +1,147 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "fmt" + "os" + "reflect" + "regexp" +) + +// CompileRegexps takes a list of strings and compiles them into a list of regular expressions +func CompileRegexps(regexpStrings []string) ([]*regexp.Regexp, error) { + regexps := []*regexp.Regexp{} + for _, regexpStr := range regexpStrings { + r, err := regexp.Compile(regexpStr) + if err != nil { + return []*regexp.Regexp{}, err + } + regexps = append(regexps, r) + } + return regexps, nil +} + +// UsingSystemdInitSystem detects whether systemd is in use as the init system. +// Please note that simply reading /proc/1/cmdline can be misleading because +// some installations of various init programs can automatically make /sbin/init +// a symlink or even a renamed version of their main program. +// TODO(dchen1107): reliably detect the init system in use on the system: +// systemd, upstart, initd, etc. +func UsingSystemdInitSystem() bool { + if _, err := os.Stat("/run/systemd/system"); err == nil { + return true + } + + return false +} + +// AllPtrFieldsNil tests whether all pointer fields in a struct are nil. This is useful when, +// for example, an API struct is handled by plugins which need to distinguish +// "no plugin accepted this spec" from "this spec is empty". +// +// This function is only valid for structs and pointers to structs. Any other +// type will cause a panic. Passing a typed nil pointer will return true. +func AllPtrFieldsNil(obj interface{}) bool { + v := reflect.ValueOf(obj) + if !v.IsValid() { + panic(fmt.Sprintf("reflect.ValueOf() produced a non-valid Value for %#v", obj)) + } + if v.Kind() == reflect.Ptr { + if v.IsNil() { + return true + } + v = v.Elem() + } + for i := 0; i < v.NumField(); i++ { + if v.Field(i).Kind() == reflect.Ptr && !v.Field(i).IsNil() { + return false + } + } + return true +} + +func FileExists(filename string) (bool, error) { + if _, err := os.Stat(filename); os.IsNotExist(err) { + return false, nil + } else if err != nil { + return false, err + } + return true, nil +} + +// ReadDirNoExit reads the directory named by dirname and returns +// a list of directory entries, minus those with lstat errors. +// It is borrowed from ioutil.ReadDir. +func ReadDirNoExit(dirname string) ([]os.FileInfo, []error, error) { + if dirname == "" { + dirname = "."
+ } + + f, err := os.Open(dirname) + if err != nil { + return nil, nil, err + } + defer f.Close() + + names, err := f.Readdirnames(-1) + list := make([]os.FileInfo, 0, len(names)) + errs := make([]error, 0, len(names)) + for _, filename := range names { + fip, lerr := os.Lstat(dirname + "/" + filename) + if os.IsNotExist(lerr) { + // File disappeared between readdir + stat. + // Just treat it as if it didn't exist. + continue + } + + list = append(list, fip) + errs = append(errs, lerr) + } + + return list, errs, nil +} + +// IntPtr returns a pointer to an int +func IntPtr(i int) *int { + o := i + return &o +} + +// Int32Ptr returns a pointer to an int32 +func Int32Ptr(i int32) *int32 { + o := i + return &o +} + +// IntPtrDerefOr dereferences the int ptr and returns it if not nil, +// else returns def. +func IntPtrDerefOr(ptr *int, def int) int { + if ptr != nil { + return *ptr + } + return def +} + +// Int32PtrDerefOr dereferences the int32 ptr and returns it if not nil, +// else returns def. +func Int32PtrDerefOr(ptr *int32, def int32) int32 { + if ptr != nil { + return *ptr + } + return def +} diff --git a/vendor/k8s.io/kubernetes/pkg/util/util_test.go b/vendor/k8s.io/kubernetes/pkg/util/util_test.go new file mode 100644 index 000000000000..36a6641dadb8 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/util/util_test.go @@ -0,0 +1,101 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
+*/ + +package util + +import ( + "testing" + + "k8s.io/kubernetes/pkg/util/diff" +) + +func TestStringDiff(t *testing.T) { + diff := diff.StringDiff("aaabb", "aaacc") + expect := "aaa\n\nA: bb\n\nB: cc\n\n" + if diff != expect { + t.Errorf("diff returned %v", diff) + } +} + +func TestCompileRegex(t *testing.T) { + uncompiledRegexes := []string{"endsWithMe$", "^startingWithMe"} + regexes, err := CompileRegexps(uncompiledRegexes) + + if err != nil { + t.Errorf("Failed to compile legal regexes: '%v': %v", uncompiledRegexes, err) + } + if len(regexes) != len(uncompiledRegexes) { + t.Errorf("Wrong number of regexes returned: '%v': %v", uncompiledRegexes, regexes) + } + + if !regexes[0].MatchString("Something that endsWithMe") { + t.Errorf("Wrong regex returned: '%v': %v", uncompiledRegexes[0], regexes[0]) + } + if regexes[0].MatchString("Something that doesn't endsWithMe.") { + t.Errorf("Wrong regex returned: '%v': %v", uncompiledRegexes[0], regexes[0]) + } + if !regexes[1].MatchString("startingWithMe is very important") { + t.Errorf("Wrong regex returned: '%v': %v", uncompiledRegexes[1], regexes[1]) + } + if regexes[1].MatchString("not startingWithMe should fail") { + t.Errorf("Wrong regex returned: '%v': %v", uncompiledRegexes[1], regexes[1]) + } +} + +func TestAllPtrFieldsNil(t *testing.T) { + testCases := []struct { + obj interface{} + expected bool + }{ + {struct{}{}, true}, + {struct{ Foo int }{12345}, true}, + {&struct{ Foo int }{12345}, true}, + {struct{ Foo *int }{nil}, true}, + {&struct{ Foo *int }{nil}, true}, + {struct { + Foo int + Bar *int + }{12345, nil}, true}, + {&struct { + Foo int + Bar *int + }{12345, nil}, true}, + {struct { + Foo *int + Bar *int + }{nil, nil}, true}, + {&struct { + Foo *int + Bar *int + }{nil, nil}, true}, + {struct{ Foo *int }{new(int)}, false}, + {&struct{ Foo *int }{new(int)}, false}, + {struct { + Foo *int + Bar *int + }{nil, new(int)}, false}, + {&struct { + Foo *int + Bar *int + }{nil, new(int)}, false}, + {(*struct{})(nil), true}, + } + for i, tc := range testCases { + if AllPtrFieldsNil(tc.obj) != tc.expected { + t.Errorf("case[%d]: expected %t, got %t", i, tc.expected, !tc.expected) + } + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/util/yaml/BUILD b/vendor/k8s.io/kubernetes/pkg/util/yaml/BUILD index 292b89000d12..4ddac79d3de5 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/yaml/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/util/yaml/BUILD @@ -1,20 +1,23 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "go_default_library", srcs = ["decoder.go"], importpath = "k8s.io/kubernetes/pkg/util/yaml", + visibility = ["//visibility:public"], deps = [ "//vendor/github.com/ghodss/yaml:go_default_library", "//vendor/github.com/golang/glog:go_default_library", ], ) +go_test( + name = "go_default_test", + srcs = ["decoder_test.go"], + importpath = "k8s.io/kubernetes/pkg/util/yaml", + library = ":go_default_library", +) + filegroup( name = "package-srcs", srcs = glob(["**"]), @@ -26,4 +29,5 @@ filegroup( name = "all-srcs", srcs = [":package-srcs"], tags = ["automanaged"], + visibility = ["//visibility:public"], ) diff --git a/vendor/k8s.io/kubernetes/pkg/util/yaml/decoder_test.go b/vendor/k8s.io/kubernetes/pkg/util/yaml/decoder_test.go new file mode 100644 index 000000000000..8985907c4566 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/util/yaml/decoder_test.go @@ -0,0 +1,316 @@ +/* +Copyright 2014 The 
Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package yaml + +import ( + "bufio" + "bytes" + "encoding/json" + "fmt" + "io" + "math/rand" + "reflect" + "strings" + "testing" +) + +func TestSplitYAMLDocument(t *testing.T) { + testCases := []struct { + input string + atEOF bool + expect string + adv int + }{ + {"foo", true, "foo", 3}, + {"fo", false, "", 0}, + + {"---", true, "---", 3}, + {"---\n", true, "---\n", 4}, + {"---\n", false, "", 0}, + + {"\n---\n", false, "", 5}, + {"\n---\n", true, "", 5}, + + {"abc\n---\ndef", true, "abc", 8}, + {"def", true, "def", 3}, + {"", true, "", 0}, + } + for i, testCase := range testCases { + adv, token, err := splitYAMLDocument([]byte(testCase.input), testCase.atEOF) + if err != nil { + t.Errorf("%d: unexpected error: %v", i, err) + continue + } + if adv != testCase.adv { + t.Errorf("%d: advance did not match: %d %d", i, testCase.adv, adv) + } + if testCase.expect != string(token) { + t.Errorf("%d: token did not match: %q %q", i, testCase.expect, string(token)) + } + } +} + +func TestGuessJSON(t *testing.T) { + if r, isJSON := GuessJSONStream(bytes.NewReader([]byte(" \n{}")), 100); !isJSON { + t.Fatalf("expected stream to be JSON") + } else { + b := make([]byte, 30) + n, err := r.Read(b) + if err != nil || n != 4 { + t.Fatalf("unexpected body: %d / %v", n, err) + } + if string(b[:n]) != " \n{}" { + t.Fatalf("unexpected body: %q", string(b[:n])) + } + } +} + +func TestScanYAML(t *testing.T) { + s := bufio.NewScanner(bytes.NewReader([]byte(`--- +stuff: 1 + +--- + `))) + s.Split(splitYAMLDocument) + if !s.Scan() { + t.Fatalf("should have been able to scan") + } + t.Logf("scan: %s", s.Text()) + if !s.Scan() { + t.Fatalf("should have been able to scan") + } + t.Logf("scan: %s", s.Text()) + if s.Scan() { + t.Fatalf("scan should have been done") + } + if s.Err() != nil { + t.Fatalf("err should have been nil: %v", s.Err()) + } +} + +func TestDecodeYAML(t *testing.T) { + s := NewYAMLToJSONDecoder(bytes.NewReader([]byte(`--- +stuff: 1 + +--- + `))) + obj := generic{} + if err := s.Decode(&obj); err != nil { + t.Fatalf("unexpected error: %v", err) + } + if fmt.Sprintf("%#v", obj) != `yaml.generic{"stuff":1}` { + t.Errorf("unexpected object: %#v", obj) + } + obj = generic{} + if err := s.Decode(&obj); err != nil { + t.Fatalf("unexpected error: %v", err) + } + if len(obj) != 0 { + t.Fatalf("unexpected object: %#v", obj) + } + obj = generic{} + if err := s.Decode(&obj); err != io.EOF { + t.Fatalf("unexpected error: %v", err) + } +} + +func TestDecodeBrokenYAML(t *testing.T) { + s := NewYAMLOrJSONDecoder(bytes.NewReader([]byte(`--- +stuff: 1 + test-foo: 1 + +--- + `)), 100) + obj := generic{} + err := s.Decode(&obj) + if err == nil { + t.Fatal("expected error with yaml: prefix, got no error") + } + fmt.Printf("err: %s\n", err.Error()) + if !strings.HasPrefix(err.Error(), "yaml: line 1:") { + t.Fatalf("expected %q to have 'yaml: line 1:' prefix", err.Error()) + } +} + +func TestDecodeBrokenJSON(t *testing.T) { + s := NewYAMLOrJSONDecoder(bytes.NewReader([]byte(`{ 
+ "foo": { + "stuff": 1 + "otherStuff": 2 + } +} + `)), 100) + obj := generic{} + err := s.Decode(&obj) + if err == nil { + t.Fatal("expected error with json: prefix, got no error") + } + if !strings.HasPrefix(err.Error(), "json: line 3:") { + t.Fatalf("expected %q to have 'json: line 3:' prefix", err.Error()) + } +} + +type generic map[string]interface{} + +func TestYAMLOrJSONDecoder(t *testing.T) { + testCases := []struct { + input string + buffer int + isJSON bool + err bool + out []generic + }{ + {` {"1":2}{"3":4}`, 2, true, false, []generic{ + {"1": 2}, + {"3": 4}, + }}, + {" \n{}", 3, true, false, []generic{ + {}, + }}, + {" \na: b", 2, false, false, []generic{ + {"a": "b"}, + }}, + {" \n{\"a\": \"b\"}", 2, false, true, []generic{ + {"a": "b"}, + }}, + {" \n{\"a\": \"b\"}", 3, true, false, []generic{ + {"a": "b"}, + }}, + {` {"a":"b"}`, 100, true, false, []generic{ + {"a": "b"}, + }}, + {"", 1, false, false, []generic{}}, + {"foo: bar\n---\nbaz: biz", 100, false, false, []generic{ + {"foo": "bar"}, + {"baz": "biz"}, + }}, + {"foo: bar\n---\n", 100, false, false, []generic{ + {"foo": "bar"}, + }}, + {"foo: bar\n---", 100, false, false, []generic{ + {"foo": "bar"}, + }}, + {"foo: bar\n--", 100, false, true, []generic{ + {"foo": "bar"}, + }}, + {"foo: bar\n-", 100, false, true, []generic{ + {"foo": "bar"}, + }}, + {"foo: bar\n", 100, false, false, []generic{ + {"foo": "bar"}, + }}, + } + for i, testCase := range testCases { + decoder := NewYAMLOrJSONDecoder(bytes.NewReader([]byte(testCase.input)), testCase.buffer) + objs := []generic{} + + var err error + for { + out := make(generic) + err = decoder.Decode(&out) + if err != nil { + break + } + objs = append(objs, out) + } + if err != io.EOF { + switch { + case testCase.err && err == nil: + t.Errorf("%d: unexpected non-error", i) + continue + case !testCase.err && err != nil: + t.Errorf("%d: unexpected error: %v", i, err) + continue + case err != nil: + continue + } + } + switch decoder.decoder.(type) { + case *YAMLToJSONDecoder: + if testCase.isJSON { + t.Errorf("%d: expected JSON decoder, got YAML", i) + } + case *json.Decoder: + if !testCase.isJSON { + t.Errorf("%d: expected YAML decoder, got JSON", i) + } + } + if fmt.Sprintf("%#v", testCase.out) != fmt.Sprintf("%#v", objs) { + t.Errorf("%d: objects were not equal: \n%#v\n%#v", i, testCase.out, objs) + } + } +} + +func TestReadSingleLongLine(t *testing.T) { + testReadLines(t, []int{128 * 1024}) +} + +func TestReadRandomLineLengths(t *testing.T) { + minLength := 100 + maxLength := 96 * 1024 + maxLines := 100 + + lineLengths := make([]int, maxLines) + for i := 0; i < maxLines; i++ { + lineLengths[i] = rand.Intn(maxLength-minLength) + minLength + } + + testReadLines(t, lineLengths) +} + +func testReadLines(t *testing.T, lineLengths []int) { + var ( + lines [][]byte + inputStream []byte + ) + for _, lineLength := range lineLengths { + inputLine := make([]byte, lineLength+1) + for i := 0; i < lineLength; i++ { + char := rand.Intn('z'-'A') + 'A' + inputLine[i] = byte(char) + } + inputLine[len(inputLine)-1] = '\n' + lines = append(lines, inputLine) + } + for _, line := range lines { + inputStream = append(inputStream, line...) 
+ } + + // init Reader + reader := bufio.NewReader(bytes.NewReader(inputStream)) + lineReader := &LineReader{reader: reader} + + // read lines + var readLines [][]byte + for range lines { + bytes, err := lineReader.Read() + if err != nil && err != io.EOF { + t.Fatalf("failed to read lines: %v", err) + } + readLines = append(readLines, bytes) + } + + // validate + for i := range lines { + if len(lines[i]) != len(readLines[i]) { + t.Fatalf("expected line length: %d, but got %d", len(lines[i]), len(readLines[i])) + } + if !reflect.DeepEqual(lines[i], readLines[i]) { + t.Fatalf("expected line: %v, but got %v", lines[i], readLines[i]) + } + } +} diff --git a/verify/update-bazel.sh b/verify/update-bazel.sh index da40b4021006..f37cbe64045d 100755 --- a/verify/update-bazel.sh +++ b/verify/update-bazel.sh @@ -31,6 +31,8 @@ TMP_GOPATH=$(mktemp -d) c72631a220406c4fae276861ee286aaec82c5af2 \ "${TMP_GOPATH}" +touch "${TESTINFRA_ROOT}/vendor/BUILD" + "${TMP_GOPATH}/bin/gazelle" fix \ -build_file_name=BUILD,BUILD.bazel \ -external=vendored \ diff --git a/verify/verify-bazel.sh b/verify/verify-bazel.sh index 46240cefe1ac..f75cbbe0d192 100755 --- a/verify/verify-bazel.sh +++ b/verify/verify-bazel.sh @@ -31,6 +31,8 @@ TMP_GOPATH=$(mktemp -d) c72631a220406c4fae276861ee286aaec82c5af2 \ "${TMP_GOPATH}" +touch "${TESTINFRA_ROOT}/vendor/BUILD" + gazelle_diff=$("${TMP_GOPATH}/bin/gazelle" fix \ -build_file_name=BUILD,BUILD.bazel \ -external=vendored \ diff --git a/verify/verify-pylint.sh b/verify/verify-pylint.sh index 5687bf5670ec..28a40eb47e07 100755 --- a/verify/verify-pylint.sh +++ b/verify/verify-pylint.sh @@ -21,4 +21,4 @@ export PYLINTHOME=$(mktemp -d) pylint="$(dirname $0)/pylint_bin" shopt -s extglob globstar -${pylint} !(gubernator|external|bazel-*)/**/*.py +${pylint} !(gubernator|external|vendor|bazel-*)/**/*.py
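A few usage sketches of the newly vendored helpers follow; none of this code is part of the diff itself. First, StringFlag: unlike a plain string flag, it records whether Set was ever invoked, so callers can distinguish an explicit `--flag=default` from an untouched default. A minimal sketch, assuming the vendored `k8s.io/kubernetes/pkg/util` import path; the flag name and default are made up:

```go
package main

import (
	"flag"
	"fmt"

	"k8s.io/kubernetes/pkg/util"
)

func main() {
	// Start from a default; Provided() stays false until Set is invoked.
	name := util.NewStringFlag("anonymous")

	// *StringFlag satisfies flag.Value (String + Set), so it plugs into
	// the standard flag package; Type() additionally matches pflag.Value.
	flag.Var(&name, "name", "name to greet")
	flag.Parse()

	// Distinguishes an explicit --name=anonymous from the untouched default.
	fmt.Printf("name=%q, explicitly set: %v\n", name.Value(), name.Provided())
}
```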
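The template.go helpers pre-register `trim` and `wrap` before executing a template; `wrap` re-flows text through go/doc.ToText to roughly 80 columns, prefixing each line with the given indent. One caveat visible in the code: parsing goes through template.Must, so a malformed template panics rather than returning an error; only execution errors come back. A sketch under the same import-path assumption, with made-up data:

```go
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/util"
)

func main() {
	// trim strips surrounding whitespace; wrap re-flows the result to
	// ~80 columns, indenting every output line with the given prefix.
	out, err := util.ExecuteTemplateToString(
		`{{.Help | trim | wrap "  "}}`,
		struct{ Help string }{"  some long, multi-word help text that benefits from re-wrapping  "},
	)
	if err != nil {
		panic(err) // execution error; a parse error would already have panicked
	}
	fmt.Print(out)
}
```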
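trace.go is a small latency tracer: Step timestamps a named point, and LogIfLong dumps the whole per-step breakdown through glog only when the total time crosses a threshold, which keeps fast operations out of the logs. A sketch; the sleeps stand in for real work and the threshold is arbitrary:

```go
package main

import (
	"time"

	"k8s.io/kubernetes/pkg/util"
)

func main() {
	trace := util.NewTrace("example operation")
	// Log the per-step timing breakdown only if the whole
	// operation ends up slower than the threshold.
	defer trace.LogIfLong(50 * time.Millisecond)

	time.Sleep(30 * time.Millisecond) // stand-in for real work
	trace.Step("first phase done")

	time.Sleep(30 * time.Millisecond) // stand-in for real work
	trace.Step("second phase done")
}
```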
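AllPtrFieldsNil is aimed at specs where each pointer field corresponds to one plugin: it reports whether nothing was configured at all. Per its doc comment it panics on non-structs and returns true for a typed nil pointer. A sketch with a hypothetical two-field spec:

```go
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/util"
)

// Source is a hypothetical spec: each pointer field belongs to one plugin.
type Source struct {
	HostPath *string
	GitRepo  *string
}

func main() {
	fmt.Println(util.AllPtrFieldsNil(&Source{})) // true: no plugin configured

	path := "/data"
	fmt.Println(util.AllPtrFieldsNil(&Source{HostPath: &path})) // false: one field set
}
```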
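Finally, the yaml package's NewYAMLOrJSONDecoder, which the new decoder_test.go exercises: it peeks at up to bufferSize bytes to decide whether the stream is JSON or a YAML document stream, then decodes one document per Decode call until io.EOF. The consuming loop, sketched with an inline two-document input:

```go
package main

import (
	"bytes"
	"fmt"
	"io"

	"k8s.io/kubernetes/pkg/util/yaml"
)

func main() {
	input := []byte("foo: bar\n---\nbaz: biz\n")
	// The second argument is the peek buffer used to sniff JSON vs. YAML.
	dec := yaml.NewYAMLOrJSONDecoder(bytes.NewReader(input), 4096)
	for {
		obj := map[string]interface{}{}
		if err := dec.Decode(&obj); err != nil {
			if err == io.EOF {
				break // all documents consumed
			}
			fmt.Println("decode error:", err)
			return
		}
		fmt.Println(obj) // map[foo:bar], then map[baz:biz]
	}
}
```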